!44 fix CVE-2020-27781

From: @chixinze
Commit 82b406111c by openeuler-ci-bot, 2021-07-25 01:23:46 +00:00 (committed via Gitee)
7 changed files with 746 additions and 1 deletion

0001-CVE-2020-27781-1.patch Normal file

@@ -0,0 +1,48 @@
From 5dbc6bf0a67183bff7d7ca48ccd90ebbce492408 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C4=90=E1=BA=B7ng=20Minh=20D=C5=A9ng?= <dungdm93@live.com>
Date: Sun, 10 May 2020 11:37:23 +0700
Subject: [PATCH 1/5] pybind/ceph_volume_client: Fix PEP-8 SyntaxWarning
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Đặng Minh Dũng <dungdm93@live.com>
(cherry picked from commit 3ce9a89a5a1a2d7fa3d57c597b781a6aece7cbb5)
---
src/pybind/ceph_volume_client.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index 7d7e5b49e40..25cd6b91ae2 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -355,7 +355,7 @@ class CephFSVolumeClient(object):
continue
(group_id, volume_id) = volume.split('/')
- group_id = group_id if group_id is not 'None' else None
+ group_id = group_id if group_id != 'None' else None
volume_path = VolumePath(group_id, volume_id)
access_level = volume_data['access_level']
@@ -378,7 +378,7 @@ class CephFSVolumeClient(object):
if vol_meta['auths'][auth_id] == want_auth:
continue
- readonly = True if access_level is 'r' else False
+ readonly = access_level == 'r'
self._authorize_volume(volume_path, auth_id, readonly)
# Recovered from partial auth updates for the auth ID's access
@@ -1120,7 +1120,7 @@ class CephFSVolumeClient(object):
# Construct auth caps that if present might conflict with the desired
# auth caps.
- unwanted_access_level = 'r' if want_access_level is 'rw' else 'rw'
+ unwanted_access_level = 'r' if want_access_level == 'rw' else 'rw'
unwanted_mds_cap = 'allow {0} path={1}'.format(unwanted_access_level, path)
if namespace:
unwanted_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
--
2.23.0
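
The change matters because 'is' compares object identity rather than value: CPython happens to intern some short string literals, so identity checks against 'None', 'r' or 'rw' can appear to work until they silently do not, and Python 3.8+ flags them with a SyntaxWarning. A minimal sketch of the difference (not part of the patch):

    group_id = "".join(["No", "ne"])  # value "None", but a freshly built str object
    print(group_id == "None")         # True  -- value comparison, the intended check
    print(group_id is "None")         # usually False; CPython 3.8+ also warns:
                                      #   SyntaxWarning: "is" with a literal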

0001-cmake-detect-and-use-sigdescr_np-if-available.patch Normal file

@@ -0,0 +1,73 @@
From 9b34ba1777972808ba2af0073c967dece6c70626 Mon Sep 17 00:00:00 2001
From: David Disseldorp <ddiss@suse.de>
Date: Tue, 1 Sep 2020 13:49:21 +0200
Subject: [PATCH] cmake: detect and use sigdescr_np() if available
sys_siglist is deprecated with glibc 2.32. A new thread-safe and
async-signal safe sigdescr_np() function is provided, so use it if
available.
Fixes: https://tracker.ceph.com/issues/47187
Signed-off-by: David Disseldorp <ddiss@suse.de>
(cherry picked from commit b9b6faf66ae67648626470cb4fc3f0850ac4d842)
Conflicts:
CMakeLists.txt
cmake/modules/CephChecks.cmake
- CephChecks.cmake file does not exist in nautilus; manually cherry-picked the
change in that file to top-level CMakeLists.txt
---
CMakeLists.txt | 1 +
src/global/signal_handler.h | 8 +++++---
src/include/config-h.in.cmake | 3 +++
3 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5b7a67bec60..bdeea6f9c7d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -105,6 +105,7 @@ CHECK_FUNCTION_EXISTS(strerror_r HAVE_Strerror_R)
CHECK_FUNCTION_EXISTS(name_to_handle_at HAVE_NAME_TO_HANDLE_AT)
CHECK_FUNCTION_EXISTS(pipe2 HAVE_PIPE2)
CHECK_FUNCTION_EXISTS(accept4 HAVE_ACCEPT4)
+CHECK_FUNCTION_EXISTS(sigdescr_np HAVE_SIGDESCR_NP)
include(CMakePushCheckState)
cmake_push_check_state(RESET)
diff --git a/src/global/signal_handler.h b/src/global/signal_handler.h
index 476724201aa..c101b2e2873 100644
--- a/src/global/signal_handler.h
+++ b/src/global/signal_handler.h
@@ -20,10 +20,12 @@
typedef void (*signal_handler_t)(int);
-#ifndef HAVE_REENTRANT_STRSIGNAL
-# define sig_str(signum) sys_siglist[signum]
-#else
+#ifdef HAVE_SIGDESCR_NP
+# define sig_str(signum) sigdescr_np(signum)
+#elif HAVE_REENTRANT_STRSIGNAL
# define sig_str(signum) strsignal(signum)
+#else
+# define sig_str(signum) sys_siglist[signum]
#endif
void install_sighandler(int signum, signal_handler_t handler, int flags);
diff --git a/src/include/config-h.in.cmake b/src/include/config-h.in.cmake
index ccce8fe0017..acced696e36 100644
--- a/src/include/config-h.in.cmake
+++ b/src/include/config-h.in.cmake
@@ -235,6 +235,9 @@
/* Define to 1 if you have sched.h. */
#cmakedefine HAVE_SCHED 1
+/* Define to 1 if you have sigdescr_np. */
+#cmakedefine HAVE_SIGDESCR_NP 1
+
/* Support SSE (Streaming SIMD Extensions) instructions */
#cmakedefine HAVE_SSE
--
2.23.0

0002-CVE-2020-27781-2.patch Normal file

@@ -0,0 +1,172 @@
From ab18393db0b34506c3fd11346b6d0f1b781b9d99 Mon Sep 17 00:00:00 2001
From: Ramana Raja <rraja@redhat.com>
Date: Wed, 25 Nov 2020 16:44:35 +0530
Subject: [PATCH 2/5] pybind/ceph_volume_client: Disallow authorize auth_id
This patch prevents ceph_volume_client from authorizing auth_ids that it
did not create. Such auth_ids could have been created by other means for
other use cases and should not be modified by ceph_volume_client.
Fixes: https://tracker.ceph.com/issues/48555
Signed-off-by: Ramana Raja <rraja@redhat.com>
Signed-off-by: Kotresh HR <khiremat@redhat.com>
(cherry picked from commit 3a85d2d04028a323952a31d18cdbefb710be2e2b)
---
src/pybind/ceph_volume_client.py | 63 ++++++++++++++++++++------------
1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index 25cd6b91ae2..e2ab64ee226 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -215,6 +215,7 @@ CEPHFSVOLUMECLIENT_VERSION_HISTORY = """
* 2 - Added get_object, put_object, delete_object methods to CephFSVolumeClient
* 3 - Allow volumes to be created without RADOS namespace isolation
* 4 - Added get_object_and_version, put_object_versioned method to CephFSVolumeClient
+ * 5 - Disallow authorize API for users not created by CephFSVolumeClient
"""
@@ -238,7 +239,7 @@ class CephFSVolumeClient(object):
"""
# Current version
- version = 4
+ version = 5
# Where shall we create our volumes?
POOL_PREFIX = "fsvolume_"
@@ -379,7 +380,18 @@ class CephFSVolumeClient(object):
continue
readonly = access_level == 'r'
- self._authorize_volume(volume_path, auth_id, readonly)
+ client_entity = "client.{0}".format(auth_id)
+ try:
+ existing_caps = self._rados_command(
+ 'auth get',
+ {
+ 'entity': client_entity
+ }
+ )
+ # FIXME: rados raising Error instead of ObjectNotFound in auth get failure
+ except rados.Error:
+ existing_caps = None
+ self._authorize_volume(volume_path, auth_id, readonly, existing_caps)
# Recovered from partial auth updates for the auth ID's access
# to a volume.
@@ -975,6 +987,18 @@ class CephFSVolumeClient(object):
"""
with self._auth_lock(auth_id):
+ client_entity = "client.{0}".format(auth_id)
+ try:
+ existing_caps = self._rados_command(
+ 'auth get',
+ {
+ 'entity': client_entity
+ }
+ )
+ # FIXME: rados raising Error instead of ObjectNotFound in auth get failure
+ except rados.Error:
+ existing_caps = None
+
# Existing meta, or None, to be updated
auth_meta = self._auth_metadata_get(auth_id)
@@ -988,7 +1012,14 @@ class CephFSVolumeClient(object):
'dirty': True,
}
}
+
if auth_meta is None:
+ if existing_caps is not None:
+ msg = "auth ID: {0} exists and not created by ceph_volume_client. Not allowed to modify".format(auth_id)
+ log.error(msg)
+ raise CephFSVolumeClientError(msg)
+
+ # non-existent auth IDs
sys.stderr.write("Creating meta for ID {0} with tenant {1}\n".format(
auth_id, tenant_id
))
@@ -998,14 +1029,6 @@ class CephFSVolumeClient(object):
'tenant_id': tenant_id.__str__() if tenant_id else None,
'volumes': volume
}
-
- # Note: this is *not* guaranteeing that the key doesn't already
- # exist in Ceph: we are allowing VolumeClient tenants to
- # 'claim' existing Ceph keys. In order to prevent VolumeClient
- # tenants from reading e.g. client.admin keys, you need to
- # have configured your VolumeClient user (e.g. Manila) to
- # have mon auth caps that prevent it from accessing those keys
- # (e.g. limit it to only access keys with a manila.* prefix)
else:
# Disallow tenants to share auth IDs
if auth_meta['tenant_id'].__str__() != tenant_id.__str__():
@@ -1025,7 +1048,7 @@ class CephFSVolumeClient(object):
self._auth_metadata_set(auth_id, auth_meta)
with self._volume_lock(volume_path):
- key = self._authorize_volume(volume_path, auth_id, readonly)
+ key = self._authorize_volume(volume_path, auth_id, readonly, existing_caps)
auth_meta['dirty'] = False
auth_meta['volumes'][volume_path_str]['dirty'] = False
@@ -1042,7 +1065,7 @@ class CephFSVolumeClient(object):
'auth_key': None
}
- def _authorize_volume(self, volume_path, auth_id, readonly):
+ def _authorize_volume(self, volume_path, auth_id, readonly, existing_caps):
vol_meta = self._volume_metadata_get(volume_path)
access_level = 'r' if readonly else 'rw'
@@ -1061,14 +1084,14 @@ class CephFSVolumeClient(object):
vol_meta['auths'].update(auth)
self._volume_metadata_set(volume_path, vol_meta)
- key = self._authorize_ceph(volume_path, auth_id, readonly)
+ key = self._authorize_ceph(volume_path, auth_id, readonly, existing_caps)
vol_meta['auths'][auth_id]['dirty'] = False
self._volume_metadata_set(volume_path, vol_meta)
return key
- def _authorize_ceph(self, volume_path, auth_id, readonly):
+ def _authorize_ceph(self, volume_path, auth_id, readonly, existing_caps):
path = self._get_path(volume_path)
log.debug("Authorizing Ceph id '{0}' for path '{1}'".format(
auth_id, path
@@ -1096,15 +1119,7 @@ class CephFSVolumeClient(object):
want_osd_cap = 'allow {0} pool={1}'.format(want_access_level,
pool_name)
- try:
- existing = self._rados_command(
- 'auth get',
- {
- 'entity': client_entity
- }
- )
- # FIXME: rados raising Error instead of ObjectNotFound in auth get failure
- except rados.Error:
+ if existing_caps is None:
caps = self._rados_command(
'auth get-or-create',
{
@@ -1116,7 +1131,7 @@ class CephFSVolumeClient(object):
})
else:
# entity exists, update it
- cap = existing[0]
+ cap = existing_caps[0]
# Construct auth caps that if present might conflict with the desired
# auth caps.
--
2.23.0
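
In effect, authorize() now fetches the entity with 'auth get' up front and refuses to continue when the cephx ID already exists in the cluster but has no volume-client metadata, so pre-existing IDs such as client.admin can no longer have their caps rewritten through the volume client. A hedged sketch of how a caller hits the new error (the constructor arguments and the "admin" entity are illustrative assumptions, not taken from the patch):

    from ceph_volume_client import CephFSVolumeClient, CephFSVolumeClientError, VolumePath

    vc = CephFSVolumeClient("manila", "/etc/ceph/ceph.conf", "ceph")  # assumed deployment
    vc.connect()
    try:
        # "admin" exists in the cluster but was never created by the volume
        # client, so version 5 of the interface raises instead of updating it.
        vc.authorize(VolumePath("grp", "vol"), "admin", tenant_id="tenant1")
    except CephFSVolumeClientError as exc:
        print("refused:", exc)
    finally:
        vc.disconnect()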

0003-CVE-2020-27781-3.patch Normal file

@@ -0,0 +1,113 @@
From 621fea6fda4f06876295f67d4767914332ff82d3 Mon Sep 17 00:00:00 2001
From: Kotresh HR <khiremat@redhat.com>
Date: Thu, 26 Nov 2020 14:48:16 +0530
Subject: [PATCH 3/5] pybind/ceph_volume_client: Preserve existing caps while
authorize/deauthorize auth-id
Authorize/deauthorize used to overwrite the caps of an auth-id, which
would end up deleting its existing caps. This patch fixes that by
retaining the existing caps and appending or removing the volume caps as
needed.
Fixes: https://tracker.ceph.com/issues/48555
Signed-off-by: Kotresh HR <khiremat@redhat.com>
(cherry picked from commit 47100e528ef77e7e82dc9877424243dc6a7e7533)
---
src/pybind/ceph_volume_client.py | 43 ++++++++++++++++++++++----------
1 file changed, 30 insertions(+), 13 deletions(-)
diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index e2ab64ee226..ca1f361d03c 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -973,6 +973,26 @@ class CephFSVolumeClient(object):
data['version'] = self.version
return self._metadata_set(self._volume_metadata_path(volume_path), data)
+ def _prepare_updated_caps_list(self, existing_caps, mds_cap_str, osd_cap_str, authorize=True):
+ caps_list = []
+ for k, v in existing_caps['caps'].items():
+ if k == 'mds' or k == 'osd':
+ continue
+ elif k == 'mon':
+ if not authorize and v == 'allow r':
+ continue
+ caps_list.extend((k,v))
+
+ if mds_cap_str:
+ caps_list.extend(('mds', mds_cap_str))
+ if osd_cap_str:
+ caps_list.extend(('osd', osd_cap_str))
+
+ if authorize and 'mon' not in caps_list:
+ caps_list.extend(('mon', 'allow r'))
+
+ return caps_list
+
def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None):
"""
Get-or-create a Ceph auth identity for `auth_id` and grant them access
@@ -1151,8 +1171,8 @@ class CephFSVolumeClient(object):
if not orig_mds_caps:
return want_mds_cap, want_osd_cap
- mds_cap_tokens = orig_mds_caps.split(",")
- osd_cap_tokens = orig_osd_caps.split(",")
+ mds_cap_tokens = [x.strip() for x in orig_mds_caps.split(",")]
+ osd_cap_tokens = [x.strip() for x in orig_osd_caps.split(",")]
if want_mds_cap in mds_cap_tokens:
return orig_mds_caps, orig_osd_caps
@@ -1173,15 +1193,14 @@ class CephFSVolumeClient(object):
orig_mds_caps, orig_osd_caps, want_mds_cap, want_osd_cap,
unwanted_mds_cap, unwanted_osd_cap)
+ caps_list = self._prepare_updated_caps_list(cap, mds_cap_str, osd_cap_str)
caps = self._rados_command(
'auth caps',
{
'entity': client_entity,
- 'caps': [
- 'mds', mds_cap_str,
- 'osd', osd_cap_str,
- 'mon', cap['caps'].get('mon', 'allow r')]
+ 'caps': caps_list
})
+
caps = self._rados_command(
'auth get',
{
@@ -1306,8 +1325,8 @@ class CephFSVolumeClient(object):
)
def cap_remove(orig_mds_caps, orig_osd_caps, want_mds_caps, want_osd_caps):
- mds_cap_tokens = orig_mds_caps.split(",")
- osd_cap_tokens = orig_osd_caps.split(",")
+ mds_cap_tokens = [x.strip() for x in orig_mds_caps.split(",")]
+ osd_cap_tokens = [x.strip() for x in orig_osd_caps.split(",")]
for want_mds_cap, want_osd_cap in zip(want_mds_caps, want_osd_caps):
if want_mds_cap in mds_cap_tokens:
@@ -1323,17 +1342,15 @@ class CephFSVolumeClient(object):
mds_cap_str, osd_cap_str = cap_remove(orig_mds_caps, orig_osd_caps,
want_mds_caps, want_osd_caps)
- if not mds_cap_str:
+ caps_list = self._prepare_updated_caps_list(cap, mds_cap_str, osd_cap_str, authorize=False)
+ if not caps_list:
self._rados_command('auth del', {'entity': client_entity}, decode=False)
else:
self._rados_command(
'auth caps',
{
'entity': client_entity,
- 'caps': [
- 'mds', mds_cap_str,
- 'osd', osd_cap_str,
- 'mon', cap['caps'].get('mon', 'allow r')]
+ 'caps': caps_list
})
# FIXME: rados raising Error instead of ObjectNotFound in auth get failure
--
2.23.0
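
The new _prepare_updated_caps_list() helper builds the flat entity/cap list that the 'auth caps' mon command expects, carrying over any caps the volume client does not manage instead of overwriting them. A rough worked example with assumed cap strings (list order follows the existing caps dict):

    existing = {"caps": {"mgr": "allow *",
                         "mon": "allow r",
                         "mds": "allow rw path=/volumes/grp/old",
                         "osd": "allow rw pool=data namespace=old"}}
    new_mds = "allow rw path=/volumes/grp/old, allow rw path=/volumes/grp/new"
    new_osd = "allow rw pool=data namespace=old, allow rw pool=data namespace=new"
    # _prepare_updated_caps_list(existing, new_mds, new_osd) then yields the
    # flat list handed to 'auth caps', keeping the unmanaged "mgr" cap intact:
    #   ["mgr", "allow *", "mon", "allow r", "mds", new_mds, "osd", new_osd]
    # On deauthorize (authorize=False) with empty mds/osd strings the default
    # "mon: allow r" is dropped as well; an empty result means the entity is
    # removed outright via 'auth del'.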

0004-CVE-2020-27781-4.patch Normal file

@@ -0,0 +1,52 @@
From 6410f3dd63890f251414377de93cd51bfc372230 Mon Sep 17 00:00:00 2001
From: Kotresh HR <khiremat@redhat.com>
Date: Sun, 6 Dec 2020 12:40:20 +0530
Subject: [PATCH 4/5] pybind/ceph_volume_client: Optionally authorize existing
auth-ids
Optionally allow authorizing auth-ids not created by ceph_volume_client
via the option 'allow_existing_id'. This can help existing deployers
of manila to disallow/allow authorization of pre-created auth IDs
via a manila driver config that sets 'allow_existing_id' to False/True.
Fixes: https://tracker.ceph.com/issues/48555
Signed-off-by: Kotresh HR <khiremat@redhat.com>
(cherry picked from commit 77b42496e25cbd4af2e80a064ddf26221b53733f)
---
src/pybind/ceph_volume_client.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index ca1f361d03c..feeb495de00 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -993,7 +993,7 @@ class CephFSVolumeClient(object):
return caps_list
- def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None):
+ def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None, allow_existing_id=False):
"""
Get-or-create a Ceph auth identity for `auth_id` and grant them access
to
@@ -1003,6 +1003,8 @@ class CephFSVolumeClient(object):
:param tenant_id: Optionally provide a stringizable object to
restrict any created cephx IDs to other callers
passing the same tenant ID.
+ :allow_existing_id: Optionally authorize existing auth-ids not
+ created by ceph_volume_client
:return:
"""
@@ -1034,7 +1036,7 @@ class CephFSVolumeClient(object):
}
if auth_meta is None:
- if existing_caps is not None:
+ if not allow_existing_id and existing_caps is not None:
msg = "auth ID: {0} exists and not created by ceph_volume_client. Not allowed to modify".format(auth_id)
log.error(msg)
raise CephFSVolumeClientError(msg)
--
2.23.0
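
With the flag in place, a deployer that deliberately pre-creates its cephx IDs can opt back in per call. A short hedged sketch, reusing the illustrative client object from the note above (only the allow_existing_id keyword comes from this patch):

    result = vc.authorize(VolumePath("grp", "vol"), "guest1",
                          readonly=False, tenant_id="tenant1",
                          allow_existing_id=True)
    print(result["auth_key"])  # authorize() returns a dict carrying the cephx key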

0005-CVE-2020-27781-5.patch Normal file

@@ -0,0 +1,275 @@
From a18b92d39f5d4714e9a79c3c4a55049daec65290 Mon Sep 17 00:00:00 2001
From: Kotresh HR <khiremat@redhat.com>
Date: Tue, 1 Dec 2020 16:14:17 +0530
Subject: [PATCH 5/5] tasks/cephfs/test_volume_client: Add tests for
authorize/deauthorize
1. Add testcase for authorizing auth_id which is not added by
ceph_volume_client
2. Add testcase to test 'allow_existing_id' option
3. Add testcase for deauthorizing an auth_id whose caps have been
updated out of band
Signed-off-by: Kotresh HR <khiremat@redhat.com>
(cherry picked from commit aa4beb3d993649a696af95cf27150cc460baaf70)
Conflicts:
qa/tasks/cephfs/test_volume_client.py
---
qa/tasks/cephfs/test_volume_client.py | 213 +++++++++++++++++++++++++-
1 file changed, 209 insertions(+), 4 deletions(-)
diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py
index 0f205ecec6e..1c37b37a0b0 100644
--- a/qa/tasks/cephfs/test_volume_client.py
+++ b/qa/tasks/cephfs/test_volume_client.py
@@ -58,7 +58,7 @@ vc.disconnect()
def _configure_guest_auth(self, volumeclient_mount, guest_mount,
guest_entity, mount_path,
namespace_prefix=None, readonly=False,
- tenant_id=None):
+ tenant_id=None, allow_existing_id=False):
"""
Set up auth credentials for the guest client to mount a volume.
@@ -83,14 +83,16 @@ vc.disconnect()
key = self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
- tenant_id="{tenant_id}")
+ tenant_id="{tenant_id}",
+ allow_existing_id="{allow_existing_id}")
print(auth_result['auth_key'])
""".format(
group_id=group_id,
volume_id=volume_id,
guest_entity=guest_entity,
readonly=readonly,
- tenant_id=tenant_id)), volume_prefix, namespace_prefix
+ tenant_id=tenant_id,
+ allow_existing_id=allow_existing_id)), volume_prefix, namespace_prefix
)
# CephFSVolumeClient's authorize() does not return the secret
@@ -858,6 +860,209 @@ vc.disconnect()
)))
self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))
+ def test_authorize_auth_id_not_created_by_ceph_volume_client(self):
+ """
+ If the auth_id already exists and is not created by
+ ceph_volume_client, it's not allowed to authorize
+ the auth-id by default.
+ """
+ volumeclient_mount = self.mounts[1]
+ volumeclient_mount.umount_wait()
+
+ # Configure volumeclient_mount as the handle for driving volumeclient.
+ self._configure_vc_auth(volumeclient_mount, "manila")
+
+ group_id = "groupid"
+ volume_id = "volumeid"
+
+ # Create auth_id
+ out = self.fs.mon_manager.raw_cluster_cmd(
+ "auth", "get-or-create", "client.guest1",
+ "mds", "allow *",
+ "osd", "allow rw",
+ "mon", "allow *"
+ )
+
+ auth_id = "guest1"
+ guestclient_1 = {
+ "auth_id": auth_id,
+ "tenant_id": "tenant1",
+ }
+
+ # Create a volume.
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.create_volume(vp, 1024*1024*10)
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ )))
+
+ # Cannot authorize 'guestclient_1' to access the volume.
+ # It uses auth ID 'guest1', which already exists and not
+ # created by ceph_volume_client
+ with self.assertRaises(CommandFailedError):
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ auth_id=guestclient_1["auth_id"],
+ tenant_id=guestclient_1["tenant_id"]
+ )))
+
+ # Delete volume
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.delete_volume(vp)
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ )))
+
+ def test_authorize_allow_existing_id_option(self):
+ """
+ If the auth_id already exists and is not created by
+ ceph_volume_client, it's not allowed to authorize
+ the auth-id by default but is allowed with option
+ allow_existing_id.
+ """
+ volumeclient_mount = self.mounts[1]
+ volumeclient_mount.umount_wait()
+
+ # Configure volumeclient_mount as the handle for driving volumeclient.
+ self._configure_vc_auth(volumeclient_mount, "manila")
+
+ group_id = "groupid"
+ volume_id = "volumeid"
+
+ # Create auth_id
+ out = self.fs.mon_manager.raw_cluster_cmd(
+ "auth", "get-or-create", "client.guest1",
+ "mds", "allow *",
+ "osd", "allow rw",
+ "mon", "allow *"
+ )
+
+ auth_id = "guest1"
+ guestclient_1 = {
+ "auth_id": auth_id,
+ "tenant_id": "tenant1",
+ }
+
+ # Create a volume.
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.create_volume(vp, 1024*1024*10)
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ )))
+
+ # Cannot authorize 'guestclient_1' to access the volume
+ # by default, which already exists and not created by
+ # ceph_volume_client but is allowed with option 'allow_existing_id'.
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}",
+ allow_existing_id="{allow_existing_id}")
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ auth_id=guestclient_1["auth_id"],
+ tenant_id=guestclient_1["tenant_id"],
+ allow_existing_id=True
+ )))
+
+ # Delete volume
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.delete_volume(vp)
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ )))
+
+ def test_deauthorize_auth_id_after_out_of_band_update(self):
+ """
+ If the auth_id authorized by ceph_volume_client is updated
+ out of band, the auth_id should not be deleted after a
+ deauthorize. It should only remove caps associated it.
+ """
+ volumeclient_mount = self.mounts[1]
+ volumeclient_mount.umount_wait()
+
+ # Configure volumeclient_mount as the handle for driving volumeclient.
+ self._configure_vc_auth(volumeclient_mount, "manila")
+
+ group_id = "groupid"
+ volume_id = "volumeid"
+
+
+ auth_id = "guest1"
+ guestclient_1 = {
+ "auth_id": auth_id,
+ "tenant_id": "tenant1",
+ }
+
+ # Create a volume.
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.create_volume(vp, 1024*1024*10)
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ )))
+
+ # Authorize 'guestclient_1' to access the volume.
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ auth_id=guestclient_1["auth_id"],
+ tenant_id=guestclient_1["tenant_id"]
+ )))
+
+ # Update caps for guestclient_1 out of band
+ out = self.fs.mon_manager.raw_cluster_cmd(
+ "auth", "caps", "client.guest1",
+ "mds", "allow rw path=/volumes/groupid, allow rw path=/volumes/groupid/volumeid",
+ "osd", "allow rw pool=cephfs_data namespace=fsvolumens_volumeid",
+ "mon", "allow r",
+ "mgr", "allow *"
+ )
+
+ # Deauthorize guestclient_1
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.deauthorize(vp, "{guest_entity}")
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ guest_entity=guestclient_1["auth_id"]
+ )))
+
+ # Validate the caps of guestclient_1 after deauthorize. It should not have deleted
+ # guestclient_1. The mgr and mds caps should be present which was updated out of band.
+ out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
+
+ self.assertEqual("client.guest1", out[0]["entity"])
+ self.assertEqual("allow rw path=/volumes/groupid", out[0]["caps"]["mds"])
+ self.assertEqual("allow *", out[0]["caps"]["mgr"])
+ self.assertNotIn("osd", out[0]["caps"])
+
+ # Delete volume
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.delete_volume(vp)
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ )))
+
def test_recover_metadata(self):
"""
That volume client can recover from partial auth updates using
@@ -1078,7 +1283,7 @@ vc.disconnect()
guest_mount.umount_wait()
# Set auth caps for the auth ID using the volumeclient
- self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path)
+ self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path, allow_existing_id=True)
# Mount the volume in the guest using the auth ID to assert that the
# auth caps are valid
--
2.23.0

ceph.spec

@@ -110,7 +110,7 @@
#################################################################################
Name: ceph
Version: 14.2.15
- Release: 3%{?dist}
+ Release: 4%{?dist}
%if 0%{?fedora} || 0%{?rhel} || 0%{?openEuler}
Epoch: 2
%endif
@@ -127,6 +127,15 @@ Group: System/Filesystems
URL: http://ceph.com/
Source0: %{?_remote_tarball_prefix}ceph-14.2.15.tar.gz
+ # backport
+ Patch0: 0001-cmake-detect-and-use-sigdescr_np-if-available.patch
+ # backport for cves
+ Patch1: 0001-CVE-2020-27781-1.patch
+ Patch2: 0002-CVE-2020-27781-2.patch
+ Patch3: 0003-CVE-2020-27781-3.patch
+ Patch4: 0004-CVE-2020-27781-4.patch
+ Patch5: 0005-CVE-2020-27781-5.patch
%if 0%{?suse_version}
# _insert_obs_source_lines_here
ExclusiveArch: x86_64 aarch64 ppc64le s390x
@@ -2033,6 +2042,9 @@ exit 0
%changelog
+ * Sun Jul 18 2021 chixinze <xmdxcxz@gmail.com> - 1:14.2.15-4
+ - fix CVE-2020-27781
* Thu Feb 18 2021 yanglongkang <yanglongkang@huawei.com> - 1:14.2.15-3
- Temporary delete mgr-rook, mgr-ssh, mgr-dashboard and k8sevents