Package init

overweight committed 2019-09-30 10:34:07 -04:00
commit 89cf4ef42c
9 changed files with 2451 additions and 0 deletions


@@ -0,0 +1,13 @@
--- ceph-12.1.1.orig/src/rocksdb/util/murmurhash.cc 2017-04-27 01:13:46.000000000 +0100
+++ ceph-12.1.1.orig/src/rocksdb/util/murmurhash.cc 2017-07-25 11:37:28.910266684 +0100
@@ -113,8 +113,8 @@ unsigned int MurmurHash2 ( const void *
switch(len)
{
- case 3: h ^= data[2] << 16;
- case 2: h ^= data[1] << 8;
+ case 3: h ^= data[2] << 16; // fallthrough
+ case 2: h ^= data[1] << 8; // fallthrough
case 1: h ^= data[0];
h *= m;
};
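
The two added lines are comment-only: GCC 7's -Wimplicit-fallthrough (enabled by -Wextra) accepts a "fallthrough" marker comment as proof that the fall-through is intentional, so the hunk silences the warning without changing behaviour. A standalone sketch of the same idea, with a hypothetical function name and C++17's [[fallthrough]] attribute shown as the alternative to the marker comment:

#include <cstddef>
#include <cstdint>

// Mix the trailing 1-3 bytes, MurmurHash2-style: every case is meant to fall
// through to the next one, and the attribute (or a "// fallthrough" comment)
// keeps -Wimplicit-fallthrough quiet without changing behaviour.
inline uint32_t mix_tail(uint32_t h, const unsigned char* data, std::size_t len) {
    switch (len & 3) {
    case 3: h ^= static_cast<uint32_t>(data[2]) << 16; [[fallthrough]];
    case 2: h ^= static_cast<uint32_t>(data[1]) << 8;  [[fallthrough]];
    case 1: h ^= data[0];
    }
    return h;
}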


@@ -0,0 +1,127 @@
From 2f0a7153460acc3f21462236f470ec3471fa2ee1 Mon Sep 17 00:00:00 2001
From: Boris Ranto <branto@redhat.com>
Date: Mon, 31 Jul 2017 19:50:23 +0200
Subject: [PATCH] cmake: Support ppc64
The ppc64 support requires a couple of changes:
- adding the ppc64 support to cmake
- changing optimized crc32 code to compile on ppc64le only
- moving ifdef condition before crc32_align to avoid defined but not
used warning
Signed-off-by: Boris Ranto <branto@redhat.com>
---
cmake/modules/SIMDExt.cmake | 15 ++++++++++++++-
src/CMakeLists.txt | 4 +++-
src/arch/ppc.c | 8 ++++----
src/common/crc32c_ppc.c | 6 +++---
4 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/cmake/modules/SIMDExt.cmake b/cmake/modules/SIMDExt.cmake
index 5330835..c47667d 100644
--- a/cmake/modules/SIMDExt.cmake
+++ b/cmake/modules/SIMDExt.cmake
@@ -109,7 +109,20 @@ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "i386|i686|amd64|x86_64|AMD64")
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "i686|amd64|x86_64|AMD64")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "(powerpc|ppc)64le")
set(HAVE_PPC64LE 1)
- message(STATUS " we are ppc64le")
+ message(STATUS " we are ppc64")
+ CHECK_C_COMPILER_FLAG("-maltivec" HAS_ALTIVEC)
+ if(HAS_ALTIVEC)
+ message(STATUS " HAS_ALTIVEC yes")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maltivec")
+ set(CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -maltivec")
+ endif()
+ CHECK_C_COMPILER_FLAG("-mcpu=power8" HAVE_POWER8)
+ if(HAVE_POWER8)
+ message(STATUS " HAVE_POWER8 yes")
+ endif()
+elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "(power|ppc)64")
+ set(HAVE_PPC64 1)
+ message(STATUS " we are ppc64")
CHECK_C_COMPILER_FLAG("-maltivec" HAS_ALTIVEC)
if(HAS_ALTIVEC)
message(STATUS " HAS_ALTIVEC yes")
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 66f0c14..38d1913 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -568,7 +568,9 @@ if(HAVE_INTEL)
endif(HAVE_GOOD_YASM_ELF64)
elseif(HAVE_POWER8)
list(APPEND libcommon_files
- common/crc32c_ppc.c
+ common/crc32c_ppc.c)
+elseif(HAVE_PPC64LE)
+ list(APPEND libcommon_files
common/crc32c_ppc_asm.S
common/crc32c_ppc_fast_zero_asm.S)
endif(HAVE_INTEL)
diff --git a/src/arch/ppc.c b/src/arch/ppc.c
index f21e2fe..11d3a49 100644
--- a/src/arch/ppc.c
+++ b/src/arch/ppc.c
@@ -14,10 +14,10 @@ int ceph_arch_ppc_crc32 = 0;
#include <stdio.h>
-#if __linux__ && __powerpc64__
+#ifdef HAVE_PPC64LE
#include <sys/auxv.h>
#include <asm/cputable.h>
-#endif /* __linux__ && __powerpc64__ */
+#endif /* HAVE_PPC64LE */
#ifndef PPC_FEATURE2_VEC_CRYPTO
#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
@@ -31,9 +31,9 @@ int ceph_arch_ppc_probe(void)
{
ceph_arch_ppc_crc32 = 0;
-#if __linux__ && __powerpc64__
+#ifdef HAVE_PPC64LE
if (getauxval(AT_HWCAP2) & PPC_FEATURE2_VEC_CRYPTO) ceph_arch_ppc_crc32 = 1;
-#endif /* __linux__ && __powerpc64__ */
+#endif /* HAVE_PPC64LE */
return 0;
}
diff --git a/src/common/crc32c_ppc.c b/src/common/crc32c_ppc.c
index 43756e2..52fd1c4 100644
--- a/src/common/crc32c_ppc.c
+++ b/src/common/crc32c_ppc.c
@@ -20,6 +20,7 @@
#define VMX_ALIGN 16
#define VMX_ALIGN_MASK (VMX_ALIGN-1)
+#ifdef HAVE_PPC64LE
#ifdef REFLECT
static unsigned int crc32_align(unsigned int crc, unsigned char const *p,
unsigned long len)
@@ -38,7 +39,6 @@ static unsigned int crc32_align(unsigned int crc, unsigned char const *p,
}
#endif
-#ifdef HAVE_POWER8
static inline unsigned long polynomial_multiply(unsigned int a, unsigned int b) {
vector unsigned int va = {a, 0, 0, 0};
vector unsigned int vb = {b, 0, 0, 0};
@@ -134,7 +134,7 @@ uint32_t ceph_crc32c_ppc(uint32_t crc, unsigned char const *data, unsigned len)
return crc;
}
-#else /* HAVE_POWER8 */
+#else /* HAVE_PPC64LE */
/* This symbol has to exist on non-ppc architectures (and on legacy
* ppc systems using power7 or below) in order to compile properly
@@ -145,4 +145,4 @@ uint32_t ceph_crc32c_ppc(uint32_t crc, unsigned char const *data, unsigned len)
return 0;
}
-#endif /* HAVE_POWER8 */
+#endif /* HAVE_PPC64LE */
--
2.9.4
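
At run time the optimized CRC32C above is only engaged when the kernel reports the POWER8 vector-crypto facility: the cmake hunk supplies the HAVE_PPC64LE define and src/arch/ppc.c does the HWCAP2 probe. A minimal standalone sketch of that probe, assuming a Linux/glibc target where <sys/auxv.h> provides getauxval() (the helper name is hypothetical; the fallback constant matches the patch):

// Compile-time gate (HAVE_PPC64LE comes from the cmake change) plus a
// run-time HWCAP2 probe; anything else falls back to the generic CRC32C.
#ifdef HAVE_PPC64LE
#include <sys/auxv.h>
#endif

#ifndef PPC_FEATURE2_VEC_CRYPTO
#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
#endif

static int ppc_vec_crypto_available(void) {
#ifdef HAVE_PPC64LE
    return (getauxval(AT_HWCAP2) & PPC_FEATURE2_VEC_CRYPTO) != 0;
#else
    return 0;
#endif
}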


@@ -0,0 +1,31 @@
From 74a754690736f6608b0d4d9c807df0bd777a129d Mon Sep 17 00:00:00 2001
From: Boris Ranto <branto@redhat.com>
Date: Fri, 8 Dec 2017 00:21:38 +0100
Subject: [PATCH] librbd: Conditionally import TrimRequest.cc
We include TrimRequest.cc in librbd tests at two places:
- operation/test_mock_TrimRequest.cc
- operation/test_mock_ResizeRequest.cc
That causes linking errors when doing the builds because some of the
structures are defined twice.
Signed-off-by: Boris Ranto <branto@redhat.com>
---
src/librbd/operation/TrimRequest.cc | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src/librbd/operation/TrimRequest.cc b/src/librbd/operation/TrimRequest.cc
index 28f2deb..929ca51 100644
--- a/src/librbd/operation/TrimRequest.cc
+++ b/src/librbd/operation/TrimRequest.cc
@@ -362,4 +362,6 @@ void TrimRequest<I>::send_finish(int r) {
} // namespace operation
} // namespace librbd
+#ifndef TEST_F
template class librbd::operation::TrimRequest<librbd::ImageCtx>;
+#endif
--
2.9.5
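
The guard works because TrimRequest.cc ends with an explicit instantiation and both test_mock_TrimRequest.cc and test_mock_ResizeRequest.cc #include the whole .cc file; GCC emits an explicit instantiation definition as an ordinary, non-COMDAT symbol in every translation unit that contains it, so the test binary ends up linking the same symbols twice. A toy illustration with hypothetical names, not taken from librbd:

// trim.cc: a class template with an out-of-line member, plus an explicit
// instantiation at the end of the file, guarded the same way as the patch.
template <typename I>
struct Trim {
    int run();
};

template <typename I>
int Trim<I>::run() { return 0; }

#ifndef TEST_F                  // defined by <gtest/gtest.h> in the test TUs
template struct Trim<int>;      // emits Trim<int>::run() in this object file
#endif

// test_a.cc and test_b.cc each do `#include "trim.cc"` after gtest. Without
// the guard both objects would carry their own Trim<int>::run() and linking
// the combined test binary fails with "multiple definition" errors, while the
// regular librbd build (no TEST_F) still gets the instantiation it needs.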


@@ -0,0 +1,155 @@
--- ceph-12.2.3/src/rocksdb/table/block.h.orig 2018-02-22 07:49:38.044899631 -0500
+++ ceph-12.2.3/src/rocksdb/table/block.h 2018-02-22 07:58:55.855899631 -0500
@@ -65,7 +65,8 @@
// Create bitmap and set all the bits to 0
bitmap_ = new std::atomic<uint32_t>[bitmap_size];
- memset(bitmap_, 0, bitmap_size * kBytesPersEntry);
+ // memset(bitmap_, 0, bitmap_size * kBytesPersEntry);
+ { unsigned i = 0; for (; i < bitmap_size;) bitmap_[i++] = 0; }
RecordTick(GetStatistics(), READ_AMP_TOTAL_READ_BYTES,
num_bits_needed << bytes_per_bit_pow_);
--- ceph-12.2.3/src/rocksdb/db/c.cc.orig 2018-02-22 08:14:56.033899631 -0500
+++ ceph-12.2.3/src/rocksdb/db/c.cc 2018-02-22 10:06:39.759899631 -0500
@@ -1322,11 +1322,6 @@
b->rep.PutLogData(Slice(blob, len));
}
-void rocksdb_writebatch_iterate(
- rocksdb_writebatch_t* b,
- void* state,
- void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
- void (*deleted)(void*, const char* k, size_t klen)) {
class H : public WriteBatch::Handler {
public:
void* state_;
@@ -1339,6 +1334,12 @@
(*deleted_)(state_, key.data(), key.size());
}
};
+
+void rocksdb_writebatch_iterate(
+ rocksdb_writebatch_t* b,
+ void* state,
+ void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
+ void (*deleted)(void*, const char* k, size_t klen)) {
H handler;
handler.state_ = state;
handler.put_ = put;
@@ -1579,18 +1580,6 @@
void* state,
void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
void (*deleted)(void*, const char* k, size_t klen)) {
- class H : public WriteBatch::Handler {
- public:
- void* state_;
- void (*put_)(void*, const char* k, size_t klen, const char* v, size_t vlen);
- void (*deleted_)(void*, const char* k, size_t klen);
- virtual void Put(const Slice& key, const Slice& value) override {
- (*put_)(state_, key.data(), key.size(), value.data(), value.size());
- }
- virtual void Delete(const Slice& key) override {
- (*deleted_)(state_, key.data(), key.size());
- }
- };
H handler;
handler.state_ = state;
handler.put_ = put;
@@ -2532,13 +2521,9 @@
delete filter;
}
-rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_bloom_format(int bits_per_key, bool original_format) {
- // Make a rocksdb_filterpolicy_t, but override all of its methods so
- // they delegate to a NewBloomFilterPolicy() instead of user
- // supplied C functions.
- struct Wrapper : public rocksdb_filterpolicy_t {
+ struct WrapperFP : public rocksdb_filterpolicy_t {
const FilterPolicy* rep_;
- ~Wrapper() { delete rep_; }
+ ~WrapperFP() { delete rep_; }
const char* Name() const override { return rep_->Name(); }
void CreateFilter(const Slice* keys, int n,
std::string* dst) const override {
@@ -2549,11 +2534,16 @@
}
static void DoNothing(void*) { }
};
- Wrapper* wrapper = new Wrapper;
+
+rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_bloom_format(int bits_per_key, bool original_format) {
+ // Make a rocksdb_filterpolicy_t, but override all of its methods so
+ // they delegate to a NewBloomFilterPolicy() instead of user
+ // supplied C functions.
+ WrapperFP* wrapper = new WrapperFP;
wrapper->rep_ = NewBloomFilterPolicy(bits_per_key, original_format);
wrapper->state_ = nullptr;
wrapper->delete_filter_ = nullptr;
- wrapper->destructor_ = &Wrapper::DoNothing;
+ wrapper->destructor_ = &WrapperFP::DoNothing;
return wrapper;
}
@@ -2889,10 +2879,9 @@
delete st;
}
-rocksdb_slicetransform_t* rocksdb_slicetransform_create_fixed_prefix(size_t prefixLen) {
- struct Wrapper : public rocksdb_slicetransform_t {
+ struct WrapperST : public rocksdb_slicetransform_t {
const SliceTransform* rep_;
- ~Wrapper() { delete rep_; }
+ ~WrapperST() { delete rep_; }
const char* Name() const override { return rep_->Name(); }
Slice Transform(const Slice& src) const override {
return rep_->Transform(src);
@@ -2903,31 +2892,20 @@
bool InRange(const Slice& src) const override { return rep_->InRange(src); }
static void DoNothing(void*) { }
};
- Wrapper* wrapper = new Wrapper;
+
+rocksdb_slicetransform_t* rocksdb_slicetransform_create_fixed_prefix(size_t prefixLen) {
+ WrapperST* wrapper = new WrapperST;
wrapper->rep_ = rocksdb::NewFixedPrefixTransform(prefixLen);
wrapper->state_ = nullptr;
- wrapper->destructor_ = &Wrapper::DoNothing;
+ wrapper->destructor_ = &WrapperST::DoNothing;
return wrapper;
}
rocksdb_slicetransform_t* rocksdb_slicetransform_create_noop() {
- struct Wrapper : public rocksdb_slicetransform_t {
- const SliceTransform* rep_;
- ~Wrapper() { delete rep_; }
- const char* Name() const override { return rep_->Name(); }
- Slice Transform(const Slice& src) const override {
- return rep_->Transform(src);
- }
- bool InDomain(const Slice& src) const override {
- return rep_->InDomain(src);
- }
- bool InRange(const Slice& src) const override { return rep_->InRange(src); }
- static void DoNothing(void*) { }
- };
- Wrapper* wrapper = new Wrapper;
+ WrapperST* wrapper = new WrapperST;
wrapper->rep_ = rocksdb::NewNoopTransform();
wrapper->state_ = nullptr;
- wrapper->destructor_ = &Wrapper::DoNothing;
+ wrapper->destructor_ = &WrapperST::DoNothing;
return wrapper;
}
--- ceph-12.2.3/src/rocksdb/memtable/inlineskiplist.h.orig 2018-02-22 10:34:06.918899631 -0500
+++ ceph-12.2.3/src/rocksdb/memtable/inlineskiplist.h 2018-02-22 10:34:44.145899631 -0500
@@ -279,7 +279,7 @@
// next_[0]. This is used for passing data from AllocateKey to Insert.
void StashHeight(const int height) {
assert(sizeof(int) <= sizeof(next_[0]));
- memcpy(&next_[0], &height, sizeof(int));
+ memcpy(static_cast<void*>(&next_[0]), &height, sizeof(int));
}
// Retrieves the value passed to StashHeight. Undefined after a call
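
None of the RocksDB hunks above carries a description, but they read like GCC 8 compatibility fixes: block.h and inlineskiplist.h stop calling memset()/memcpy() directly on std::atomic objects, which GCC 8's -Wclass-memaccess rejects under -Werror, and the c.cc hunks hoist the function-local Wrapper/H helper classes to file scope under unique names. A minimal sketch of the std::atomic case, with a hypothetical function name and assuming -Wclass-memaccess is indeed the diagnostic being avoided:

#include <atomic>
#include <cstddef>
#include <cstdint>

// Zeroing an array of std::atomic<uint32_t>: memset() is flagged because the
// element type is not trivially copyable, so a per-element store (the block.h
// hunk uses plain assignment, which is equivalent) is used instead.
inline void zero_bitmap(std::atomic<uint32_t>* bitmap, std::size_t n) {
    // memset(bitmap, 0, n * sizeof(bitmap[0]));  // -Wclass-memaccess
    for (std::size_t i = 0; i < n; ++i)
        bitmap[i].store(0, std::memory_order_relaxed);
}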

6000-CVE-2018-16889.patch

@@ -0,0 +1,26 @@
--- a/src/rgw/rgw_rest_s3.cc 2018-08-31 01:24:39.000000000 +0800
+++ b/src/rgw/rgw_rest_s3.cc 2019-04-04 17:22:37.615000000 +0800
@@ -3751,8 +3751,9 @@ AWSGeneralAbstractor::get_auth_data_v4(c
boost::optional<std::string> canonical_headers = \
get_v4_canonical_headers(s->info, signed_hdrs, using_qs);
if (canonical_headers) {
- ldout(s->cct, 10) << "canonical headers format = " << *canonical_headers
- << dendl;
+ using sanitize = rgw::crypt_sanitize::log_content;
+ ldout(s->cct, 10) << "canonical headers format = "
+ << sanitize{*canonical_headers} << dendl;
} else {
throw -EPERM;
}
--- a/src/rgw/rgw_auth_s3.cc 2018-08-31 01:24:39.000000000 +0800
+++ b/src/rgw/rgw_auth_s3.cc 2019-04-04 17:25:27.208000000 +0800
@@ -659,7 +659,8 @@ get_v4_canon_req_hash(CephContext* cct,
const auto canonical_req_hash = calc_hash_sha256(canonical_req);
- ldout(cct, 10) << "canonical request = " << canonical_req << dendl;
+ using sanitize = rgw::crypt_sanitize::log_content;
+ ldout(cct, 10) << "canonical request = " << sanitize{canonical_req} << dendl;
ldout(cct, 10) << "canonical request hash = "
<< buf_to_hex(canonical_req_hash).data() << dendl;
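
The hunks above (6000-CVE-2018-16889.patch) route the v4-auth debug output through rgw::crypt_sanitize::log_content so that secrets carried in the signed request (the advisory concerns customer-supplied encryption keys) are no longer written to the log verbatim. A generic sketch of the wrapper pattern with hypothetical names; the real rgw type and its redaction rules live elsewhere in src/rgw:

#include <ostream>
#include <string>

// Stand-in for the sanitizing wrapper: callers opt in at the log site by
// streaming sanitized{value} instead of the raw string, and operator<< is the
// one place that decides what gets masked.
struct sanitized {
    const std::string& text;
};

inline std::ostream& operator<<(std::ostream& os, const sanitized& s) {
    static const std::string key_hdr = "x-amz-server-side-encryption-customer-key";
    if (s.text.find(key_hdr) != std::string::npos)
        return os << "<redacted: request carries " << key_hdr << ">";
    return os << s.text;
}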

6001-CVE-2018-16846-1.patch

@@ -0,0 +1,172 @@
From 4337e6a7d9f92c8549ebee20d0dd67a01e49857f Mon Sep 17 00:00:00 2001
From: "Robin H. Johnson" <rjohnson@digitalocean.com>
Date: Fri, 21 Sep 2018 14:49:34 -0700
Subject: [PATCH] rgw: enforce bounds on max-keys/max-uploads/max-parts
RGW S3 listing operations provided a way for authenticated users to
cause a denial of service against OMAPs holding bucket indices.
Bound the min & max values that a user could pass into the max-X
parameters, to keep the system safe. The default of 1000 is chosen to
match AWS S3 behavior.
Affected operations:
- ListBucket, via max-keys
- ListBucketVersions, via max-keys
- ListBucketMultiPartUploads, via max-uploads
- ListMultipartUploadParts, via max-parts
The Swift bucket listing codepath already enforced a limit, so is
unaffected by this issue.
Prior to this commit, the effective limit is the lower of
osd_max_omap_entries_per_request or osd_max_omap_bytes_per_request.
Backport: luminous, mimic
Fixes: http://tracker.ceph.com/issues/35994
Signed-off-by: Robin H. Johnson <rjohnson@digitalocean.com>
(cherry picked from commit d79f68a1e31f4bc917eec1b6bbc8e8446377dc6b)
Conflicts:
src/common/options.cc:
Conflicts due to options from master
---
src/common/options.cc | 11 +++++++++++
src/rgw/rgw_op.cc | 21 +++++----------------
src/rgw/rgw_op.h | 25 +++++++++++++++++++++++++
src/rgw/rgw_rest.cc | 11 +++++------
src/rgw/rgw_rest_swift.cc | 2 ++
5 files changed, 48 insertions(+), 22 deletions(-)
diff --git a/src/common/options.cc b/src/common/options.cc
index c1a0e7b05ea0..5b62a3f7c3d6 100644
--- a/src/common/options.cc
+++ b/src/common/options.cc
@@ -5705,6 +5705,17 @@ std::vector<Option> get_rgw_options() {
"of RGW instances under heavy use. If you would like "
"to turn off cache expiry, set this value to zero."),
+ Option("rgw_max_listing_results", Option::TYPE_UINT,
+ Option::LEVEL_ADVANCED)
+ .set_default(1000)
+ .set_min_max(1, 100000)
+ .add_service("rgw")
+ .set_description("Upper bound on results in listing operations, ListBucket max-keys"),
+ .set_long_description("This caps the maximum permitted value for listing-like operations in RGW S3. "
+ "Affects ListBucket(max-keys), "
+ "ListBucketVersions(max-keys), "
+ "ListBucketMultiPartUploads(max-uploads), "
+ "ListMultipartUploadParts(max-parts)"),
});
}
diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc
index 6e7daadcd228..c17d04988169 100644
--- a/src/rgw/rgw_op.cc
+++ b/src/rgw/rgw_op.cc
@@ -2279,22 +2279,11 @@ int RGWListBucket::verify_permission()
int RGWListBucket::parse_max_keys()
{
- if (!max_keys.empty()) {
- char *endptr;
- max = strtol(max_keys.c_str(), &endptr, 10);
- if (endptr) {
- if (endptr == max_keys.c_str()) return -EINVAL;
- while (*endptr && isspace(*endptr)) // ignore white space
- endptr++;
- if (*endptr) {
- return -EINVAL;
- }
- }
- } else {
- max = default_max;
- }
-
- return 0;
+ // Bound max value of max-keys to configured value for security
+ // Bound min value of max-keys to '0'
+ // Some S3 clients explicitly send max-keys=0 to detect if the bucket is
+ // empty without listing any items.
+ op_ret = parse_value_and_bound(max_keys, &max, 0, g_conf()->rgw_max_listing_results, default_max);
}
void RGWListBucket::pre_exec()
diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h
index e4d8cd4a980b..521a3d179d76 100644
--- a/src/rgw/rgw_op.h
+++ b/src/rgw/rgw_op.h
@@ -2214,6 +2214,31 @@ class RGWGetClusterStat : public RGWOp {
virtual const string name() { return "get_cluster_stat"; }
};
+static inline int parse_value_and_bound(const string &input, long *output, const long lower_bound, const long upper_bound, const long default_val)
+{
+ if (!input.empty()) {
+ char *endptr;
+ *output = strtol(input.c_str(), &endptr, 10);
+ if (endptr) {
+ if (endptr == input.c_str()) return -EINVAL;
+ while (*endptr && isspace(*endptr)) // ignore white space
+ endptr++;
+ if (*endptr) {
+ return -EINVAL;
+ }
+ }
+ if(*output > upper_bound) {
+ *output = upper_bound;
+ }
+ if(*output < lower_bound) {
+ *output = lower_bound;
+ }
+ } else {
+ *output = default_val;
+ }
+
+ return 0;
+}
#endif /* CEPH_RGW_OP_H */
diff --git a/src/rgw/rgw_rest.cc b/src/rgw/rgw_rest.cc
index 80a886ec5d11..539cebeb6981 100644
--- a/src/rgw/rgw_rest.cc
+++ b/src/rgw/rgw_rest.cc
@@ -1659,8 +1659,7 @@ int RGWListMultipart_ObjStore::get_params()
}
string str = s->info.args.get("max-parts");
- if (!str.empty())
- max_parts = atoi(str.c_str());
+ op_ret = parse_value_and_bound(str, &max_parts, 0, g_conf()->rgw_max_listing_results, max_parts);
return op_ret;
}
@@ -1670,10 +1669,10 @@ int RGWListBucketMultiparts_ObjStore::get_params()
delimiter = s->info.args.get("delimiter");
prefix = s->info.args.get("prefix");
string str = s->info.args.get("max-uploads");
- if (!str.empty())
- max_uploads = atoi(str.c_str());
- else
- max_uploads = default_max;
+ op_ret = parse_value_and_bound(str, &max_uploads, 0, g_conf()->rgw_max_listing_results, default_max);
+ if (op_ret < 0) {
+ return op_ret;
+ }
string key_marker = s->info.args.get("key-marker");
string upload_id_marker = s->info.args.get("upload-id-marker");
diff --git a/src/rgw/rgw_rest_swift.cc b/src/rgw/rgw_rest_swift.cc
index c9d96d9631bf..35e192c150ed 100644
--- a/src/rgw/rgw_rest_swift.cc
+++ b/src/rgw/rgw_rest_swift.cc
@@ -303,6 +303,8 @@ int RGWListBucket_ObjStore_SWIFT::get_params()
if (op_ret < 0) {
return op_ret;
}
+ // S3 behavior is to silently cap the max-keys.
+ // Swift behavior is to abort.
if (max > default_max)
return -ERR_PRECONDITION_FAILED;
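
To make the new bounding rule concrete: an out-of-range max-keys is silently clamped rather than rejected, matching the S3 behaviour noted in the rgw_rest_swift.cc comment above, while 0 stays legal for clients probing for an empty bucket. A small self-contained sketch of just the clamping step (hypothetical helper name; 1000 is the rgw_max_listing_results default this patch introduces):

#include <cassert>

// Clamp a parsed listing parameter into [lower, upper]; parse_value_and_bound
// above does this after strtol() and falls back to a default for empty input.
inline long bound_listing_param(long requested, long lower, long upper) {
    if (requested > upper) return upper;
    if (requested < lower) return lower;
    return requested;
}

int main() {
    assert(bound_listing_param(250,    0, 1000) == 250);   // normal request
    assert(bound_listing_param(500000, 0, 1000) == 1000);  // capped, not an error
    assert(bound_listing_param(0,      0, 1000) == 0);     // probe for an empty bucket
    return 0;
}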

6002-CVE-2018-16846-2.patch

@@ -0,0 +1,121 @@
From ab29bed2fc9f961fe895de1086a8208e21ddaddc Mon Sep 17 00:00:00 2001
From: Joao Eduardo Luis <joao@suse.de>
Date: Thu, 29 Nov 2018 01:05:31 +0000
Subject: [PATCH] rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <joao@suse.de>
Signed-off-by: Abhishek Lekshmanan <abhishek@suse.com>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests
---
src/common/options.cc | 2 +-
src/rgw/rgw_op.cc | 4 +++-
src/rgw/rgw_op.h | 19 ++++++++++++-------
src/rgw/rgw_rest.cc | 8 ++++++--
4 files changed, 22 insertions(+), 11 deletions(-)
diff --git a/src/common/options.cc b/src/common/options.cc
index a543256d8ad3..d906d1d04e10 100644
--- a/src/common/options.cc
+++ b/src/common/options.cc
@@ -6238,7 +6238,7 @@ std::vector<Option> get_rgw_options() {
.set_default(1000)
.set_min_max(1, 100000)
.add_service("rgw")
- .set_description("Upper bound on results in listing operations, ListBucket max-keys"),
+ .set_description("Upper bound on results in listing operations, ListBucket max-keys")
.set_long_description("This caps the maximum permitted value for listing-like operations in RGW S3. "
"Affects ListBucket(max-keys), "
"ListBucketVersions(max-keys), "
diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc
index 509592943c67..1863d7933375 100644
--- a/src/rgw/rgw_op.cc
+++ b/src/rgw/rgw_op.cc
@@ -2383,7 +2383,9 @@ int RGWListBucket::parse_max_keys()
// Bound min value of max-keys to '0'
// Some S3 clients explicitly send max-keys=0 to detect if the bucket is
// empty without listing any items.
- op_ret = parse_value_and_bound(max_keys, &max, 0, g_conf()->rgw_max_listing_results, default_max);
+ return parse_value_and_bound(max_keys, max, 0,
+ s->cct->_conf->get_val<uint64_t>("rgw_max_listing_results"),
+ default_max);
}
void RGWListBucket::pre_exec()
diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h
index 57352ae8c142..21bc8c7a6fb9 100644
--- a/src/rgw/rgw_op.h
+++ b/src/rgw/rgw_op.h
@@ -2235,11 +2235,16 @@ class RGWGetClusterStat : public RGWOp {
virtual const string name() { return "get_cluster_stat"; }
};
-static inline int parse_value_and_bound(const string &input, long *output, const long lower_bound, const long upper_bound, const long default_val)
+static inline int parse_value_and_bound(
+ const string &input,
+ int &output,
+ const long lower_bound,
+ const long upper_bound,
+ const long default_val)
{
if (!input.empty()) {
char *endptr;
- *output = strtol(input.c_str(), &endptr, 10);
+ output = strtol(input.c_str(), &endptr, 10);
if (endptr) {
if (endptr == input.c_str()) return -EINVAL;
while (*endptr && isspace(*endptr)) // ignore white space
@@ -2248,14 +2253,14 @@ static inline int parse_value_and_bound(const string &input, long *output, const
return -EINVAL;
}
}
- if(*output > upper_bound) {
- *output = upper_bound;
+ if(output > upper_bound) {
+ output = upper_bound;
}
- if(*output < lower_bound) {
- *output = lower_bound;
+ if(output < lower_bound) {
+ output = lower_bound;
}
} else {
- *output = default_val;
+ output = default_val;
}
return 0;
diff --git a/src/rgw/rgw_rest.cc b/src/rgw/rgw_rest.cc
index c87192d5674b..fdb1a713efe0 100644
--- a/src/rgw/rgw_rest.cc
+++ b/src/rgw/rgw_rest.cc
@@ -1588,7 +1588,9 @@ int RGWListMultipart_ObjStore::get_params()
}
string str = s->info.args.get("max-parts");
- op_ret = parse_value_and_bound(str, &max_parts, 0, g_conf()->rgw_max_listing_results, max_parts);
+ op_ret = parse_value_and_bound(str, max_parts, 0,
+ g_conf->get_val<uint64_t>("rgw_max_listing_results"),
+ max_parts);
return op_ret;
}
@@ -1598,7 +1600,9 @@ int RGWListBucketMultiparts_ObjStore::get_params()
delimiter = s->info.args.get("delimiter");
prefix = s->info.args.get("prefix");
string str = s->info.args.get("max-uploads");
- op_ret = parse_value_and_bound(str, &max_uploads, 0, g_conf()->rgw_max_listing_results, default_max);
+ op_ret = parse_value_and_bound(str, max_uploads, 0,
+ g_conf->get_val<uint64_t>("rgw_max_listing_results"),
+ default_max);
if (op_ret < 0) {
return op_ret;
}

ceph-12.2.8.tar.gz

Binary file not shown.

ceph.spec

File diff suppressed because it is too large.