!113 backport patches from upstream

From: @zhang-mingyi66 
Reviewed-by: @nlgwcy 
Signed-off-by: @nlgwcy
Commit 73c4d2973f by openeuler-ci-bot, 2023-12-08 00:50:36 +00:00 (committed by Gitee)
5 changed files with 362 additions and 1 deletion

New file: backport-libbpf-Fix-is_pow_of_2.patch

@@ -0,0 +1,67 @@
From ad0783c4309a37690810a152a8902c3d12b086b5 Mon Sep 17 00:00:00 2001
From: Yuze Chi <chiyuze@google.com>
Date: Thu, 2 Jun 2022 22:51:56 -0700
Subject: [PATCH] libbpf: Fix is_pow_of_2
Move the correct definition from linker.c into libbpf_internal.h.
Fixes: 0087a681fa8c ("libbpf: Automatically fix up BPF_MAP_TYPE_RINGBUF size, if necessary")
Reported-by: Yuze Chi <chiyuze@google.com>
Signed-off-by: Yuze Chi <chiyuze@google.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220603055156.2830463-1-irogers@google.com
---
src/libbpf.c | 5 -----
src/libbpf_internal.h | 5 +++++
src/linker.c | 5 -----
3 files changed, 5 insertions(+), 10 deletions(-)
diff --git a/src/libbpf.c b/src/libbpf.c
index 5afe4cbd6..b03165687 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -5071,11 +5071,6 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
static void bpf_map__destroy(struct bpf_map *map);
-static bool is_pow_of_2(size_t x)
-{
- return x && (x & (x - 1));
-}
-
static size_t adjust_ringbuf_sz(size_t sz)
{
__u32 page_sz = sysconf(_SC_PAGE_SIZE);
diff --git a/src/libbpf_internal.h b/src/libbpf_internal.h
index 4abdbe2fe..ef5d97507 100644
--- a/src/libbpf_internal.h
+++ b/src/libbpf_internal.h
@@ -580,4 +580,9 @@ struct bpf_link * usdt_manager_attach_usdt(struct usdt_manager *man,
const char *usdt_provider, const char *usdt_name,
__u64 usdt_cookie);
+static inline bool is_pow_of_2(size_t x)
+{
+ return x && (x & (x - 1)) == 0;
+}
+
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/src/linker.c b/src/linker.c
index 9aa016fb5..85c0fddf5 100644
--- a/src/linker.c
+++ b/src/linker.c
@@ -697,11 +697,6 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
return err;
}
-static bool is_pow_of_2(size_t x)
-{
- return x && (x & (x - 1)) == 0;
-}
-
static int linker_sanity_check_elf(struct src_obj *obj)
{
struct src_sec *sec;
--
2.33.0
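
For context, a minimal standalone sketch (not part of the patch; the sample values are illustrative) contrasting the buggy helper this change removes from libbpf.c with the fixed one it keeps in libbpf_internal.h. Without the "== 0" comparison, the old check returned false for powers of two and true for most other non-zero values:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* buggy variant removed from libbpf.c: truthy whenever x has more than one bit set */
static bool is_pow_of_2_buggy(size_t x)
{
    return x && (x & (x - 1));
}

/* fixed variant kept in libbpf_internal.h: true only when exactly one bit is set */
static bool is_pow_of_2_fixed(size_t x)
{
    return x && (x & (x - 1)) == 0;
}

int main(void)
{
    size_t samples[] = { 1, 2, 3, 4096, 6144 };

    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%zu: buggy=%d fixed=%d\n", samples[i],
               is_pow_of_2_buggy(samples[i]), is_pow_of_2_fixed(samples[i]));
    return 0;
}

Since libbpf.c used this helper in adjust_ringbuf_sz(), the inverted result meant already-valid ring buffer sizes (a power-of-2 multiple of the page size) were needlessly rounded up, while some invalid sizes were passed through for the kernel to reject.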

New file: backport-libbpf-Fix-realloc-API-handling-in-zero-sized-edge-cases.patch

@@ -0,0 +1,92 @@
From f117080307163d7057034341aa8ff6b201041599 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko <andrii@kernel.org>
Date: Mon, 10 Jul 2023 19:41:50 -0700
Subject: [PATCH] libbpf: Fix realloc API handling in zero-sized edge cases
realloc() and reallocarray() can either return NULL or a special
non-NULL pointer, if their size argument is zero. This requires a bit
more care to handle NULL-as-valid-result situation differently from
NULL-as-error case. This has caused real issues before ([0]), and just
recently bit again in production when performing bpf_program__attach_usdt().
This patch fixes 4 places that do or potentially could suffer from this
mishandling of NULL, including the reported USDT-related one.
There are many other places where realloc()/reallocarray() is used and
NULL is always treated as an error value, but all those have guarantees
that their size is always non-zero, so those spots don't need any extra
handling.
[0] d08ab82f59d5 ("libbpf: Fix double-free when linker processes empty sections")
Fixes: 999783c8bbda ("libbpf: Wire up spec management and other arch-independent USDT logic")
Fixes: b63b3c490eee ("libbpf: Add bpf_program__set_insns function")
Fixes: 697f104db8a6 ("libbpf: Support custom SEC() handlers")
Fixes: b12688267280 ("libbpf: Change the order of data and text relocations.")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20230711024150.1566433-1-andrii@kernel.org
---
src/libbpf.c | 15 ++++++++++++---
src/usdt.c | 5 ++++-
2 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/src/libbpf.c b/src/libbpf.c
index 78635feb1..63311a73c 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -6161,7 +6161,11 @@ static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_progra
if (main_prog == subprog)
return 0;
relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
- if (!relos)
+ /* if new count is zero, reallocarray can return a valid NULL result;
+ * in this case the previous pointer will be freed, so we *have to*
+ * reassign old pointer to the new value (even if it's NULL)
+ */
+ if (!relos && new_cnt)
return -ENOMEM;
if (subprog->nr_reloc)
memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
@@ -8532,7 +8536,8 @@ int bpf_program__set_insns(struct bpf_program *prog,
return -EBUSY;
insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
- if (!insns) {
+ /* NULL is a valid return from reallocarray if the new count is zero */
+ if (!insns && new_insn_cnt) {
pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
return -ENOMEM;
}
@@ -8841,7 +8846,11 @@ int libbpf_unregister_prog_handler(int handler_id)
/* try to shrink the array, but it's ok if we couldn't */
sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
- if (sec_defs)
+ /* if new count is zero, reallocarray can return a valid NULL result;
+ * in this case the previous pointer will be freed, so we *have to*
+ * reassign old pointer to the new value (even if it's NULL)
+ */
+ if (sec_defs || custom_sec_def_cnt == 0)
custom_sec_defs = sec_defs;
return 0;
diff --git a/src/usdt.c b/src/usdt.c
index f1a141555..37455d00b 100644
--- a/src/usdt.c
+++ b/src/usdt.c
@@ -852,8 +852,11 @@ static int bpf_link_usdt_detach(struct bpf_link *link)
* system is so exhausted on memory, it's the least of user's
* concerns, probably.
* So just do our best here to return those IDs to usdt_manager.
+ * Another edge case when we can legitimately get NULL is when
+ * new_cnt is zero, which can happen in some edge cases, so we
+ * need to be careful about that.
*/
- if (new_free_ids) {
+ if (new_free_ids || new_cnt == 0) {
memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
man->free_spec_ids = new_free_ids;
--
2.33.0
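
As a side note, a small sketch of the pattern the patch applies at each call site (plain realloc() here; the helper name and parameters are made up, not libbpf APIs): treat NULL as an error only when the requested count is non-zero, and always adopt the returned pointer.

#include <errno.h>
#include <stdlib.h>

/* Illustrative helper, not part of libbpf: shrink an int array to new_cnt
 * elements while distinguishing "allocation failed" from "zero-sized result".
 */
static int resize_ids(int **ids, size_t *cnt, size_t new_cnt)
{
    int *tmp;

    tmp = realloc(*ids, new_cnt * sizeof(**ids));
    /* realloc(p, 0) may free p and return NULL; that is success, not ENOMEM */
    if (!tmp && new_cnt)
        return -ENOMEM;

    *ids = tmp;      /* must take the new pointer even when it is NULL */
    *cnt = new_cnt;
    return 0;
}

Checking only !tmp would report a spurious -ENOMEM and leave the caller holding a pointer that realloc() may already have freed, which is the double-free class of bug referenced in [0].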

New file: backport-libbpf-Set-close-on-exec-flag-on-gzopen.patch

@@ -0,0 +1,37 @@
From 20699ecf61f21b761fcc369ddc85da5f5cf5a1cf Mon Sep 17 00:00:00 2001
From: Marco Vedovati <marco.vedovati@crowdstrike.com>
Date: Thu, 10 Aug 2023 14:43:53 -0700
Subject: [PATCH] libbpf: Set close-on-exec flag on gzopen
Enable the close-on-exec flag when using gzopen. This is especially important
for multithreaded programs making use of libbpf, where a fork + exec could
race with libbpf library calls, potentially resulting in a file descriptor
leaked to the new process. This got missed in 59842c5451fe ("libbpf: Ensure
libbpf always opens files with O_CLOEXEC").
Fixes: 59842c5451fe ("libbpf: Ensure libbpf always opens files with O_CLOEXEC")
Signed-off-by: Marco Vedovati <marco.vedovati@crowdstrike.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20230810214350.106301-1-martin.kelly@crowdstrike.com
---
src/libbpf.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/libbpf.c b/src/libbpf.c
index 17883f5a4..b14a4376a 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -1978,9 +1978,9 @@ static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
return -ENAMETOOLONG;
/* gzopen also accepts uncompressed files. */
- file = gzopen(buf, "r");
+ file = gzopen(buf, "re");
if (!file)
- file = gzopen("/proc/config.gz", "r");
+ file = gzopen("/proc/config.gz", "re");
if (!file) {
pr_warn("failed to open system Kconfig\n");
--
2.33.0
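
For illustration, a usage sketch mirroring what bpf_object__read_kconfig_file() does after this patch (assuming zlib 1.2.9 or newer, where the "e" mode character requests O_CLOEXEC on the underlying descriptor; the config path below is made up):

#include <stdio.h>
#include <zlib.h>

/* Build with: cc kconfig_dump.c -lz */
int main(void)
{
    char line[256];
    gzFile file;

    /* gzopen also accepts uncompressed files; "e" keeps the fd from
     * leaking across a concurrent fork()+exec() in another thread.
     */
    file = gzopen("/boot/config-6.6.0", "re");   /* illustrative path */
    if (!file)
        file = gzopen("/proc/config.gz", "re");
    if (!file) {
        fprintf(stderr, "failed to open system Kconfig\n");
        return 1;
    }

    while (gzgets(file, line, sizeof(line)))
        if (line[0] == 'C')   /* print only CONFIG_* lines */
            fputs(line, stdout);

    gzclose(file);
    return 0;
}

Older zlib releases are expected to ignore unrecognized mode characters, so "re" should degrade to plain "r" there, just without the close-on-exec protection.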

New file: backport-libbpf-make-RINGBUF-map-size-adjustments-more-eagerly.patch

@@ -0,0 +1,154 @@
From 610707057ac60311fde94b3a049de9d4826a3bf2 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko <andrii@kernel.org>
Date: Fri, 15 Jul 2022 16:09:51 -0700
Subject: [PATCH] libbpf: make RINGBUF map size adjustments more eagerly
Make libbpf adjust RINGBUF map size (rounding it up to closest power-of-2
of page_size) more eagerly: during open phase when initializing the map
and on explicit calls to bpf_map__set_max_entries().
Such approach allows user to check actual size of BPF ringbuf even
before it's created in the kernel, but also it prevents various edge
case scenarios where BPF ringbuf size can get out of sync with what it
would be in kernel. One of them (reported in [0]) is during an attempt
to pin/reuse BPF ringbuf.
Move adjust_ringbuf_sz() helper closer to its first actual use. The
implementation of the helper is unchanged.
Also make detection of whether bpf_object is already loaded more robust
by checking obj->loaded explicitly, given that map->fd can be < 0 even
if bpf_object is already loaded due to ability to disable map creation
with bpf_map__set_autocreate(map, false).
[0] Closes: https://github.com/libbpf/libbpf/pull/530
Fixes: 0087a681fa8c ("libbpf: Automatically fix up BPF_MAP_TYPE_RINGBUF size, if necessary")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/r/20220715230952.2219271-1-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
src/libbpf.c | 77 ++++++++++++++++++++++++++++------------------------
1 file changed, 42 insertions(+), 35 deletions(-)
diff --git a/src/libbpf.c b/src/libbpf.c
index 9b5500659..b01fe01b0 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -2331,6 +2331,37 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
return 0;
}
+static size_t adjust_ringbuf_sz(size_t sz)
+{
+ __u32 page_sz = sysconf(_SC_PAGE_SIZE);
+ __u32 mul;
+
+ /* if user forgot to set any size, make sure they see error */
+ if (sz == 0)
+ return 0;
+ /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
+ * a power-of-2 multiple of kernel's page size. If user diligently
+ * satisified these conditions, pass the size through.
+ */
+ if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
+ return sz;
+
+ /* Otherwise find closest (page_sz * power_of_2) product bigger than
+ * user-set size to satisfy both user size request and kernel
+ * requirements and substitute correct max_entries for map creation.
+ */
+ for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
+ if (mul * page_sz > sz)
+ return mul * page_sz;
+ }
+
+ /* if it's impossible to satisfy the conditions (i.e., user size is
+ * very close to UINT_MAX but is not a power-of-2 multiple of
+ * page_size) then just return original size and let kernel reject it
+ */
+ return sz;
+}
+
static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
{
map->def.type = def->map_type;
@@ -2344,6 +2375,10 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
map->btf_key_type_id = def->key_type_id;
map->btf_value_type_id = def->value_type_id;
+ /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
+ if (map->def.type == BPF_MAP_TYPE_RINGBUF)
+ map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
+
if (def->parts & MAP_DEF_MAP_TYPE)
pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
@@ -4317,9 +4352,15 @@ struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
{
- if (map->fd >= 0)
+ if (map->obj->loaded)
return libbpf_err(-EBUSY);
+
map->def.max_entries = max_entries;
+
+ /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
+ if (map->def.type == BPF_MAP_TYPE_RINGBUF)
+ map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
+
return 0;
}
@@ -4875,37 +4916,6 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
static void bpf_map__destroy(struct bpf_map *map);
-static size_t adjust_ringbuf_sz(size_t sz)
-{
- __u32 page_sz = sysconf(_SC_PAGE_SIZE);
- __u32 mul;
-
- /* if user forgot to set any size, make sure they see error */
- if (sz == 0)
- return 0;
- /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
- * a power-of-2 multiple of kernel's page size. If user diligently
- * satisified these conditions, pass the size through.
- */
- if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
- return sz;
-
- /* Otherwise find closest (page_sz * power_of_2) product bigger than
- * user-set size to satisfy both user size request and kernel
- * requirements and substitute correct max_entries for map creation.
- */
- for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
- if (mul * page_sz > sz)
- return mul * page_sz;
- }
-
- /* if it's impossible to satisfy the conditions (i.e., user size is
- * very close to UINT_MAX but is not a power-of-2 multiple of
- * page_size) then just return original size and let kernel reject it
- */
- return sz;
-}
-
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
{
LIBBPF_OPTS(bpf_map_create_opts, create_attr);
@@ -4944,9 +4954,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
}
switch (def->type) {
- case BPF_MAP_TYPE_RINGBUF:
- map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
- /* fallthrough */
case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
case BPF_MAP_TYPE_CGROUP_ARRAY:
case BPF_MAP_TYPE_STACK_TRACE:
--
2.33.0
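
To see the effect from the caller's side, a sketch under stated assumptions (the object file "prog.bpf.o" and ring buffer map name "events" are made up; error handling is trimmed). After this patch, the rounded-up size is already observable right after open or bpf_map__set_max_entries(), before bpf_object__load():

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
    struct bpf_object *obj;
    struct bpf_map *map;

    obj = bpf_object__open_file("prog.bpf.o", NULL);   /* illustrative object */
    if (libbpf_get_error(obj))
        return 1;

    map = bpf_object__find_map_by_name(obj, "events"); /* a BPF_MAP_TYPE_RINGBUF map */
    if (map) {
        /* ask for 1000 bytes; libbpf rounds up to a power-of-2 multiple
         * of the page size immediately (4096 with 4 KiB pages)
         */
        bpf_map__set_max_entries(map, 1000);
        printf("ringbuf max_entries: %u\n", bpf_map__max_entries(map));
    }

    bpf_object__close(obj);
    return 0;
}

Previously the adjustment only happened inside bpf_object__create_map(), so the same query before load would still report the raw 1000 and could get out of sync with what the kernel ends up enforcing, for example when pinning or reusing the map.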

Changed file: libbpf.spec

@@ -4,7 +4,7 @@
Name: %{githubname}
Version: %{githubver}
-Release: 10
+Release: 11
Summary: Libbpf library
License: LGPLv2 or BSD
@@ -33,6 +33,10 @@ Patch0017: backport-libbpf-Use-elf_getshdrnum-instead-of-e_shnum.patch
Patch0018: 0001-sync-bpf-helper-funcs-from-kernel.patch
Patch0019: backport-libbpf-Ensure-FD-3-during-bpf_map__reuse_fd.patch
Patch0020: backport-libbpf-Ensure-libbpf-always-opens-files-with-O_CLOEX.patch
+Patch0021: backport-libbpf-Fix-realloc-API-handling-in-zero-sized-edge-cases.patch
+Patch0022: backport-libbpf-Set-close-on-exec-flag-on-gzopen.patch
+Patch0023: backport-libbpf-Fix-is_pow_of_2.patch
+Patch0024: backport-libbpf-make-RINGBUF-map-size-adjustments-more-eagerly.patch
# This package supersedes libbpf from kernel-tools,
# which has default Epoch: 0. By having Epoch: 1
@@ -85,6 +89,13 @@ developing applications that use %{name}
%{_libdir}/libbpf.a
%changelog
+* Thu Dec 7 2023 zhangmingyi <zhangmingyi5@huawei.com> 2:0.8.1-11
+- backport patches from upstream:
+backport-libbpf-Fix-realloc-API-handling-in-zero-sized-edge-cases.patch
+backport-libbpf-Set-close-on-exec-flag-on-gzopen.patch
+backport-libbpf-Fix-is_pow_of_2.patch
+backport-libbpf-make-RINGBUF-map-size-adjustments-more-eagerly.patch
* Mon Aug 14 2023 zhangmingyi <zhangmingyi5@huawei.com> 2:0.8.1-10
- backport patches from upstream:
backport-libbpf-Ensure-FD-3-during-bpf_map__reuse_fd.patch