Compare commits


No commits in common. "bf0e5f163becc1eee99fc776f999de7d9954587c" and "cd6666c66a863296b8e3b955db72b9aa21facdcb" have entirely different histories.

12 changed files with 1 addition and 933 deletions

add-sw_64-support.patch

@@ -1,72 +0,0 @@
From 93391a6f90774d4e6ee3d21972ef937eb183067c Mon Sep 17 00:00:00 2001
From: Super User <root@localhost.localdomain>
Date: Fri, 14 Mar 2025 17:31:35 +0800
Subject: [PATCH] add sw_64 support
---
src/bpf_tracing.h | 35 +++++++++++++++++++++++++++++++++++
1 file changed, 35 insertions(+)
diff --git a/src/bpf_tracing.h b/src/bpf_tracing.h
index 6fb3d0f..ba30c3f 100644
--- a/src/bpf_tracing.h
+++ b/src/bpf_tracing.h
@@ -35,6 +35,9 @@
#elif defined(__TARGET_ARCH_loongarch)
#define bpf_target_loongarch
#define bpf_target_defined
+#elif defined(__TARGET_ARCH_sw_64)
+ #define bpf_target_sw64
+ #define bpf_target_defined
#else
/* Fall back to what the compiler says */
@@ -68,6 +71,9 @@
#elif defined(__loongarch__)
#define bpf_target_loongarch
#define bpf_target_defined
+#elif defined(__sw_64__)
+ #define bpf_target_sw64
+ #define bpf_target_defined
#endif /* no compiler target */
#endif
@@ -442,6 +448,35 @@ struct pt_regs___arm64 {
#define __PT_SP_REG regs[3]
#define __PT_IP_REG csr_era
+#elif defined(bpf_target_sw64)
+
+/* sw64 provides struct user_pt_regs instead of struct pt_regs to userspace */
+struct pt_regs;
+#define PT_REGS_SW64 const volatile struct user_pt_regs
+#define PT_REGS_PARM1(x) (((PT_REGS_SW64 *)(x))->regs[16])
+#define PT_REGS_PARM2(x) (((PT_REGS_SW64 *)(x))->regs[17])
+#define PT_REGS_PARM3(x) (((PT_REGS_SW64 *)(x))->regs[18])
+#define PT_REGS_PARM4(x) (((PT_REGS_SW64 *)(x))->regs[19])
+#define PT_REGS_PARM5(x) (((PT_REGS_SW64 *)(x))->regs[20])
+#define PT_REGS_RET(x) (((PT_REGS_SW64 *)(x))->regs[26])
+/* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_FP(x) (((PT_REGS_SW64 *)(x))->regs[15])
+#define PT_REGS_RC(x) (((PT_REGS_SW64 *)(x))->regs[0])
+#define PT_REGS_SP(x) (((PT_REGS_SW64 *)(x))->regs[30])
+#define PT_REGS_IP(x) (((PT_REGS_SW64 *)(x))->pc)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_SW64 *)(x), regs[16])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_SW64 *)(x), regs[17])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_SW64 *)(x), regs[18])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_SW64 *)(x), regs[19])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_SW64 *)(x), regs[20])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_SW64 *)(x), regs[26])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_SW64 *)(x), regs[15])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_SW64 *)(x), regs[0])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_SW64 *)(x), regs[30])
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_SW64 *)(x), pc)
+
+
#endif
#if defined(bpf_target_defined)
--
2.43.0
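
For context, a minimal sketch of how the sw64 macros above would be consumed from a BPF program; the probe target, program body, and the -D__TARGET_ARCH_sw_64 build flag are illustrative assumptions, not part of the patch:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_unlinkat")
int trace_unlinkat(struct pt_regs *ctx)
{
	/* with bpf_target_sw64 defined, PT_REGS_PARM1() expands to
	 * regs[16], the first argument register in struct user_pt_regs */
	long dfd = (long)PT_REGS_PARM1(ctx);

	bpf_printk("do_unlinkat dfd=%ld", dfd);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";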

backport-libbpf-Add-NULL-checks-to-bpf_object__prev_map,next_.patch

@@ -1,53 +0,0 @@
From 1867490d8fc635c552569d51c48debff588d2191 Mon Sep 17 00:00:00 2001
From: Andreas Ziegler <ziegler.andreas@siemens.com>
Date: Wed, 3 Jul 2024 10:34:36 +0200
Subject: [PATCH] libbpf: Add NULL checks to bpf_object__{prev_map,next_map}
In the current state, an erroneous call to
bpf_object__find_map_by_name(NULL, ...) leads to a segmentation
fault through the following call chain:
bpf_object__find_map_by_name(obj = NULL, ...)
-> bpf_object__for_each_map(pos, obj = NULL)
-> bpf_object__next_map((obj = NULL), NULL)
-> return (obj = NULL)->maps
While calling bpf_object__find_map_by_name with obj = NULL is
obviously incorrect, this should not lead to a segmentation
fault but rather be handled gracefully.
As __bpf_map__iter already handles this situation correctly, we
can delegate the check for the regular case there and only add
a check in case the prev or next parameter is NULL.
Signed-off-by: Andreas Ziegler <ziegler.andreas@siemens.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20240703083436.505124-1-ziegler.andreas@siemens.com
---
src/libbpf.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/libbpf.c b/src/libbpf.c
index 4a28fac49..30f121754 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -10375,7 +10375,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
- if (prev == NULL)
+ if (prev == NULL && obj != NULL)
return obj->maps;
return __bpf_map__iter(prev, obj, 1);
@@ -10384,7 +10384,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
{
- if (next == NULL) {
+ if (next == NULL && obj != NULL) {
if (!obj->nr_maps)
return NULL;
return obj->maps + obj->nr_maps - 1;
--
2.33.0
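
As a rough illustration (hypothetical caller, not from the patch), the guarded iterator now degrades gracefully instead of crashing:

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj = NULL;	/* erroneous, but must not crash */
	struct bpf_map *map;

	/* bpf_object__for_each_map() expands to bpf_object__next_map();
	 * with the NULL check above it yields no maps for a NULL object */
	bpf_object__for_each_map(map, obj)
		printf("map: %s\n", bpf_map__name(map));

	return 0;
}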

backport-libbpf-Apply-map_set_def_max_entries-for-inner_maps-.patch

@@ -1,49 +0,0 @@
From 89ca11a79bb93824e82897bdb48727b5d75e469a Mon Sep 17 00:00:00 2001
From: Andrey Grafin <conquistador@yandex-team.ru>
Date: Wed, 17 Jan 2024 16:06:18 +0300
Subject: [PATCH] libbpf: Apply map_set_def_max_entries() for inner_maps on
creation
This patch allows bpf_object__load() to auto-create BPF_MAP_TYPE_ARRAY_OF_MAPS
and BPF_MAP_TYPE_HASH_OF_MAPS maps with BPF_MAP_TYPE_PERF_EVENT_ARRAY values.
The previous behaviour created a zero-filled btf_map_def for inner maps and
tried to use it for map creation, but the Linux kernel forbids creating
a BPF_MAP_TYPE_PERF_EVENT_ARRAY map with max_entries=0.
Fixes: 646f02ffdd49 ("libbpf: Add BTF-defined map-in-map support")
Signed-off-by: Andrey Grafin <conquistador@yandex-team.ru>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20240117130619.9403-1-conquistador@yandex-team.ru
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
src/libbpf.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src/libbpf.c b/src/libbpf.c
index afd09571c..b8b00da62 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -70,6 +70,7 @@
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
+static int map_set_def_max_entries(struct bpf_map *map);
static const char * const attach_type_name[] = {
[BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
@@ -5172,6 +5173,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
if (bpf_map_type__is_map_in_map(def->type)) {
if (map->inner_map) {
+ err = map_set_def_max_entries(map->inner_map);
+ if (err)
+ return err;
err = bpf_object__create_map(obj, map->inner_map, true);
if (err) {
pr_warn("map '%s': failed to create inner map: %d\n",
--
2.33.0
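
A sketch of the kind of BTF-defined map-in-map declaration this makes loadable (names are illustrative); the inner template omits max_entries, which libbpf now fills in with its default before creating the inner map:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct inner_map {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
	/* no max_entries: previously left as 0 and rejected by the kernel */
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(int));
	__array(values, struct inner_map);
} outer SEC(".maps");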

backport-libbpf-Avoid-uninitialized-value-in-BPF_CORE_READ_BI.patch

@@ -1,56 +0,0 @@
From 3827aa514cba7db16b81236712a46e8b70260fcd Mon Sep 17 00:00:00 2001
From: "Jose E. Marchesi" <jose.marchesi@oracle.com>
Date: Wed, 8 May 2024 12:13:13 +0200
Subject: [PATCH] bpf: Avoid uninitialized value in BPF_CORE_READ_BITFIELD
[Changes from V1:
- Use a default branch in the switch statement to initialize `val'.]
GCC warns that `val' may be used uninitialized in the
BPF_CORE_READ_BITFIELD macro, defined in bpf_core_read.h as:
[...]
unsigned long long val; \
[...] \
switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
case 1: val = *(const unsigned char *)p; break; \
case 2: val = *(const unsigned short *)p; break; \
case 4: val = *(const unsigned int *)p; break; \
case 8: val = *(const unsigned long long *)p; break; \
} \
[...]
val; \
} \
This patch adds a default entry in the switch statement that sets
`val' to zero, in order to avoid the warning and to keep random values
from being used in case __builtin_preserve_field_info returns unexpected
values for BPF_FIELD_BYTE_SIZE.
Tested in bpf-next master.
No regressions.
Signed-off-by: Jose E. Marchesi <jose.marchesi@oracle.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240508101313.16662-1-jose.marchesi@oracle.com
Conflict: NA
Reference: https://github.com/libbpf/libbpf/commit/3827aa514cba7db16b81236712a46e8b70260fcd
---
src/bpf_core_read.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/bpf_core_read.h b/src/bpf_core_read.h
index b5c7ce5c2..c0e13cdf9 100644
--- a/src/bpf_core_read.h
+++ b/src/bpf_core_read.h
@@ -104,6 +104,7 @@ enum bpf_enum_value_kind {
case 2: val = *(const unsigned short *)p; break; \
case 4: val = *(const unsigned int *)p; break; \
case 8: val = *(const unsigned long long *)p; break; \
+ default: val = 0; break; \
} \
val <<= __CORE_RELO(s, field, LSHIFT_U64); \
if (__CORE_RELO(s, field, SIGNED)) \
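
For reference, a typical use of the macro, sketched with a hypothetical struct (the usual vmlinux.h/bpf_core_read.h includes are assumed); the pointer must reference directly readable memory, e.g. an fentry argument:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

struct sample___local {
	unsigned int flags: 12;
} __attribute__((preserve_access_index));

static __always_inline unsigned long long read_flags(struct sample___local *s)
{
	/* expands to the switch above; the new default arm guarantees
	 * that val is initialized even for an unexpected byte size */
	return BPF_CORE_READ_BITFIELD(s, flags);
}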

backport-libbpf-Do-not-resolve-size-on-duplicate-FUNCs.patch

@@ -1,33 +0,0 @@
From ecf998ed8ff51efd3887ff7caca0a0cc56a88082 Mon Sep 17 00:00:00 2001
From: Eric Long <i@hack3r.moe>
Date: Wed, 2 Oct 2024 14:25:06 +0800
Subject: [PATCH] libbpf: Do not resolve size on duplicate FUNCs
FUNCs do not have sizes, thus currently btf__resolve_size will fail
with -EINVAL. Add conditions so that we only update size when the BTF
object is not a function or a function prototype.
Signed-off-by: Eric Long <i@hack3r.moe>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20241002-libbpf-dup-extern-funcs-v4-1-560eb460ff90@hack3r.moe
---
src/linker.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src/linker.c b/src/linker.c
index 81dbbdd79..f83c1c299 100644
--- a/src/linker.c
+++ b/src/linker.c
@@ -2451,6 +2451,10 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
if (glob_sym && glob_sym->var_idx >= 0) {
__s64 sz;
+ /* FUNCs don't have size, nothing to update */
+ if (btf_is_func(t))
+ continue;
+
dst_var = &dst_sec->sec_vars[glob_sym->var_idx];
/* Because underlying BTF type might have
* changed, so might its size have changed, so

backport-libbpf-Dont-take-direct-pointers-into-BTF-data-from-.patch

@@ -1,103 +0,0 @@
From 7b5237996a42c3b8a6fe8ccae656047de2831f58 Mon Sep 17 00:00:00 2001
From: David Vernet <void@manifault.com>
Date: Wed, 24 Jul 2024 12:14:58 -0500
Subject: [PATCH] libbpf: Don't take direct pointers into BTF data from st_ops
In struct bpf_struct_ops, we take a pointer to a BTF type name, and
a struct btf_type. This was presumably done for convenience, but can
actually result in subtle and confusing bugs given that BTF data can be
invalidated before a program is loaded. For example, in sched_ext, we
may sometimes resize a data section after a skeleton has been opened,
but before the struct_ops scheduler map has been loaded. This may cause
the BTF data to be realloc'd, which can then cause a UAF when loading
the program because the struct_ops map has pointers directly into the
BTF data.
We're already storing the BTF type_id in struct bpf_struct_ops. Because
type_id is stable, we can therefore just update the places where we were
looking at those pointers to instead do the lookups we need from the
type_id.
Fixes: 590a00888250 ("bpf: libbpf: Add STRUCT_OPS support")
Signed-off-by: David Vernet <void@manifault.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240724171459.281234-1-void@manifault.com
---
src/libbpf.c | 19 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/src/libbpf.c b/src/libbpf.c
index a3be6f8fa..e55353887 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -496,8 +496,6 @@ struct bpf_program {
};
struct bpf_struct_ops {
- const char *tname;
- const struct btf_type *type;
struct bpf_program **progs;
__u32 *kern_func_off;
/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
@@ -1121,8 +1122,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
int err;
st_ops = map->st_ops;
- type = st_ops->type;
- tname = st_ops->tname;
+ type = btf__type_by_id(btf, st_ops->type_id);
+ tname = btf__name_by_offset(btf, type->name_off);
err = find_struct_ops_kern_types(kern_btf, tname,
&kern_type, &kern_type_id,
&kern_vtype, &kern_vtype_id,
@@ -1423,8 +1424,6 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
memcpy(st_ops->data,
data->d_buf + vsi->offset,
type->size);
- st_ops->tname = tname;
- st_ops->type = type;
st_ops->type_id = type_id;
pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
@@ -8445,11 +8444,13 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
static void bpf_map_prepare_vdata(const struct bpf_map *map)
{
+ const struct btf_type *type;
struct bpf_struct_ops *st_ops;
__u32 i;
st_ops = map->st_ops;
- for (i = 0; i < btf_vlen(st_ops->type); i++) {
+ type = btf__type_by_id(map->obj->btf, st_ops->type_id);
+ for (i = 0; i < btf_vlen(type); i++) {
struct bpf_program *prog = st_ops->progs[i];
void *kern_data;
int prog_fd;
@@ -9712,6 +9713,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
Elf64_Shdr *shdr, Elf_Data *data)
{
+ const struct btf_type *type;
const struct btf_member *member;
struct bpf_struct_ops *st_ops;
struct bpf_program *prog;
@@ -9771,13 +9773,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
}
insn_idx = sym->st_value / BPF_INSN_SZ;
- member = find_member_by_offset(st_ops->type, moff * 8);
+ type = btf__type_by_id(btf, st_ops->type_id);
+ member = find_member_by_offset(type, moff * 8);
if (!member) {
pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
map->name, moff);
return -EINVAL;
}
- member_idx = member - btf_members(st_ops->type);
+ member_idx = member - btf_members(type);
name = btf__name_by_offset(btf, member->name_off);
if (!resolve_func_ptr(btf, member->type, NULL)) {
--
2.23.0

backport-libbpf-Fix-bpf_object__open_skeleton-s-mishandling-o.patch

@@ -1,149 +0,0 @@
From f6f24022d3054d2855612e642f8fe9f1148b4275 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko <andrii@kernel.org>
Date: Tue, 27 Aug 2024 13:37:21 -0700
Subject: [PATCH] libbpf: Fix bpf_object__open_skeleton()'s mishandling of
options
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
We do an ugly copy of options in bpf_object__open_skeleton() just to
be able to set object name from skeleton's recorded name (while still
allowing user to override it through opts->object_name).
This is not just ugly, it is also broken: the memcpy() doesn't take into
account potential size differences between skel_opts and user-provided
opts due to backward and forward compatibility. This leads
to copying over extra bytes and then failing to validate options
properly. It could, technically, also lead to SIGSEGV, if we are unlucky.
So just get rid of that memory copy completely and instead pass
default object name into bpf_object_open() directly, simplifying all
this significantly. The rule now is that obj_name should be non-NULL for
bpf_object_open() when called with an in-memory buffer, so validate that
explicitly as well.
We adapt bpf_object__open_mem() to this as well and generate a default
name (based on buffer memory address and size) outside of bpf_object_open().
Fixes: d66562fba1ce ("libbpf: Add BPF object skeleton support")
Reported-by: Daniel Müller <deso@posteo.net>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Daniel Müller <deso@posteo.net>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/bpf/20240827203721.1145494-1-andrii@kernel.org
---
src/libbpf.c | 52 +++++++++++++++++++---------------------------------
1 file changed, 19 insertions(+), 33 deletions(-)
diff --git a/src/libbpf.c b/src/libbpf.c
index e55353887..d3a542649 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -7905,16 +7905,19 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
}
static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+ const char *obj_name,
const struct bpf_object_open_opts *opts)
{
- const char *obj_name, *kconfig, *btf_tmp_path;
+ const char *kconfig, *btf_tmp_path;
struct bpf_object *obj;
- char tmp_name[64];
int err;
char *log_buf;
size_t log_size;
__u32 log_level;
+ if (obj_buf && !obj_name)
+ return ERR_PTR(-EINVAL);
+
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("failed to init libelf for %s\n",
path ? : "(mem buf)");
@@ -7924,16 +7927,12 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
if (!OPTS_VALID(opts, bpf_object_open_opts))
return ERR_PTR(-EINVAL);
- obj_name = OPTS_GET(opts, object_name, NULL);
+ obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name;
if (obj_buf) {
- if (!obj_name) {
- snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
- (unsigned long)obj_buf,
- (unsigned long)obj_buf_sz);
- obj_name = tmp_name;
- }
path = obj_name;
pr_debug("loading object '%s' from buffer\n", obj_name);
+ } else {
+ pr_debug("loading object from %s\n", path);
}
log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
@@ -8017,9 +8016,7 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
if (!path)
return libbpf_err_ptr(-EINVAL);
- pr_debug("loading %s\n", path);
-
- return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
+ return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts));
}
struct bpf_object *bpf_object__open(const char *path)
@@ -8031,10 +8028,15 @@ struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts)
{
+ char tmp_name[64];
+
if (!obj_buf || obj_buf_sz == 0)
return libbpf_err_ptr(-EINVAL);
- return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
+ /* create a (quite useless) default "name" for this memory buffer object */
+ snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz);
+
+ return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts));
}
static int bpf_object_unload(struct bpf_object *obj)
@@ -13761,29 +13763,13 @@ static int populate_skeleton_progs(const struct bpf_object *obj,
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
const struct bpf_object_open_opts *opts)
{
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
- .object_name = s->name,
- );
struct bpf_object *obj;
int err;
- /* Attempt to preserve opts->object_name, unless overriden by user
- * explicitly. Overwriting object name for skeletons is discouraged,
- * as it breaks global data maps, because they contain object name
- * prefix as their own map name prefix. When skeleton is generated,
- * bpftool is making an assumption that this name will stay the same.
- */
- if (opts) {
- memcpy(&skel_opts, opts, sizeof(*opts));
- if (!opts->object_name)
- skel_opts.object_name = s->name;
- }
-
- obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
- err = libbpf_get_error(obj);
- if (err) {
- pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
- s->name, err);
+ obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err);
return libbpf_err(err);
}
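
Caller-side view, as a fragment ('my_skel' stands for any bpftool-generated skeleton): user-supplied options now reach bpf_object_open() untouched, with the skeleton's name passed separately instead of being memcpy()'d into a local opts copy:

LIBBPF_OPTS(bpf_object_open_opts, opts,
	.kernel_log_level = 1,
	/* .object_name left unset: the skeleton's recorded name is used */
);
struct my_skel *skel = my_skel__open_opts(&opts);	/* hypothetical skeleton */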

backport-libbpf-Fix-segfault-due-to-libelf-functions-not-sett.patch

@@ -1,138 +0,0 @@
From 984dcc97ae50c566924277aedc4967e1222e38c2 Mon Sep 17 00:00:00 2001
From: Quentin Monnet <qmo@kernel.org>
Date: Thu, 5 Dec 2024 13:59:42 +0000
Subject: [PATCH] libbpf: Fix segfault due to libelf functions not setting
errno
Libelf functions do not set errno on failure. Instead, it relies on its
internal _elf_errno value, that can be retrieved via elf_errno (or the
corresponding message via elf_errmsg()). From "man libelf":
If a libelf function encounters an error it will set an internal
error code that can be retrieved with elf_errno. Each thread
maintains its own separate error code. The meaning of each error
code can be determined with elf_errmsg, which returns a string
describing the error.
As a consequence, libbpf should not return -errno when a function from
libelf fails: errno may be zero, in which case the returned value will not
be interpreted as an error and will not stop the program. This is visible in
bpf_linker__add_file(), for example, where we call a succession of
functions that rely on libelf:
err = err ?: linker_load_obj_file(linker, filename, opts, &obj);
err = err ?: linker_append_sec_data(linker, &obj);
err = err ?: linker_append_elf_syms(linker, &obj);
err = err ?: linker_append_elf_relos(linker, &obj);
err = err ?: linker_append_btf(linker, &obj);
err = err ?: linker_append_btf_ext(linker, &obj);
If the object file that we try to process is not, in fact, a correct
object file, linker_load_obj_file() may fail with errno not being set,
and return 0. In this case we attempt to run linker_append_elf_syms()
and may segfault.
This can happen (and was discovered) with bpftool:
$ bpftool gen object output.o sample_ret0.bpf.c
libbpf: failed to get ELF header for sample_ret0.bpf.c: invalid `Elf' handle
zsh: segmentation fault (core dumped) bpftool gen object output.o sample_ret0.bpf.c
Fix the issue by returning a non-null error code (-EINVAL) when libelf
functions fail.
Conflict: context adapted
Reference: https://github.com/libbpf/libbpf/commit/984dcc97ae50c566924277aedc4967e1222e38c2
Fixes: faf6ed321cf6 ("libbpf: Add BPF static linker APIs")
Signed-off-by: Quentin Monnet <qmo@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20241205135942.65262-1-qmo@kernel.org
---
src/linker.c | 22 ++++++++--------------
1 file changed, 8 insertions(+), 14 deletions(-)
diff --git a/src/linker.c b/src/linker.c
index cf71d149f..e56ba6e67 100644
--- a/src/linker.c
+++ b/src/linker.c
@@ -566,17 +566,15 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
}
obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL);
if (!obj->elf) {
- err = -errno;
pr_warn_elf("failed to parse ELF file '%s'", filename);
- return err;
+ return -EINVAL;
}
/* Sanity check ELF file high-level properties */
ehdr = elf64_getehdr(obj->elf);
if (!ehdr) {
- err = -errno;
pr_warn_elf("failed to get ELF header for %s", filename);
- return err;
+ return -EINVAL;
}
if (ehdr->e_ident[EI_DATA] != host_endianness) {
err = -EOPNOTSUPP;
@@ -606,9 +604,8 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
}
if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) {
- err = -errno;
pr_warn_elf("failed to get SHSTRTAB section index for %s", filename);
- return err;
+ return -EINVAL;
}
scn = NULL;
@@ -618,26 +615,23 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
shdr = elf64_getshdr(scn);
if (!shdr) {
- err = -errno;
pr_warn_elf("failed to get section #%zu header for %s",
sec_idx, filename);
- return err;
+ return -EINVAL;
}
sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name);
if (!sec_name) {
- err = -errno;
pr_warn_elf("failed to get section #%zu name for %s",
sec_idx, filename);
- return err;
+ return -EINVAL;
}
data = elf_getdata(scn, 0);
if (!data) {
- err = -errno;
pr_warn_elf("failed to get section #%zu (%s) data from %s",
sec_idx, sec_name, filename);
- return err;
+ return -EINVAL;
}
sec = add_src_sec(obj, sec_name);
@@ -2680,14 +2674,14 @@ int bpf_linker__finalize(struct bpf_linker *linker)
/* Finalize ELF layout */
if (elf_update(linker->elf, ELF_C_NULL) < 0) {
- err = -errno;
+ err = -EINVAL;
pr_warn_elf("failed to finalize ELF layout");
return libbpf_err(err);
}
/* Write out final ELF contents */
if (elf_update(linker->elf, ELF_C_WRITE) < 0) {
- err = -errno;
+ err = -EINVAL;
pr_warn_elf("failed to write ELF contents");
return libbpf_err(err);
}
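
The underlying pattern, as a standalone sketch with a hypothetical helper: report libelf failures via elf_errmsg() and return a real error code, since errno is untouched:

#include <errno.h>
#include <stdio.h>
#include <libelf.h>

static int check_ehdr(Elf *elf, const char *filename)
{
	Elf64_Ehdr *ehdr = elf64_getehdr(elf);

	if (!ehdr) {
		/* elf_errmsg(-1) describes the most recent libelf error */
		fprintf(stderr, "failed to get ELF header for %s: %s\n",
			filename, elf_errmsg(-1));
		return -EINVAL;	/* not -errno: libelf does not set errno */
	}
	return 0;
}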

backport-libbpf-Fixed-getting-wrong-return-address-on-arm64-a.patch

@@ -1,32 +0,0 @@
From 81ac790dc831a5b753b310138f2201f87b55169b Mon Sep 17 00:00:00 2001
From: Shuyi Cheng <chengshuyi@linux.alibaba.com>
Date: Sun, 8 Sep 2024 17:23:53 +0800
Subject: [PATCH] libbpf: Fixed getting wrong return address on arm64
architecture
ARM64 has a separate lr register to store the return address, so it is
enough to read the lr register to get the return address; there is no
need to dereference it again.
Signed-off-by: Shuyi Cheng <chengshuyi@linux.alibaba.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/1725787433-77262-1-git-send-email-chengshuyi@linux.alibaba.com
---
src/bpf_tracing.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/bpf_tracing.h b/src/bpf_tracing.h
index 4eab132a9..aa3b04f55 100644
--- a/src/bpf_tracing.h
+++ b/src/bpf_tracing.h
@@ -522,7 +522,7 @@ struct pt_regs;
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
-#elif defined(bpf_target_sparc)
+#elif defined(bpf_target_sparc) || defined(bpf_target_arm64)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP

backport-libbpf-fix-sym_is_subprog-logic-for-weak-global-subp.patch

@@ -1,43 +0,0 @@
From 0e3971339f06c23aa9402a33057ecb3aac7795aa Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko <andrii@kernel.org>
Date: Tue, 8 Oct 2024 18:15:54 -0700
Subject: [PATCH] libbpf: fix sym_is_subprog() logic for weak global subprogs
sym_is_subprog() is incorrectly rejecting relocations against *weak*
global subprogs. Fix that by realizing that STB_WEAK is also a global
function.
While it seems like verifier doesn't support taking an address of
non-static subprog right now, it's still best to fix support for it on
libbpf side, otherwise users will get a very confusing error during BPF
skeleton generation or static linking due to misinterpreted relocation:
libbpf: prog 'handle_tp': bad map relo against 'foo' in section '.text'
Error: failed to open BPF object file: Relocation failed
It's clearly not a map relocation, but is treated and reported as such
without this fix.
Fixes: 53eddb5e04ac ("libbpf: Support subprog address relocation")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20241009011554.880168-1-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
src/libbpf.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/libbpf.c b/src/libbpf.c
index 712b95e88..05ad264ff 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -4013,7 +4013,7 @@ static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
return true;
/* global function */
- return bind == STB_GLOBAL && type == STT_FUNC;
+ return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC;
}
static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
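
The shape of BPF code that hit the misclassification, sketched ('foo' and 'handle_tp' come from the error message above, the rest is illustrative); per the commit message the verifier may still reject taking the address of a non-static subprog, but libbpf now classifies the relocation correctly:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

__attribute__((weak)) __noinline int foo(void)
{
	return 42;
}

SEC("tp/syscalls/sys_enter_getpgid")
int handle_tp(void *ctx)
{
	/* taking the subprog's address emits a relocation against .text,
	 * which sym_is_subprog() previously misread as a map relocation */
	long addr = (long)&foo;

	return addr != 0;
}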

backport-libbpf-move-global-data-mmap-ing-into-bpf_object__lo.patch

@@ -1,167 +0,0 @@
From 2dea4b86ee82a48912e54b49ac4c255eca592067 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko <andrii@kernel.org>
Date: Tue, 22 Oct 2024 21:39:07 -0700
Subject: [PATCH] libbpf: move global data mmap()'ing into bpf_object__load()
Since BPF skeleton inception libbpf has been doing mmap()'ing of global
data ARRAY maps in bpf_object__load_skeleton() API, which is used by
code generated .skel.h files (i.e., by BPF skeletons only).
This is wrong because if BPF object is loaded through generic
bpf_object__load() API, global data maps won't be re-mmap()'ed after
the load step, and memory pointers returned from bpf_map__initial_value()
would be wrong and would not reflect the actual memory shared between the BPF
program and user space.
bpf_map__initial_value() return result is rarely used after load, so
this went unnoticed for a really long time, until bpftrace project
attempted to load BPF object through generic bpf_object__load() API and
then used BPF subskeleton instantiated from such bpf_object. It turned
out that .data/.rodata/.bss data updates through such a subskeleton were
"blackholed", all because libbpf wouldn't re-mmap() those maps during
bpf_object__load() phase.
Long story short, this step should be done by libbpf regardless of BPF
skeleton usage, right after BPF map is created in the kernel. This patch
moves this functionality into bpf_object__populate_internal_map() to
achieve this. And bpf_object__load_skeleton() is now simple and almost
trivial, only propagating these mmap()'ed pointers into user-supplied
skeleton structs.
We also do trivial adjustments to error reporting inside
bpf_object__populate_internal_map() for consistency with the rest of
libbpf's map-handling code.
Reported-by: Alastair Robertson <ajor@meta.com>
Reported-by: Jonathan Wiepert <jwiepert@meta.com>
Fixes: d66562fba1ce ("libbpf: Add BPF object skeleton support")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20241023043908.3834423-3-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Conflict: In the original patch, code is moved from bpf_object__load_skeleton() into bpf_object__populate_internal_map(). The implementation details differ in this version, so the code movement was redone here following the same approach.
Reference: https://github.com/libbpf/libbpf/commit/2dea4b86ee82a48912e54b49ac4c255eca592067
---
src/libbpf.c | 81 ++++++++++++++++++++++++++--------------------------
1 file changed, 41 insertions(+), 40 deletions(-)
diff --git a/src/libbpf.c b/src/libbpf.c
index 8d63238..cd8203f 100644
--- a/src/libbpf.c
+++ b/src/libbpf.c
@@ -4971,6 +4971,7 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
enum libbpf_map_type map_type = map->libbpf_type;
char *cp, errmsg[STRERR_BUFSIZE];
int err, zero = 0;
+ size_t mmap_sz;
if (obj->gen_loader) {
bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
@@ -4983,8 +4984,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
if (err) {
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("Error setting initial map(%s) contents: %s\n",
- map->name, cp);
+ pr_warn("map '%s': failed to set initial contents: %s\n",
+ bpf_map__name(map), cp);
return err;
}
@@ -4994,11 +4995,45 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
if (err) {
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("Error freezing map(%s) as read-only: %s\n",
- map->name, cp);
+ pr_warn("map '%s': failed to freeze as read-only: %s\n",
+ bpf_map__name(map), cp);
return err;
}
}
+
+ /* Remap anonymous mmap()-ed "map initialization image" as
+ * a BPF map-backed mmap()-ed memory, but preserving the same
+ * memory address. This will cause kernel to change process'
+ * page table to point to a different piece of kernel memory,
+ * but from userspace point of view memory address (and its
+ * contents, being identical at this point) will stay the
+ * same. This mapping will be released by bpf_object__close()
+ * as per normal clean up procedure, so we don't need to worry
+ * about it from skeleton's clean up perspective.
+ */
+ mmap_sz = bpf_map_mmap_sz(map);
+ if (map->def.map_flags & BPF_F_MMAPABLE) {
+ void *mmaped;
+ int prot;
+
+ if (map->def.map_flags & BPF_F_RDONLY_PROG)
+ prot = PROT_READ;
+ else
+ prot = PROT_READ | PROT_WRITE;
+ mmaped = mmap(map->mmaped, mmap_sz, prot,
+ MAP_SHARED | MAP_FIXED, map->fd, 0);
+ if (mmaped == MAP_FAILED) {
+ err = -errno;
+ mmaped = NULL;
+ pr_warn("failed to re-mmap() map '%s': %d\n",
+ bpf_map__name(map), err);
+ return libbpf_err(err);
+ }
+ } else if (map->mmaped) {
+ munmap(map->mmaped, mmap_sz);
+ map->mmaped = NULL;
+ }
+
return 0;
}
@@ -13128,44 +13163,10 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map *map = *s->maps[i].map;
- size_t mmap_sz = bpf_map_mmap_sz(map);
- int prot, map_fd = bpf_map__fd(map);
- void **mmaped = s->maps[i].mmaped;
-
- if (!mmaped)
+ if (!s->maps[i].mmaped)
continue;
-
- if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
- *mmaped = NULL;
- continue;
- }
-
- if (map->def.map_flags & BPF_F_RDONLY_PROG)
- prot = PROT_READ;
- else
- prot = PROT_READ | PROT_WRITE;
-
- /* Remap anonymous mmap()-ed "map initialization image" as
- * a BPF map-backed mmap()-ed memory, but preserving the same
- * memory address. This will cause kernel to change process'
- * page table to point to a different piece of kernel memory,
- * but from userspace point of view memory address (and its
- * contents, being identical at this point) will stay the
- * same. This mapping will be released by bpf_object__close()
- * as per normal clean up procedure, so we don't need to worry
- * about it from skeleton's clean up perspective.
- */
- *mmaped = mmap(map->mmaped, mmap_sz, prot,
- MAP_SHARED | MAP_FIXED, map_fd, 0);
- if (*mmaped == MAP_FAILED) {
- err = -errno;
- *mmaped = NULL;
- pr_warn("failed to re-mmap() map '%s': %d\n",
- bpf_map__name(map), err);
- return libbpf_err(err);
- }
+ *s->maps[i].mmaped = map->mmaped;
}
-
return 0;
}
--
2.33.0
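
Sketch of the generic-API flow this fixes (file and map names hypothetical, error handling elided): after bpf_object__load(), bpf_map__initial_value() now returns the live, BPF-map-backed memory:

struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
struct bpf_map *bss;
size_t sz;
long *counter;

bpf_object__load(obj);	/* global data maps are re-mmap()'ed here now */

/* libbpf matches ".bss" against the object-name-prefixed internal map */
bss = bpf_object__find_map_by_name(obj, ".bss");
counter = bpf_map__initial_value(bss, &sz);
/* writes through 'counter' are now shared with the loaded BPF program */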

libbpf.spec

@@ -4,7 +4,7 @@
Name: %{githubname}
Version: %{githubver}
-Release: 8
+Release: 3
Summary: Libbpf library
License: LGPLv2 or BSD
@@ -18,18 +18,6 @@ Patch0001: backport-libbpf-Ensure-libbpf-always-opens-files-with-O_CLOEX.pa
Patch0002: backport-libbpf-Set-close-on-exec-flag-on-gzopen.patch
Patch0003: backport-libbpf-Fix-NULL-pointer-dereference-in_bpf_object__c.patch
Patch0004: backport-libbpf-Free-btf_vmlinux-when-closing-bpf_object.patch
-Patch0005: backport-libbpf-Avoid-uninitialized-value-in-BPF_CORE_READ_BI.patch
-Patch0006: backport-libbpf-Add-NULL-checks-to-bpf_object__prev_map,next_.patch
-Patch0007: backport-libbpf-Apply-map_set_def_max_entries-for-inner_maps-.patch
-Patch0008: backport-libbpf-Dont-take-direct-pointers-into-BTF-data-from-.patch
-Patch0009: add-sw_64-support.patch
-Patch0010: backport-libbpf-Do-not-resolve-size-on-duplicate-FUNCs.patch
-Patch0011: backport-libbpf-Fix-bpf_object__open_skeleton-s-mishandling-o.patch
-Patch0012: backport-libbpf-Fix-segfault-due-to-libelf-functions-not-sett.patch
-Patch0013: backport-libbpf-Fixed-getting-wrong-return-address-on-arm64-a.patch
-Patch0014: backport-libbpf-fix-sym_is_subprog-logic-for-weak-global-subp.patch
-Patch0015: backport-libbpf-move-global-data-mmap-ing-into-bpf_object__lo.patch
# This package supersedes libbpf from kernel-tools,
# which has default Epoch: 0. By having Epoch: 1
@@ -82,31 +70,6 @@ developing applications that use %{name}
%{_libdir}/libbpf.a
%changelog
-* Fri Mar 14 2025 zhangmingyi <zhangmingyi5@huawei.com> 2:1.2.2-8
-- backport patch from upstream:
-backport-libbpf-Do-not-resolve-size-on-duplicate-FUNCs.patch
-backport-libbpf-Fix-bpf_object__open_skeleton-s-mishandling-o.patch
-backport-libbpf-Fix-segfault-due-to-libelf-functions-not-sett.patch
-backport-libbpf-Fixed-getting-wrong-return-address-on-arm64-a.patch
-backport-libbpf-fix-sym_is_subprog-logic-for-weak-global-subp.patch
-backport-libbpf-move-global-data-mmap-ing-into-bpf_object__lo.patch
-* Fri Mar 14 2025 mahailiang <mahailiang@uniontech.com> 2:1.2.2-7
-- add sw_64 support
-* Mon Dec 23 2024 zhangmingyi <zhangmingyi5@huawei.com> 2:1.2.2-6
-- backport patch from upstream:
-backport-libbpf-Dont-take-direct-pointers-into-BTF-data-from-.patch
-* Wed Oct 09 2024 zhangmingyi <zhangmingyi5@huawei.com> 2:1.2.2-5
-- backport patch from upstream:
-backport-libbpf-Add-NULL-checks-to-bpf_object__prev_map,next_.patch
-backport-libbpf-Apply-map_set_def_max_entries-for-inner_maps-.patch
-* Wed Sep 25 2024 zhangmingyi <zhangmingyi5@huawei.com> 2:1.2.2-4
-- backport patch from upstream:
-backport-libbpf-Avoid-uninitialized-value-in-BPF_CORE_READ_BI.patch
* Fri May 10 2024 jinzhiguang <jinzhiguang@kylinos.cn> 2:1.2.2-3
- backport patch from upstream:
backport-libbpf-Free-btf_vmlinux-when-closing-bpf_object.patch