libbpf/0001-sync-bpf-helper-funcs-from-kernel.patch
kwb0523 50c746f25f sync bpf helper funcs from kernel
(cherry picked from commit a85dd514ecb07946de0d78d84d3c2560840c112e)
2023-08-05 17:44:35 +08:00

From 78ac59d3afde9e11df3223cb669c54b1b77400e3 Mon Sep 17 00:00:00 2001
From: kwb0523 <kwb0523@163.com>
Date: Fri, 4 Aug 2023 16:30:46 +0800
Subject: [PATCH] sync bpf helper funcs from kernel
---
src/bpf_helper_defs.h | 339 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 339 insertions(+)
diff --git a/src/bpf_helper_defs.h b/src/bpf_helper_defs.h
index abe612e..95f05f1 100644
--- a/src/bpf_helper_defs.h
+++ b/src/bpf_helper_defs.h
@@ -4370,4 +4370,343 @@ static void *(*bpf_kptr_xchg)(void *map_value, void *ptr) = (void *) 194;
*/
static void *(*bpf_map_lookup_percpu_elem)(void *map, const void *key, __u32 cpu) = (void *) 195;
+/*
+ * bpf_skc_to_mptcp_sock
+ *
+ * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
+ *
+ * Returns
+ * *sk* if casting is valid, or **NULL** otherwise.
+ */
+static struct mptcp_sock *(*bpf_skc_to_mptcp_sock)(void *sk) = (void *) 196;
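
Illustrative sketch only (not part of the patch): a sockops program can use this cast to detect MPTCP connections, as the kernel MPTCP selftests do. This assumes an MPTCP-enabled kernel and the usual vmlinux.h / bpf_helpers.h setup; the program name is made up.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int detect_mptcp(struct bpf_sock_ops *skops)
{
        struct bpf_sock *sk = skops->sk;
        struct mptcp_sock *msk;

        if (!sk)
                return 1;

        msk = bpf_skc_to_mptcp_sock(sk);    /* NULL if not an MPTCP socket */
        if (msk)
                bpf_printk("MPTCP connection observed");
        return 1;
}

char LICENSE[] SEC("license") = "GPL";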
+
+/*
+ * bpf_dynptr_from_mem
+ *
+ * Get a dynptr to local memory *data*.
+ *
+ * *data* must be a ptr to a map value.
+ * The maximum *size* supported is DYNPTR_MAX_SIZE.
+ * *flags* is currently unused.
+ *
+ * Returns
+ * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
+ * -EINVAL if flags is not 0.
+ */
+static long (*bpf_dynptr_from_mem)(void *data, __u32 size, __u64 flags, struct bpf_dynptr *ptr) = (void *) 197;
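
Illustrative sketch only (not part of the patch): wrapping a 64-byte map value in a dynptr. The map, struct, and program names are hypothetical.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct scratch { char data[64]; };

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, struct scratch);
} scratch_map SEC(".maps");

SEC("tp/syscalls/sys_enter_write")
int make_dynptr(void *ctx)
{
        __u32 key = 0;
        struct bpf_dynptr dptr;
        struct scratch *s = bpf_map_lookup_elem(&scratch_map, &key);

        if (!s)
                return 0;
        /* 0 on success; -E2BIG if size > DYNPTR_MAX_SIZE; -EINVAL if flags != 0 */
        if (bpf_dynptr_from_mem(s->data, sizeof(s->data), 0, &dptr))
                return 0;
        /* dptr now refers to the 64-byte map value and can be passed to the
         * bpf_dynptr_read/write/data helpers documented below */
        return 0;
}

char LICENSE[] SEC("license") = "GPL";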
+
+/*
+ * bpf_ringbuf_reserve_dynptr
+ *
+ * Reserve *size* bytes of payload in a ring buffer *ringbuf*
+ * through the dynptr interface. *flags* must be 0.
+ *
+ * Please note that a corresponding bpf_ringbuf_submit_dynptr or
+ * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
+ * reservation fails. This is enforced by the verifier.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static long (*bpf_ringbuf_reserve_dynptr)(void *ringbuf, __u32 size, __u64 flags, struct bpf_dynptr *ptr) = (void *) 198;
+
+/*
+ * bpf_ringbuf_submit_dynptr
+ *
+ * Submit reserved ring buffer sample, pointed to by *data*,
+ * through the dynptr interface. This is a no-op if the dynptr is
+ * invalid/null.
+ *
+ * For more information on *flags*, please see
+ * 'bpf_ringbuf_submit'.
+ *
+ * Returns
+ * Nothing. Always succeeds.
+ */
+static void (*bpf_ringbuf_submit_dynptr)(struct bpf_dynptr *ptr, __u64 flags) = (void *) 199;
+
+/*
+ * bpf_ringbuf_discard_dynptr
+ *
+ * Discard reserved ring buffer sample through the dynptr
+ * interface. This is a no-op if the dynptr is invalid/null.
+ *
+ * For more information on *flags*, please see
+ * 'bpf_ringbuf_discard'.
+ *
+ * Returns
+ * Nothing. Always succeeds.
+ */
+static void (*bpf_ringbuf_discard_dynptr)(struct bpf_dynptr *ptr, __u64 flags) = (void *) 200;
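
Illustrative sketch only (not part of the patch): the reserve / fill / submit-or-discard pattern with the dynptr ring buffer interface, using bpf_dynptr_write (documented further below) to fill the sample. The map name and event layout are made up.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_RINGBUF);
        __uint(max_entries, 256 * 1024);
} events SEC(".maps");

struct event {
        __u32 pid;
};

SEC("tp/sched/sched_process_exec")
int log_exec(void *ctx)
{
        struct bpf_dynptr dptr;
        struct event e = { .pid = bpf_get_current_pid_tgid() >> 32 };

        /* submit or discard must follow even if the reservation fails */
        bpf_ringbuf_reserve_dynptr(&events, sizeof(e), 0, &dptr);
        if (bpf_dynptr_write(&dptr, 0, &e, sizeof(e), 0)) {
                bpf_ringbuf_discard_dynptr(&dptr, 0);
                return 0;
        }
        bpf_ringbuf_submit_dynptr(&dptr, 0);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";

If the reservation fails, the dynptr is null, the write fails with -EINVAL, and the discard is a harmless no-op, which satisfies the verifier's requirement that every reservation is paired with a submit or discard.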
+
+/*
+ * bpf_dynptr_read
+ *
+ * Read *len* bytes from *src* into *dst*, starting from *offset*
+ * into *src*.
+ * *flags* is currently unused.
+ *
+ * Returns
+ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
+ * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
+ * *flags* is not 0.
+ */
+static long (*bpf_dynptr_read)(void *dst, __u32 len, const struct bpf_dynptr *src, __u32 offset, __u64 flags) = (void *) 201;
+
+/*
+ * bpf_dynptr_write
+ *
+ * Write *len* bytes from *src* into *dst*, starting from *offset*
+ * into *dst*.
+ *
+ * *flags* must be 0 except for skb-type dynptrs.
+ *
+ * For skb-type dynptrs:
+ * * All data slices of the dynptr are automatically
+ * invalidated after **bpf_dynptr_write**\ (). This is
+ * because writing may pull the skb and change the
+ * underlying packet buffer.
+ *
+ * * For *flags*, please see the flags accepted by
+ * **bpf_skb_store_bytes**\ ().
+ *
+ * Returns
+ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
+ * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
+ * is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs,
+ * other errors correspond to errors returned by **bpf_skb_store_bytes**\ ().
+ */
+static long (*bpf_dynptr_write)(const struct bpf_dynptr *dst, __u32 offset, void *src, __u32 len, __u64 flags) = (void *) 202;
+
+/*
+ * bpf_dynptr_data
+ *
+ * Get a pointer to the underlying dynptr data.
+ *
+ * *len* must be a statically known value. The returned data slice
+ * is invalidated whenever the dynptr is invalidated.
+ *
+ * skb and xdp type dynptrs may not use bpf_dynptr_data. They should
+ * instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr.
+ *
+ * Returns
+ * Pointer to the underlying dynptr data, NULL if the dynptr is
+ * read-only, if the dynptr is invalid, or if the offset and length
+ * is out of bounds.
+ */
+static void *(*bpf_dynptr_data)(const struct bpf_dynptr *ptr, __u32 offset, __u32 len) = (void *) 203;
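
Illustrative sketch only (not part of the patch): bpf_dynptr_read, bpf_dynptr_write, and bpf_dynptr_data on a memory-backed dynptr, continuing the bpf_dynptr_from_mem pattern above. Names, offsets, and sizes are hypothetical.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct scratch { char data[64]; };

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, struct scratch);
} dynptr_scratch SEC(".maps");

SEC("tp/syscalls/sys_enter_getpid")
int dynptr_ops(void *ctx)
{
        __u32 key = 0;
        __u64 stamp = bpf_ktime_get_ns();
        __u64 copy = 0;
        struct bpf_dynptr dptr;
        void *slice;
        struct scratch *s = bpf_map_lookup_elem(&dynptr_scratch, &key);

        if (!s || bpf_dynptr_from_mem(s->data, sizeof(s->data), 0, &dptr))
                return 0;

        /* write 8 bytes at offset 16, then read them back */
        if (bpf_dynptr_write(&dptr, 16, &stamp, sizeof(stamp), 0))
                return 0;
        if (bpf_dynptr_read(&copy, sizeof(copy), &dptr, 16, 0))
                return 0;

        /* direct slice into the buffer; len (8) must be statically known */
        slice = bpf_dynptr_data(&dptr, 0, 8);
        if (slice)
                *(__u64 *)slice = copy;
        return 0;
}

char LICENSE[] SEC("license") = "GPL";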
+
+/*
+ * bpf_tcp_raw_gen_syncookie_ipv4
+ *
+ * Try to issue a SYN cookie for the packet with corresponding
+ * IPv4/TCP headers, *iph* and *th*, without depending on a
+ * listening socket.
+ *
+ * *iph* points to the IPv4 header.
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
+ *
+ * Returns
+ * On success, the lower 32 bits hold the generated SYN cookie,
+ * followed by 16 bits which hold the MSS value for that cookie;
+ * the top 16 bits are unused.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if *th_len* is invalid.
+ */
+static __s64 (*bpf_tcp_raw_gen_syncookie_ipv4)(struct iphdr *iph, struct tcphdr *th, __u32 th_len) = (void *) 204;
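
Illustrative sketch only (not part of the patch): generating a SYN cookie for an incoming IPv4 SYN in an XDP program, in the style of the kernel's xdp_synproxy selftest. Parsing is deliberately minimal and assumes no VLAN tag and no IP options; ETH_P_IP is defined locally because vmlinux.h does not carry macros.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#define ETH_P_IP 0x0800   /* not exported through vmlinux.h */

SEC("xdp")
int gen_cookie(struct xdp_md *ctx)
{
        void *data = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;
        struct ethhdr *eth = data;
        struct iphdr *iph = (void *)(eth + 1);   /* assumes no IP options */
        struct tcphdr *th = (void *)(iph + 1);
        __s64 cookie;

        if ((void *)(th + 1) > data_end ||
            eth->h_proto != bpf_htons(ETH_P_IP) ||
            iph->protocol != IPPROTO_TCP)
                return XDP_PASS;

        if (!th->syn || th->ack)
                return XDP_PASS;

        /* low 32 bits: cookie; bits 32-47: MSS; negative value on error */
        cookie = bpf_tcp_raw_gen_syncookie_ipv4(iph, th, sizeof(*th));
        if (cookie < 0)
                return XDP_PASS;

        /* ... a real program would now build and emit the SYN-ACK ... */
        return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";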
+
+/*
+ * bpf_tcp_raw_gen_syncookie_ipv6
+ *
+ * Try to issue a SYN cookie for the packet with corresponding
+ * IPv6/TCP headers, *iph* and *th*, without depending on a
+ * listening socket.
+ *
+ * *iph* points to the IPv6 header.
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
+ *
+ * Returns
+ * On success, the lower 32 bits hold the generated SYN cookie,
+ * followed by 16 bits which hold the MSS value for that cookie;
+ * the top 16 bits are unused.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if *th_len* is invalid.
+ *
+ * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
+ */
+static __s64 (*bpf_tcp_raw_gen_syncookie_ipv6)(struct ipv6hdr *iph, struct tcphdr *th, __u32 th_len) = (void *) 205;
+
+/*
+ * bpf_tcp_raw_check_syncookie_ipv4
+ *
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK
+ * without depending on a listening socket.
+ *
+ * *iph* points to the IPv4 header.
+ *
+ * *th* points to the TCP header.
+ *
+ * Returns
+ * 0 if *iph* and *th* are a valid SYN cookie ACK.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EACCES** if the SYN cookie is not valid.
+ */
+static long (*bpf_tcp_raw_check_syncookie_ipv4)(struct iphdr *iph, struct tcphdr *th) = (void *) 206;
+
+/*
+ * bpf_tcp_raw_check_syncookie_ipv6
+ *
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK
+ * without depending on a listening socket.
+ *
+ * *iph* points to the IPv6 header.
+ *
+ * *th* points to the TCP header.
+ *
+ * Returns
+ * 0 if *iph* and *th* are a valid SYN cookie ACK.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EACCES** if the SYN cookie is not valid.
+ *
+ * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
+ */
+static long (*bpf_tcp_raw_check_syncookie_ipv6)(struct ipv6hdr *iph, struct tcphdr *th) = (void *) 207;
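
Illustrative sketch only (not part of the patch): validating the cookie carried in the ACK that completes a handshake started with the generation sketch above. The same simplified parsing assumptions apply (IPv4, no VLAN, no IP options).

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#define ETH_P_IP 0x0800   /* not exported through vmlinux.h */

SEC("xdp")
int check_cookie(struct xdp_md *ctx)
{
        void *data = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;
        struct ethhdr *eth = data;
        struct iphdr *iph = (void *)(eth + 1);
        struct tcphdr *th = (void *)(iph + 1);

        if ((void *)(th + 1) > data_end ||
            eth->h_proto != bpf_htons(ETH_P_IP) ||
            iph->protocol != IPPROTO_TCP)
                return XDP_PASS;

        if (!th->ack || th->syn || th->rst)
                return XDP_PASS;

        /* 0: valid SYN cookie ACK; -EACCES: cookie is not valid */
        if (bpf_tcp_raw_check_syncookie_ipv4(iph, th))
                return XDP_DROP;
        return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";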
+
+/*
+ * bpf_ktime_get_tai_ns
+ *
+ * A nonsettable system-wide clock derived from wall-clock time but
+ * ignoring leap seconds. This clock does not experience
+ * discontinuities and backwards jumps caused by NTP inserting leap
+ * seconds as CLOCK_REALTIME does.
+ *
+ * See: **clock_gettime**\ (**CLOCK_TAI**)
+ *
+ * Returns
+ * Current *ktime*.
+ */
+static __u64 (*bpf_ktime_get_tai_ns)(void) = (void *) 208;
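
Illustrative sketch only (not part of the patch): timestamping an event with TAI time, so it can be compared against clock_gettime(CLOCK_TAI) readings taken in user space.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

SEC("tp/syscalls/sys_enter_openat")
int stamp_tai(void *ctx)
{
        __u64 tai_ns = bpf_ktime_get_tai_ns();

        bpf_printk("openat at TAI %llu ns", tai_ns);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";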
+
+/*
+ * bpf_user_ringbuf_drain
+ *
+ * Drain samples from the specified user ring buffer, and invoke
+ * the provided callback for each such sample:
+ *
+ * long (\*callback_fn)(const struct bpf_dynptr \*dynptr, void \*ctx);
+ *
+ * If **callback_fn** returns 0, the helper will continue to try
+ * and drain the next sample, up to a maximum of
+ * BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1,
+ * the helper will skip the rest of the samples and return. Other
+ * return values are not used now, and will be rejected by the
+ * verifier.
+ *
+ * Returns
+ * The number of drained samples if no error was encountered while
+ * draining samples, or 0 if no samples were present in the ring
+ * buffer. If a user-space producer was epoll-waiting on this map,
+ * and at least one sample was drained, they will receive an event
+ * notification notifying them of available space in the ring
+ * buffer. If the BPF_RB_NO_WAKEUP flag is passed to this
+ * function, no wakeup notification will be sent. If the
+ * BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will
+ * be sent even if no sample was drained.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EBUSY** if the ring buffer is contended, and another calling
+ * context was concurrently draining the ring buffer.
+ *
+ * **-EINVAL** if user-space is not properly tracking the ring
+ * buffer due to the producer position not being aligned to 8
+ * bytes, a sample not being aligned to 8 bytes, or the producer
+ * position not matching the advertised length of a sample.
+ *
+ * **-E2BIG** if user-space has tried to publish a sample which is
+ * larger than the size of the ring buffer, or which cannot fit
+ * within a struct bpf_dynptr.
+ */
+static long (*bpf_user_ringbuf_drain)(void *map, void *callback_fn, void *ctx, __u64 flags) = (void *) 209;
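
Illustrative sketch only (not part of the patch): draining user-space-produced samples via a callback. The map and callback names and the assumption that each sample starts with an 8-byte header are made up.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
        __uint(max_entries, 256 * 1024);
} user_rb SEC(".maps");

static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
{
        __u64 hdr;

        /* each sample arrives as a dynptr; read its first 8 bytes */
        if (bpf_dynptr_read(&hdr, sizeof(hdr), dynptr, 0, 0))
                return 1;      /* 1 = stop draining */
        return 0;              /* 0 = keep draining  */
}

SEC("tp/syscalls/sys_enter_getpgid")
int drain(void *ctx)
{
        long n = bpf_user_ringbuf_drain(&user_rb, handle_sample, NULL, 0);

        if (n >= 0)
                bpf_printk("drained %ld samples", n);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";

On the user-space side, samples for this map type are produced with libbpf's user_ring_buffer__reserve() / user_ring_buffer__submit() API.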
+
+/*
+ * bpf_cgrp_storage_get
+ *
+ * Get a bpf_local_storage from the *cgroup*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *cgroup* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *cgroup*) except this
+ * helper enforces the key must be a cgroup struct and the map must also
+ * be a **BPF_MAP_TYPE_CGRP_STORAGE**.
+ *
+ * In reality, the local-storage value is embedded directly inside of the
+ * *cgroup* object itself, rather than being located in the
+ * **BPF_MAP_TYPE_CGRP_STORAGE** map. When the local-storage value is
+ * queried for some *map* on a *cgroup* object, the kernel will perform an
+ * O(n) iteration over all of the live local-storage values for that
+ * *cgroup* object until the local-storage value for the *map* is found.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ *
+ * Returns
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ */
+static void *(*bpf_cgrp_storage_get)(void *map, struct cgroup *cgroup, void *value, __u64 flags) = (void *) 210;
+
+/*
+ * bpf_cgrp_storage_delete
+ *
+ * Delete a bpf_local_storage from a *cgroup*.
+ *
+ * Returns
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
+ */
+static long (*bpf_cgrp_storage_delete)(void *map, struct cgroup *cgroup) = (void *) 211;
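
Illustrative sketch only (not part of the patch): a per-cgroup counter kept in cgroup local storage, keyed by the current task's default cgroup, following the pattern in the kernel's cgrp_local_storage selftests. Map and program names are hypothetical.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
        __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, __u64);
} cgrp_counter SEC(".maps");

SEC("tp_btf/sys_enter")
int BPF_PROG(count_per_cgroup, struct pt_regs *regs, long id)
{
        struct task_struct *task = bpf_get_current_task_btf();
        struct cgroup *cgrp = task->cgroups->dfl_cgrp;
        __u64 *val;

        /* create zero-initialized storage on first access for this cgroup */
        val = bpf_cgrp_storage_get(&cgrp_counter, cgrp, NULL,
                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (val)
                __sync_fetch_and_add(val, 1);

        /* bpf_cgrp_storage_delete(&cgrp_counter, cgrp) would drop the storage
         * again, returning -ENOENT if none exists for this cgroup. */
        return 0;
}

char LICENSE[] SEC("license") = "GPL";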
+
+/*
+ * bpf_get_sockops_uid_gid
+ *
+ * Get the sock's UID and GID.
+ *
+ * Returns
+ * A 64-bit integer containing the current GID and UID, and
+ * created as such: *current_gid* **<< 32 \|** *current_uid*.
+ */
+static __u64 (*bpf_get_sockops_uid_gid)(void *sockops) = (void *) 212;
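
Illustrative sketch only (not part of the patch): splitting the packed GID/UID value. This helper is openEuler-specific; the assumption that it is callable from a sockops program is based only on its name and signature.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int log_uid_gid(struct bpf_sock_ops *skops)
{
        __u64 uid_gid = bpf_get_sockops_uid_gid(skops);
        __u32 uid = (__u32)uid_gid;          /* low 32 bits  */
        __u32 gid = uid_gid >> 32;           /* high 32 bits */

        bpf_printk("sock uid=%u gid=%u", uid, gid);
        return 1;
}

char LICENSE[] SEC("license") = "GPL";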
+
+/*
+ * bpf_sk_original_addr
+ *
+ * Get IPv4 origdst or replysrc. Works with IPv4 only.
+ *
+ * Returns
+ * 0 on success, or a negative error in case of failure.
+ */
+static int (*bpf_sk_original_addr)(void *bpf_socket, int optname, char *optval, int optlen) = (void *) 213;
--
2.33.0