Mirror of https://github.com/torvalds/linux.git (synced 2024-11-24 21:21:41 +00:00)
bpf-next-for-netdev
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQTFp0I1jqZrAX+hPRXbK58LschIgwUCZmIsRAAKCRDbK58LschI
g4SSAP0bkl6rPMn7zp1h+/l7hlvpp2aVOmasBTe8hIhAGUbluwD/TGq4sNsGgXFI
i4tUtFRhw8pOjy2guy6526qyJvBs8wY=
=WMhY
-----END PGP SIGNATURE-----

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2024-06-06

We've added 54 non-merge commits during the last 10 day(s) which contain
a total of 50 files changed, 1887 insertions(+), 527 deletions(-).

The main changes are:

1) Add a user space notification mechanism via epoll when a struct_ops
   object is getting detached/unregistered, from Kui-Feng Lee.

2) Big batch of BPF selftest refactoring for sockmap and BPF congctl
   tests, from Geliang Tang.

3) Add BTF field (type and string fields, right now) iterator support
   to libbpf instead of using existing callback-based approaches,
   from Andrii Nakryiko.

4) Extend BPF selftests for the latter with a new btf_field_iter
   selftest, from Alan Maguire.

5) Add new kfuncs for a generic, open-coded bits iterator, from
   Yafang Shao.

6) Fix BPF selftests' kallsyms_find() helper under kernels configured
   with CONFIG_LTO_CLANG_THIN, from Yonghong Song.

7) Remove a bunch of unused structs in BPF selftests, from
   David Alan Gilbert.

8) Convert test_sockmap section names into names understood by libbpf
   so it can deduce program type and attach type, from Jakub Sitnicki.

9) Extend libbpf with the ability to configure log verbosity via the
   LIBBPF_LOG_LEVEL environment variable, from Mykyta Yatsenko.

10) Fix BPF selftests with regards to bpf_cookie and find_vma flakiness
    in nested VMs, from Song Liu.

11) Extend riscv32/64 JITs to introduce shift/add helpers to generate
    Zba optimization, from Xiao Wang.

12) Enable BPF programs to declare arrays and struct fields with kptr,
    bpf_rb_root, and bpf_list_head, from Kui-Feng Lee.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (54 commits)
  selftests/bpf: Drop useless arguments of do_test in bpf_tcp_ca
  selftests/bpf: Use start_test in test_dctcp in bpf_tcp_ca
  selftests/bpf: Use start_test in test_dctcp_fallback in bpf_tcp_ca
  selftests/bpf: Add start_test helper in bpf_tcp_ca
  selftests/bpf: Use connect_to_fd_opts in do_test in bpf_tcp_ca
  libbpf: Auto-attach struct_ops BPF maps in BPF skeleton
  selftests/bpf: Add btf_field_iter selftests
  selftests/bpf: Fix send_signal test with nested CONFIG_PARAVIRT
  libbpf: Remove callback-based type/string BTF field visitor helpers
  bpftool: Use BTF field iterator in btfgen
  libbpf: Make use of BTF field iterator in BTF handling code
  libbpf: Make use of BTF field iterator in BPF linker code
  libbpf: Add BTF field iterator
  selftests/bpf: Ignore .llvm.<hash> suffix in kallsyms_find()
  selftests/bpf: Fix bpf_cookie and find_vma in nested VM
  selftests/bpf: Test global bpf_list_head arrays.
  selftests/bpf: Test global bpf_rb_root arrays and fields in nested struct types.
  selftests/bpf: Test kptr arrays and kptrs in nested struct fields.
  bpf: limit the number of levels of a nested struct type.
  bpf: look into the types of the fields of a struct type recursively.
  ...
====================

Link: https://lore.kernel.org/r/20240606223146.23020-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in commit b1156532bc.
@@ -219,6 +219,14 @@ compilation and skeleton generation. Using Libbpf-rs will make building user
 space part of the BPF application easier. Note that the BPF program themselves
 must still be written in plain C.
 
+libbpf logging
+==============
+
+By default, libbpf logs informational and warning messages to stderr. The
+verbosity of these messages can be controlled by setting the environment
+variable LIBBPF_LOG_LEVEL to either warn, info, or debug. A custom log
+callback can be set using ``libbpf_set_print()``.
+
 Additional Documentation
 ========================
 
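The documentation hunk above names two knobs: the LIBBPF_LOG_LEVEL environment variable and a custom callback. A minimal user-space sketch of the callback route follows; libbpf_set_print() and the libbpf_print_fn_t signature are the real libbpf API, while the filtering policy shown is only an example:

	#include <stdarg.h>
	#include <stdio.h>
	#include <bpf/libbpf.h>

	/* Forward libbpf's messages to stderr, dropping debug-level chatter. */
	static int my_print(enum libbpf_print_level level, const char *fmt, va_list args)
	{
		if (level == LIBBPF_DEBUG)
			return 0;
		return vfprintf(stderr, fmt, args);
	}

	int main(void)
	{
		libbpf_set_print(my_print);	/* returns the previously installed callback */
		/* ... open, load and attach BPF objects as usual ... */
		return 0;
	}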
@@ -742,6 +742,17 @@ static inline u16 rvc_swsp(u32 imm8, u8 rs2)
 	return rv_css_insn(0x6, imm, rs2, 0x2);
 }
 
+/* RVZBA instructions. */
+static inline u32 rvzba_sh2add(u8 rd, u8 rs1, u8 rs2)
+{
+	return rv_r_insn(0x10, rs2, rs1, 0x4, rd, 0x33);
+}
+
+static inline u32 rvzba_sh3add(u8 rd, u8 rs1, u8 rs2)
+{
+	return rv_r_insn(0x10, rs2, rs1, 0x6, rd, 0x33);
+}
+
 /* RVZBB instructions. */
 static inline u32 rvzbb_sextb(u8 rd, u8 rs1)
 {
@@ -1095,6 +1106,28 @@ static inline void emit_sw(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
 	emit(rv_sw(rs1, off, rs2), ctx);
 }
 
+static inline void emit_sh2add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+	if (rvzba_enabled()) {
+		emit(rvzba_sh2add(rd, rs1, rs2), ctx);
+		return;
+	}
+
+	emit_slli(rd, rs1, 2, ctx);
+	emit_add(rd, rd, rs2, ctx);
+}
+
+static inline void emit_sh3add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+	if (rvzba_enabled()) {
+		emit(rvzba_sh3add(rd, rs1, rs2), ctx);
+		return;
+	}
+
+	emit_slli(rd, rs1, 3, ctx);
+	emit_add(rd, rd, rs2, ctx);
+}
+
 /* RV64-only helper functions. */
 #if __riscv_xlen == 64
 
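For reference, the Zba sh2add/sh3add instructions wrapped above compute a shifted add in one instruction. A plain-C model of their semantics (a sketch, not kernel code):

	#include <stdint.h>

	/* sh2add/sh3add fuse the shift+add used for 4- and 8-byte array indexing */
	static inline uint64_t sh2add(uint64_t rs1, uint64_t rs2)
	{
		return (rs1 << 2) + rs2;	/* rd = rs1 * 4 + rs2 */
	}

	static inline uint64_t sh3add(uint64_t rs1, uint64_t rs2)
	{
		return (rs1 << 3) + rs2;	/* rd = rs1 * 8 + rs2 */
	}

When Zba is absent, emit_sh2add()/emit_sh3add() fall back to the two-instruction slli+add sequence, which is exactly what the tail-call and per-cpu hunks below stop open-coding.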
@@ -811,8 +811,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
 	 * if (!prog)
 	 *     goto out;
 	 */
-	emit(rv_slli(RV_REG_T0, lo(idx_reg), 2), ctx);
-	emit(rv_add(RV_REG_T0, RV_REG_T0, lo(arr_reg)), ctx);
+	emit_sh2add(RV_REG_T0, lo(idx_reg), lo(arr_reg), ctx);
 	off = offsetof(struct bpf_array, ptrs);
 	if (is_12b_check(off, insn))
 		return -1;
@@ -380,8 +380,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
 	 * if (!prog)
 	 *     goto out;
 	 */
-	emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx);
-	emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx);
+	emit_sh3add(RV_REG_T2, RV_REG_A2, RV_REG_A1, ctx);
 	off = offsetof(struct bpf_array, ptrs);
 	if (is_12b_check(off, insn))
 		return -1;
@@ -1099,12 +1098,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 		/* Load current CPU number in T1 */
 		emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu),
 			RV_REG_TP, ctx);
-		/* << 3 because offsets are 8 bytes */
-		emit_slli(RV_REG_T1, RV_REG_T1, 3, ctx);
 		/* Load address of __per_cpu_offset array in T2 */
 		emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx);
-		/* Add offset of current CPU to __per_cpu_offset */
-		emit_add(RV_REG_T1, RV_REG_T2, RV_REG_T1, ctx);
+		/* Get address of __per_cpu_offset[cpu] in T1 */
+		emit_sh3add(RV_REG_T1, RV_REG_T1, RV_REG_T2, ctx);
 		/* Load __per_cpu_offset[cpu] in T1 */
 		emit_ld(RV_REG_T1, 0, RV_REG_T1, ctx);
 		/* Add the offset to Rd */
@@ -1612,6 +1612,7 @@ struct bpf_link_ops {
 			    struct bpf_link_info *info);
 	int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
 			  struct bpf_map *old_map);
+	__poll_t (*poll)(struct file *file, struct poll_table_struct *pts);
 };
 
 struct bpf_tramp_link {
@@ -1730,9 +1731,9 @@ struct bpf_struct_ops {
 	int (*init_member)(const struct btf_type *t,
 			   const struct btf_member *member,
 			   void *kdata, const void *udata);
-	int (*reg)(void *kdata);
-	void (*unreg)(void *kdata);
-	int (*update)(void *kdata, void *old_kdata);
+	int (*reg)(void *kdata, struct bpf_link *link);
+	void (*unreg)(void *kdata, struct bpf_link *link);
+	int (*update)(void *kdata, void *old_kdata, struct bpf_link *link);
 	int (*validate)(void *kdata);
 	void *cfi_stubs;
 	struct module *owner;
@@ -2333,6 +2334,7 @@ int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
 int bpf_link_settle(struct bpf_link_primer *primer);
 void bpf_link_cleanup(struct bpf_link_primer *primer);
 void bpf_link_inc(struct bpf_link *link);
+struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link);
 void bpf_link_put(struct bpf_link *link);
 int bpf_link_new_fd(struct bpf_link *link);
 struct bpf_link *bpf_link_get_from_fd(u32 ufd);
@@ -2704,6 +2706,11 @@ static inline void bpf_link_inc(struct bpf_link *link)
 {
 }
 
+static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
+{
+	return NULL;
+}
+
 static inline void bpf_link_put(struct bpf_link *link)
 {
 }
@@ -12,6 +12,7 @@
 #include <linux/mutex.h>
 #include <linux/btf_ids.h>
 #include <linux/rcupdate_wait.h>
+#include <linux/poll.h>
 
 struct bpf_struct_ops_value {
 	struct bpf_struct_ops_common_value common;
@@ -56,6 +57,7 @@ struct bpf_struct_ops_map {
 struct bpf_struct_ops_link {
 	struct bpf_link link;
 	struct bpf_map __rcu *map;
+	wait_queue_head_t wait_hup;
 };
 
 static DEFINE_MUTEX(update_mutex);
@@ -757,7 +759,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 		goto unlock;
 	}
 
-	err = st_ops->reg(kdata);
+	err = st_ops->reg(kdata, NULL);
 	if (likely(!err)) {
 		/* This refcnt increment on the map here after
 		 * 'st_ops->reg()' is secure since the state of the
@@ -805,7 +807,7 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
 				     BPF_STRUCT_OPS_STATE_TOBEFREE);
 	switch (prev_state) {
 	case BPF_STRUCT_OPS_STATE_INUSE:
-		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
+		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
 		bpf_map_put(map);
 		return 0;
 	case BPF_STRUCT_OPS_STATE_TOBEFREE:
@@ -1057,10 +1059,7 @@ static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
 	st_map = (struct bpf_struct_ops_map *)
 		 rcu_dereference_protected(st_link->map, true);
 	if (st_map) {
-		/* st_link->map can be NULL if
-		 * bpf_struct_ops_link_create() fails to register.
-		 */
-		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
+		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
 		bpf_map_put(&st_map->map);
 	}
 	kfree(st_link);
@@ -1075,7 +1074,8 @@ static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
 	st_link = container_of(link, struct bpf_struct_ops_link, link);
 	rcu_read_lock();
 	map = rcu_dereference(st_link->map);
-	seq_printf(seq, "map_id:\t%d\n", map->id);
+	if (map)
+		seq_printf(seq, "map_id:\t%d\n", map->id);
 	rcu_read_unlock();
 }
 
@@ -1088,7 +1088,8 @@ static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
 	st_link = container_of(link, struct bpf_struct_ops_link, link);
 	rcu_read_lock();
 	map = rcu_dereference(st_link->map);
-	info->struct_ops.map_id = map->id;
+	if (map)
+		info->struct_ops.map_id = map->id;
 	rcu_read_unlock();
 	return 0;
 }
@@ -1113,6 +1114,10 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
 	mutex_lock(&update_mutex);
 
 	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
+	if (!old_map) {
+		err = -ENOLINK;
+		goto err_out;
+	}
 	if (expected_old_map && old_map != expected_old_map) {
 		err = -EPERM;
 		goto err_out;
@@ -1125,7 +1130,7 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
 		goto err_out;
 	}
 
-	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
+	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link);
 	if (err)
 		goto err_out;
 
@@ -1139,11 +1144,53 @@ err_out:
 	return err;
 }
 
+static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
+{
+	struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
+	struct bpf_struct_ops_map *st_map;
+	struct bpf_map *map;
+
+	mutex_lock(&update_mutex);
+
+	map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
+	if (!map) {
+		mutex_unlock(&update_mutex);
+		return 0;
+	}
+	st_map = container_of(map, struct bpf_struct_ops_map, map);
+
+	st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
+
+	RCU_INIT_POINTER(st_link->map, NULL);
+	/* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
+	 * bpf_map_inc() in bpf_struct_ops_map_link_update().
+	 */
+	bpf_map_put(&st_map->map);
+
+	mutex_unlock(&update_mutex);
+
+	wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP);
+
+	return 0;
+}
+
+static __poll_t bpf_struct_ops_map_link_poll(struct file *file,
+					     struct poll_table_struct *pts)
+{
+	struct bpf_struct_ops_link *st_link = file->private_data;
+
+	poll_wait(file, &st_link->wait_hup, pts);
+
+	return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
+}
+
 static const struct bpf_link_ops bpf_struct_ops_map_lops = {
 	.dealloc = bpf_struct_ops_map_link_dealloc,
+	.detach = bpf_struct_ops_map_link_detach,
 	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
 	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
 	.update_map = bpf_struct_ops_map_link_update,
+	.poll = bpf_struct_ops_map_link_poll,
 };
 
 int bpf_struct_ops_link_create(union bpf_attr *attr)
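With the .detach and .poll handlers above, user space can block until a struct_ops object is unregistered: once detach clears st_link->map, the poll handler reports EPOLLHUP. A hedged user-space sketch (link_fd would come from e.g. bpf_map__attach_struct_ops() plus bpf_link__fd(); error handling trimmed):

	#include <sys/epoll.h>
	#include <unistd.h>

	/* Block until the kernel reports EPOLLHUP on the struct_ops link fd. */
	static int wait_for_detach(int link_fd)
	{
		struct epoll_event ev = { .events = EPOLLHUP }, out;
		int epfd = epoll_create1(0), n;

		if (epfd < 0)
			return -1;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, link_fd, &ev) < 0) {
			close(epfd);
			return -1;
		}
		n = epoll_wait(epfd, &out, 1, -1);	/* woken by wake_up_interruptible_poll() */
		close(epfd);
		return n == 1 ? 0 : -1;
	}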
@@ -1176,13 +1223,21 @@ int bpf_struct_ops_link_create(union bpf_attr *attr)
 	if (err)
 		goto err_out;
 
-	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data);
+	init_waitqueue_head(&link->wait_hup);
+
+	/* Hold the update_mutex such that the subsystem cannot
+	 * do link->ops->detach() before the link is fully initialized.
+	 */
+	mutex_lock(&update_mutex);
+	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
 	if (err) {
+		mutex_unlock(&update_mutex);
 		bpf_link_cleanup(&link_primer);
 		link = NULL;
 		goto err_out;
 	}
 	RCU_INIT_POINTER(link->map, map);
+	mutex_unlock(&update_mutex);
 
 	return bpf_link_settle(&link_primer);
 
kernel/bpf/btf.c (310 lines changed)
@@ -3442,10 +3442,12 @@ btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
 			goto end;					\
 	}
 
-static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
+static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type,
+			      u32 field_mask, u32 *seen_mask,
 			      int *align, int *sz)
 {
 	int type = 0;
+	const char *name = __btf_name_by_offset(btf, var_type->name_off);
 
 	if (field_mask & BPF_SPIN_LOCK) {
 		if (!strcmp(name, "bpf_spin_lock")) {
@@ -3481,7 +3483,7 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
 	field_mask_test_name(BPF_REFCOUNT, "bpf_refcount");
 
 	/* Only return BPF_KPTR when all other types with matchable names fail */
-	if (field_mask & BPF_KPTR) {
+	if (field_mask & BPF_KPTR && !__btf_type_is_struct(var_type)) {
 		type = BPF_KPTR_REF;
 		goto end;
 	}
@@ -3494,140 +3496,232 @@ end:
 
 #undef field_mask_test_name
 
+/* Repeat a number of fields for a specified number of times.
+ *
+ * Copy the fields starting from the first field and repeat them for
+ * repeat_cnt times. The fields are repeated by adding the offset of each
+ * field with
+ *   (i + 1) * elem_size
+ * where i is the repeat index and elem_size is the size of an element.
+ */
+static int btf_repeat_fields(struct btf_field_info *info,
+			     u32 field_cnt, u32 repeat_cnt, u32 elem_size)
+{
+	u32 i, j;
+	u32 cur;
+
+	/* Ensure not repeating fields that should not be repeated. */
+	for (i = 0; i < field_cnt; i++) {
+		switch (info[i].type) {
+		case BPF_KPTR_UNREF:
+		case BPF_KPTR_REF:
+		case BPF_KPTR_PERCPU:
+		case BPF_LIST_HEAD:
+		case BPF_RB_ROOT:
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	cur = field_cnt;
+	for (i = 0; i < repeat_cnt; i++) {
+		memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0]));
+		for (j = 0; j < field_cnt; j++)
+			info[cur++].off += (i + 1) * elem_size;
+	}
+
+	return 0;
+}
+
+static int btf_find_struct_field(const struct btf *btf,
+				 const struct btf_type *t, u32 field_mask,
+				 struct btf_field_info *info, int info_cnt,
+				 u32 level);
+
+/* Find special fields in the struct type of a field.
+ *
+ * This function is used to find fields of special types that is not a
+ * global variable or a direct field of a struct type. It also handles the
+ * repetition if it is the element type of an array.
+ */
+static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *t,
+				  u32 off, u32 nelems,
+				  u32 field_mask, struct btf_field_info *info,
+				  int info_cnt, u32 level)
+{
+	int ret, err, i;
+
+	level++;
+	if (level >= MAX_RESOLVE_DEPTH)
+		return -E2BIG;
+
+	ret = btf_find_struct_field(btf, t, field_mask, info, info_cnt, level);
+
+	if (ret <= 0)
+		return ret;
+
+	/* Shift the offsets of the nested struct fields to the offsets
+	 * related to the container.
+	 */
+	for (i = 0; i < ret; i++)
+		info[i].off += off;
+
+	if (nelems > 1) {
+		err = btf_repeat_fields(info, ret, nelems - 1, t->size);
+		if (err == 0)
+			ret *= nelems;
+		else
+			ret = err;
+	}
+
+	return ret;
+}
+
+static int btf_find_field_one(const struct btf *btf,
+			      const struct btf_type *var,
+			      const struct btf_type *var_type,
+			      int var_idx,
+			      u32 off, u32 expected_size,
+			      u32 field_mask, u32 *seen_mask,
+			      struct btf_field_info *info, int info_cnt,
+			      u32 level)
+{
+	int ret, align, sz, field_type;
+	struct btf_field_info tmp;
+	const struct btf_array *array;
+	u32 i, nelems = 1;
+
+	/* Walk into array types to find the element type and the number of
+	 * elements in the (flattened) array.
+	 */
+	for (i = 0; i < MAX_RESOLVE_DEPTH && btf_type_is_array(var_type); i++) {
+		array = btf_array(var_type);
+		nelems *= array->nelems;
+		var_type = btf_type_by_id(btf, array->type);
+	}
+	if (i == MAX_RESOLVE_DEPTH)
+		return -E2BIG;
+	if (nelems == 0)
+		return 0;
+
+	field_type = btf_get_field_type(btf, var_type,
+					field_mask, seen_mask, &align, &sz);
+	/* Look into variables of struct types */
+	if (!field_type && __btf_type_is_struct(var_type)) {
+		sz = var_type->size;
+		if (expected_size && expected_size != sz * nelems)
+			return 0;
+		ret = btf_find_nested_struct(btf, var_type, off, nelems, field_mask,
+					     &info[0], info_cnt, level);
+		return ret;
+	}
+
+	if (field_type == 0)
+		return 0;
+	if (field_type < 0)
+		return field_type;
+
+	if (expected_size && expected_size != sz * nelems)
+		return 0;
+	if (off % align)
+		return 0;
+
+	switch (field_type) {
+	case BPF_SPIN_LOCK:
+	case BPF_TIMER:
+	case BPF_WORKQUEUE:
+	case BPF_LIST_NODE:
+	case BPF_RB_NODE:
+	case BPF_REFCOUNT:
+		ret = btf_find_struct(btf, var_type, off, sz, field_type,
+				      info_cnt ? &info[0] : &tmp);
+		if (ret < 0)
+			return ret;
+		break;
+	case BPF_KPTR_UNREF:
+	case BPF_KPTR_REF:
+	case BPF_KPTR_PERCPU:
+		ret = btf_find_kptr(btf, var_type, off, sz,
+				    info_cnt ? &info[0] : &tmp);
+		if (ret < 0)
+			return ret;
+		break;
+	case BPF_LIST_HEAD:
+	case BPF_RB_ROOT:
+		ret = btf_find_graph_root(btf, var, var_type,
+					  var_idx, off, sz,
+					  info_cnt ? &info[0] : &tmp,
+					  field_type);
+		if (ret < 0)
+			return ret;
+		break;
+	default:
+		return -EFAULT;
+	}
+
+	if (ret == BTF_FIELD_IGNORE)
+		return 0;
+	if (nelems > info_cnt)
+		return -E2BIG;
+	if (nelems > 1) {
+		ret = btf_repeat_fields(info, 1, nelems - 1, sz);
+		if (ret < 0)
+			return ret;
+	}
+	return nelems;
+}
+
 static int btf_find_struct_field(const struct btf *btf,
 				 const struct btf_type *t, u32 field_mask,
-				 struct btf_field_info *info, int info_cnt)
+				 struct btf_field_info *info, int info_cnt,
+				 u32 level)
 {
-	int ret, idx = 0, align, sz, field_type;
+	int ret, idx = 0;
 	const struct btf_member *member;
-	struct btf_field_info tmp;
 	u32 i, off, seen_mask = 0;
 
 	for_each_member(i, t, member) {
 		const struct btf_type *member_type = btf_type_by_id(btf,
 								    member->type);
 
-		field_type = btf_get_field_type(__btf_name_by_offset(btf, member_type->name_off),
-						field_mask, &seen_mask, &align, &sz);
-		if (field_type == 0)
-			continue;
-		if (field_type < 0)
-			return field_type;
-
 		off = __btf_member_bit_offset(t, member);
 		if (off % 8)
 			/* valid C code cannot generate such BTF */
 			return -EINVAL;
 		off /= 8;
-		if (off % align)
-			continue;
-
-		switch (field_type) {
-		case BPF_SPIN_LOCK:
-		case BPF_TIMER:
-		case BPF_WORKQUEUE:
-		case BPF_LIST_NODE:
-		case BPF_RB_NODE:
-		case BPF_REFCOUNT:
-			ret = btf_find_struct(btf, member_type, off, sz, field_type,
-					      idx < info_cnt ? &info[idx] : &tmp);
-			if (ret < 0)
-				return ret;
-			break;
-		case BPF_KPTR_UNREF:
-		case BPF_KPTR_REF:
-		case BPF_KPTR_PERCPU:
-			ret = btf_find_kptr(btf, member_type, off, sz,
-					    idx < info_cnt ? &info[idx] : &tmp);
-			if (ret < 0)
-				return ret;
-			break;
-		case BPF_LIST_HEAD:
-		case BPF_RB_ROOT:
-			ret = btf_find_graph_root(btf, t, member_type,
-						  i, off, sz,
-						  idx < info_cnt ? &info[idx] : &tmp,
-						  field_type);
-			if (ret < 0)
-				return ret;
-			break;
-		default:
-			return -EFAULT;
-		}
-
-		if (ret == BTF_FIELD_IGNORE)
-			continue;
-		if (idx >= info_cnt)
-			return -E2BIG;
-		++idx;
+		ret = btf_find_field_one(btf, t, member_type, i,
+					 off, 0,
+					 field_mask, &seen_mask,
+					 &info[idx], info_cnt - idx, level);
+		if (ret < 0)
+			return ret;
+		idx += ret;
 	}
 	return idx;
 }
 
 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
 				u32 field_mask, struct btf_field_info *info,
-				int info_cnt)
+				int info_cnt, u32 level)
 {
-	int ret, idx = 0, align, sz, field_type;
+	int ret, idx = 0;
 	const struct btf_var_secinfo *vsi;
-	struct btf_field_info tmp;
 	u32 i, off, seen_mask = 0;
 
 	for_each_vsi(i, t, vsi) {
 		const struct btf_type *var = btf_type_by_id(btf, vsi->type);
 		const struct btf_type *var_type = btf_type_by_id(btf, var->type);
 
-		field_type = btf_get_field_type(__btf_name_by_offset(btf, var_type->name_off),
-						field_mask, &seen_mask, &align, &sz);
-		if (field_type == 0)
-			continue;
-		if (field_type < 0)
-			return field_type;
-
 		off = vsi->offset;
-		if (vsi->size != sz)
-			continue;
-		if (off % align)
-			continue;
-
-		switch (field_type) {
-		case BPF_SPIN_LOCK:
-		case BPF_TIMER:
-		case BPF_WORKQUEUE:
-		case BPF_LIST_NODE:
-		case BPF_RB_NODE:
-		case BPF_REFCOUNT:
-			ret = btf_find_struct(btf, var_type, off, sz, field_type,
-					      idx < info_cnt ? &info[idx] : &tmp);
-			if (ret < 0)
-				return ret;
-			break;
-		case BPF_KPTR_UNREF:
-		case BPF_KPTR_REF:
-		case BPF_KPTR_PERCPU:
-			ret = btf_find_kptr(btf, var_type, off, sz,
-					    idx < info_cnt ? &info[idx] : &tmp);
-			if (ret < 0)
-				return ret;
-			break;
-		case BPF_LIST_HEAD:
-		case BPF_RB_ROOT:
-			ret = btf_find_graph_root(btf, var, var_type,
-						  -1, off, sz,
-						  idx < info_cnt ? &info[idx] : &tmp,
-						  field_type);
-			if (ret < 0)
-				return ret;
-			break;
-		default:
-			return -EFAULT;
-		}
-
-		if (ret == BTF_FIELD_IGNORE)
-			continue;
-		if (idx >= info_cnt)
-			return -E2BIG;
-		++idx;
+		ret = btf_find_field_one(btf, var, var_type, -1, off, vsi->size,
+					 field_mask, &seen_mask,
+					 &info[idx], info_cnt - idx,
+					 level);
+		if (ret < 0)
+			return ret;
+		idx += ret;
 	}
 	return idx;
 }
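As a worked example of the repetition scheme above (sizes illustrative): for a global `struct bpf_list_head ghead[3]` whose first element's field sits at offset 16 and whose element size is 16 bytes, btf_repeat_fields() clones the field info twice, producing entries at offsets 16, 16 + 1*16 = 32 and 16 + 2*16 = 48 — one per array element.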
@@ -3637,9 +3731,9 @@ static int btf_find_field(const struct btf *btf, const struct btf_type *t,
 			  int info_cnt)
 {
 	if (__btf_type_is_struct(t))
-		return btf_find_struct_field(btf, t, field_mask, info, info_cnt);
+		return btf_find_struct_field(btf, t, field_mask, info, info_cnt, 0);
 	else if (btf_type_is_datasec(t))
-		return btf_find_datasec_var(btf, t, field_mask, info, info_cnt);
+		return btf_find_datasec_var(btf, t, field_mask, info, info_cnt, 0);
 	return -EINVAL;
 }
 
@@ -6693,7 +6787,7 @@ int btf_struct_access(struct bpf_verifier_log *log,
 	for (i = 0; i < rec->cnt; i++) {
 		struct btf_field *field = &rec->fields[i];
 		u32 offset = field->offset;
-		if (off < offset + btf_field_type_size(field->type) && offset < off + size) {
+		if (off < offset + field->size && offset < off + size) {
 			bpf_log(log,
 				"direct access to %s is disallowed\n",
 				btf_field_type_name(field->type));
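Taken together, the btf.c changes above are what let BPF programs declare special fields inside arrays and nested struct types. A selftest-style BPF-C sketch of what now verifies (the `private()` and `__contains()` helpers follow the conventions of the selftests' bpf_experimental.h; all names are hypothetical):

	struct node_data {
		struct bpf_list_node node;
		__u64 payload;
	};

	#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

	private(A) struct bpf_spin_lock glock;
	/* three list heads in one global array: btf_find_field_one() walks the
	 * array type and btf_repeat_fields() clones the field info per element */
	private(A) struct bpf_list_head ghead[3] __contains(node_data, node);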
@@ -2744,6 +2744,122 @@ __bpf_kfunc void bpf_preempt_enable(void)
 	preempt_enable();
 }
 
+struct bpf_iter_bits {
+	__u64 __opaque[2];
+} __aligned(8);
+
+struct bpf_iter_bits_kern {
+	union {
+		unsigned long *bits;
+		unsigned long bits_copy;
+	};
+	u32 nr_bits;
+	int bit;
+} __aligned(8);
+
+/**
+ * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
+ * @it: The new bpf_iter_bits to be created
+ * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
+ * @nr_words: The size of the specified memory area, measured in 8-byte units.
+ * Due to the limitation of memalloc, it can't be greater than 512.
+ *
+ * This function initializes a new bpf_iter_bits structure for iterating over
+ * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
+ * copies the data of the memory area to the newly created bpf_iter_bits @it for
+ * subsequent iteration operations.
+ *
+ * On success, 0 is returned. On failure, ERR is returned.
+ */
+__bpf_kfunc int
+bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
+{
+	struct bpf_iter_bits_kern *kit = (void *)it;
+	u32 nr_bytes = nr_words * sizeof(u64);
+	u32 nr_bits = BYTES_TO_BITS(nr_bytes);
+	int err;
+
+	BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits));
+	BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) !=
+		     __alignof__(struct bpf_iter_bits));
+
+	kit->nr_bits = 0;
+	kit->bits_copy = 0;
+	kit->bit = -1;
+
+	if (!unsafe_ptr__ign || !nr_words)
+		return -EINVAL;
+
+	/* Optimization for u64 mask */
+	if (nr_bits == 64) {
+		err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign);
+		if (err)
+			return -EFAULT;
+
+		kit->nr_bits = nr_bits;
+		return 0;
+	}
+
+	/* Fallback to memalloc */
+	kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes);
+	if (!kit->bits)
+		return -ENOMEM;
+
+	err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign);
+	if (err) {
+		bpf_mem_free(&bpf_global_ma, kit->bits);
+		return err;
+	}
+
+	kit->nr_bits = nr_bits;
+	return 0;
+}
+
+/**
+ * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
+ * @it: The bpf_iter_bits to be checked
+ *
+ * This function returns a pointer to a number representing the value of the
+ * next bit in the bits.
+ *
+ * If there are no further bits available, it returns NULL.
+ */
+__bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
+{
+	struct bpf_iter_bits_kern *kit = (void *)it;
+	u32 nr_bits = kit->nr_bits;
+	const unsigned long *bits;
+	int bit;
+
+	if (nr_bits == 0)
+		return NULL;
+
+	bits = nr_bits == 64 ? &kit->bits_copy : kit->bits;
+	bit = find_next_bit(bits, nr_bits, kit->bit + 1);
+	if (bit >= nr_bits) {
+		kit->nr_bits = 0;
+		return NULL;
+	}
+
+	kit->bit = bit;
+	return &kit->bit;
+}
+
+/**
+ * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
+ * @it: The bpf_iter_bits to be destroyed
+ *
+ * Destroy the resource associated with the bpf_iter_bits.
+ */
+__bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
+{
+	struct bpf_iter_bits_kern *kit = (void *)it;
+
+	if (kit->nr_bits <= 64)
+		return;
+	bpf_mem_free(&bpf_global_ma, kit->bits);
+}
+
 __bpf_kfunc_end_defs();
 
 BTF_KFUNCS_START(generic_btf_ids)
@@ -2826,6 +2942,9 @@ BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
 BTF_ID_FLAGS(func, bpf_wq_start)
 BTF_ID_FLAGS(func, bpf_preempt_disable)
 BTF_ID_FLAGS(func, bpf_preempt_enable)
+BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
+BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
 BTF_KFUNCS_END(common_btf_ids)
 
 static const struct btf_kfunc_id_set common_kfunc_set = {
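From the BPF program side, the new kfuncs are used as an open-coded iterator. A hedged sketch matching the kernel signatures above (in real programs the __ksym extern declarations would typically come from vmlinux.h or bpf/bpf_kfuncs.h):

	/* opaque iterator state; must match the kernel's layout above */
	struct bpf_iter_bits {
		__u64 __opaque[2];
	} __attribute__((aligned(8)));

	extern int bpf_iter_bits_new(struct bpf_iter_bits *it,
				     const __u64 *unsafe_ptr__ign, __u32 nr_words) __ksym;
	extern int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
	extern void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;

	/* count the set bits in one u64 word */
	static __u32 count_bits(const __u64 *word)
	{
		struct bpf_iter_bits it;
		__u32 nr = 0;
		int *bit;

		if (!bpf_iter_bits_new(&it, word, 1)) {
			while ((bit = bpf_iter_bits_next(&it)))
				nr++;		/* *bit is the index of a set bit */
		}
		bpf_iter_bits_destroy(&it);	/* safe even if new() failed: nr_bits stays 0 */
		return nr;
	}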
@@ -3151,6 +3151,13 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
 }
 #endif
 
+static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts)
+{
+	struct bpf_link *link = file->private_data;
+
+	return link->ops->poll(file, pts);
+}
+
 static const struct file_operations bpf_link_fops = {
 #ifdef CONFIG_PROC_FS
 	.show_fdinfo = bpf_link_show_fdinfo,
@@ -3160,6 +3167,16 @@ static const struct file_operations bpf_link_fops = {
 	.write = bpf_dummy_write,
 };
 
+static const struct file_operations bpf_link_fops_poll = {
+#ifdef CONFIG_PROC_FS
+	.show_fdinfo = bpf_link_show_fdinfo,
+#endif
+	.release = bpf_link_release,
+	.read = bpf_dummy_read,
+	.write = bpf_dummy_write,
+	.poll = bpf_link_poll,
+};
+
 static int bpf_link_alloc_id(struct bpf_link *link)
 {
 	int id;
@@ -3202,7 +3219,9 @@ int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
 		return id;
 	}
 
-	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
+	file = anon_inode_getfile("bpf_link",
+				  link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
+				  link, O_CLOEXEC);
 	if (IS_ERR(file)) {
 		bpf_link_free_id(id);
 		put_unused_fd(fd);
@@ -3230,7 +3249,9 @@ int bpf_link_settle(struct bpf_link_primer *primer)
 
 int bpf_link_new_fd(struct bpf_link *link)
 {
-	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
+	return anon_inode_getfd("bpf-link",
+				link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops,
+				link, O_CLOEXEC);
 }
 
 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
@@ -3240,7 +3261,7 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd)
 
 	if (!f.file)
 		return ERR_PTR(-EBADF);
-	if (f.file->f_op != &bpf_link_fops) {
+	if (f.file->f_op != &bpf_link_fops && f.file->f_op != &bpf_link_fops_poll) {
 		fdput(f);
 		return ERR_PTR(-EINVAL);
 	}
@@ -4972,7 +4993,7 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
 						  uattr);
 	else if (f.file->f_op == &btf_fops)
 		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
-	else if (f.file->f_op == &bpf_link_fops)
+	else if (f.file->f_op == &bpf_link_fops || f.file->f_op == &bpf_link_fops_poll)
 		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
 					      attr, uattr);
 	else
@@ -5107,7 +5128,7 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
 	if (!file)
 		return -EBADF;
 
-	if (file->f_op == &bpf_link_fops) {
+	if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) {
 		struct bpf_link *link = file->private_data;
 
 		if (link->ops == &bpf_raw_tp_link_lops) {
@@ -5417,10 +5438,11 @@ static int link_detach(union bpf_attr *attr)
 	return ret;
 }
 
-static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
+struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
 {
 	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
 }
+EXPORT_SYMBOL(bpf_link_inc_not_zero);
 
 struct bpf_link *bpf_link_by_id(u32 id)
 {
@@ -5448,7 +5448,7 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
 		 * this program. To check that [x1, x2) overlaps with [y1, y2),
 		 * it is sufficient to check x1 < y2 && y1 < x2.
 		 */
-		if (reg->smin_value + off < p + btf_field_type_size(field->type) &&
+		if (reg->smin_value + off < p + field->size &&
		    p < reg->umax_value + off + size) {
			switch (field->type) {
			case BPF_KPTR_UNREF:
@@ -11648,7 +11648,7 @@ __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
 
 	node_off = reg->off + reg->var_off.value;
 	field = reg_find_field_offset(reg, node_off, node_field_type);
-	if (!field || field->offset != node_off) {
+	if (!field) {
 		verbose(env, "%s not found at offset=%u\n", node_type_name, node_off);
 		return -EINVAL;
 	}
@@ -15706,4 +15706,5 @@ static void __exit test_bpf_exit(void)
 module_init(test_bpf_init);
 module_exit(test_bpf_exit);
 
+MODULE_DESCRIPTION("Testsuite for BPF interpreter and BPF JIT compiler");
 MODULE_LICENSE("GPL");
@@ -272,12 +272,12 @@ static int bpf_dummy_init_member(const struct btf_type *t,
 	return -EOPNOTSUPP;
 }
 
-static int bpf_dummy_reg(void *kdata)
+static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
 {
 	return -EOPNOTSUPP;
 }
 
-static void bpf_dummy_unreg(void *kdata)
+static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
 {
 }
 
@@ -260,17 +260,17 @@ static int bpf_tcp_ca_check_member(const struct btf_type *t,
 	return 0;
 }
 
-static int bpf_tcp_ca_reg(void *kdata)
+static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link)
 {
 	return tcp_register_congestion_control(kdata);
 }
 
-static void bpf_tcp_ca_unreg(void *kdata)
+static void bpf_tcp_ca_unreg(void *kdata, struct bpf_link *link)
 {
 	tcp_unregister_congestion_control(kdata);
 }
 
-static int bpf_tcp_ca_update(void *kdata, void *old_kdata)
+static int bpf_tcp_ca_update(void *kdata, void *old_kdata, struct bpf_link *link)
 {
 	return tcp_update_congestion_control(kdata, old_kdata);
 }
@@ -848,7 +848,7 @@ out:
 }
 
 static void
-codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
+codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool populate_links)
 {
 	struct bpf_map *map;
 	char ident[256];
@@ -888,6 +888,14 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
 			printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
 			       i, ident);
 		}
+
+		if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
+			codegen("\
+				\n\
+				s->maps[%zu].link = &obj->links.%s;\n\
+				",
+				i, ident);
+		}
 		i++;
 	}
 }
@@ -1141,7 +1149,7 @@ static void gen_st_ops_shadow_init(struct btf *btf, struct bpf_object *obj)
 static int do_skeleton(int argc, char **argv)
 {
 	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
-	size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
+	size_t map_cnt = 0, prog_cnt = 0, attach_map_cnt = 0, file_sz, mmap_sz;
 	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
 	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
 	struct bpf_object *obj = NULL;
@@ -1225,6 +1233,10 @@ static int do_skeleton(int argc, char **argv)
 				bpf_map__name(map));
 			continue;
 		}
+
+		if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS)
+			attach_map_cnt++;
+
 		map_cnt++;
 	}
 	bpf_object__for_each_program(prog, obj) {
@@ -1297,6 +1309,9 @@ static int do_skeleton(int argc, char **argv)
 				bpf_program__name(prog));
 		}
 		printf("\t} progs;\n");
+	}
+
+	if (prog_cnt + attach_map_cnt) {
 		printf("\tstruct {\n");
 		bpf_object__for_each_program(prog, obj) {
 			if (use_loader)
@@ -1306,6 +1321,19 @@ static int do_skeleton(int argc, char **argv)
 			printf("\t\tstruct bpf_link *%s;\n",
 			       bpf_program__name(prog));
 		}
+
+		bpf_object__for_each_map(map, obj) {
+			if (!get_map_ident(map, ident, sizeof(ident)))
+				continue;
+			if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
+				continue;
+
+			if (use_loader)
+				printf("t\tint %s_fd;\n", ident);
+			else
+				printf("\t\tstruct bpf_link *%s;\n", ident);
+		}
+
 		printf("\t} links;\n");
 	}
 
@@ -1448,7 +1476,7 @@ static int do_skeleton(int argc, char **argv)
 		obj_name
 	);
 
-	codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
+	codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/, true /*links*/);
 	codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);
 
 	codegen("\
@@ -1786,7 +1814,7 @@ static int do_subskeleton(int argc, char **argv)
 		}
 	}
 
-	codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
+	codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/, false /*links*/);
 	codegen_progs_skeleton(obj, prog_cnt, false /*links*/);
 
 	codegen("\
@@ -2379,15 +2407,6 @@ out:
 	return err;
 }
 
-static int btfgen_remap_id(__u32 *type_id, void *ctx)
-{
-	unsigned int *ids = ctx;
-
-	*type_id = ids[*type_id];
-
-	return 0;
-}
-
 /* Generate BTF from relocation information previously recorded */
 static struct btf *btfgen_get_btf(struct btfgen_info *info)
 {
@@ -2467,10 +2486,15 @@ static struct btf *btfgen_get_btf(struct btfgen_info *info)
 	/* second pass: fix up type ids */
 	for (i = 1; i < btf__type_cnt(btf_new); i++) {
 		struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);
+		struct btf_field_iter it;
+		__u32 *type_id;
 
-		err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
+		err = btf_field_iter_init(&it, btf_type, BTF_FIELD_ITER_IDS);
 		if (err)
 			goto err_out;
+
+		while ((type_id = btf_field_iter_next(&it)))
+			*type_id = ids[*type_id];
 	}
 
 	free(ids);
@@ -29,6 +29,7 @@ enum bpf_link_type___local {
 };
 
 extern const void bpf_link_fops __ksym;
+extern const void bpf_link_fops_poll __ksym __weak;
 extern const void bpf_map_fops __ksym;
 extern const void bpf_prog_fops __ksym;
 extern const void btf_fops __ksym;
@@ -84,7 +85,11 @@ int iter(struct bpf_iter__task_file *ctx)
 		fops = &btf_fops;
 		break;
 	case BPF_OBJ_LINK:
-		fops = &bpf_link_fops;
+		if (&bpf_link_fops_poll &&
+		    file->f_op == &bpf_link_fops_poll)
+			fops = &bpf_link_fops_poll;
+		else
+			fops = &bpf_link_fops;
 		break;
 	default:
 		return 0;
@@ -40,17 +40,17 @@ struct {
 
 const volatile __u32 num_cpu = 1;
 const volatile __u32 num_metric = 1;
-#define MAX_NUM_MATRICS 4
+#define MAX_NUM_METRICS 4
 
 SEC("fentry/XXX")
 int BPF_PROG(fentry_XXX)
 {
-	struct bpf_perf_event_value___local *ptrs[MAX_NUM_MATRICS];
+	struct bpf_perf_event_value___local *ptrs[MAX_NUM_METRICS];
 	u32 key = bpf_get_smp_processor_id();
 	u32 i;
 
 	/* look up before reading, to reduce error */
-	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+	for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
 		u32 flag = i;
 
 		ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
@@ -58,7 +58,7 @@ int BPF_PROG(fentry_XXX)
 			return 0;
 	}
 
-	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+	for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
 		struct bpf_perf_event_value___local reading;
 		int err;
 
@@ -99,14 +99,14 @@ fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after)
 SEC("fexit/XXX")
 int BPF_PROG(fexit_XXX)
 {
-	struct bpf_perf_event_value___local readings[MAX_NUM_MATRICS];
+	struct bpf_perf_event_value___local readings[MAX_NUM_METRICS];
 	u32 cpu = bpf_get_smp_processor_id();
 	u32 i, zero = 0;
 	int err;
 	u64 *count;
 
 	/* read all events before updating the maps, to reduce error */
-	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
+	for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
 		err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
 						(void *)(readings + i),
 						sizeof(*readings));
@@ -116,7 +116,7 @@ int BPF_PROG(fexit_XXX)
 	count = bpf_map_lookup_elem(&counts, &zero);
 	if (count) {
 		*count += 1;
-		for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++)
+		for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++)
 			fexit_update_maps(i, &readings[i]);
 	}
 	return 0;
@@ -1739,9 +1739,8 @@ struct btf_pipe {
 	struct hashmap *str_off_map; /* map string offsets from src to dst */
 };
 
-static int btf_rewrite_str(__u32 *str_off, void *ctx)
+static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off)
 {
-	struct btf_pipe *p = ctx;
 	long mapped_off;
 	int off, err;
 
@@ -1774,7 +1773,9 @@ static int btf_rewrite_str(__u32 *str_off, void *ctx)
 int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
 {
 	struct btf_pipe p = { .src = src_btf, .dst = btf };
+	struct btf_field_iter it;
 	struct btf_type *t;
+	__u32 *str_off;
 	int sz, err;
 
 	sz = btf_type_size(src_type);
@@ -1791,28 +1792,19 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
 
 	memcpy(t, src_type, sz);
 
-	err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
+	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
 	if (err)
 		return libbpf_err(err);
 
+	while ((str_off = btf_field_iter_next(&it))) {
+		err = btf_rewrite_str(&p, str_off);
+		if (err)
+			return libbpf_err(err);
+	}
+
 	return btf_commit_type(btf, sz);
 }
 
-static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
-{
-	struct btf *btf = ctx;
-
-	if (!*type_id) /* nothing to do for VOID references */
-		return 0;
-
-	/* we haven't updated btf's type count yet, so
-	 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
-	 * add to all newly added BTF types
-	 */
-	*type_id += btf->start_id + btf->nr_types - 1;
-	return 0;
-}
-
 static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
 
@@ -1858,6 +1850,9 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
 	memcpy(t, src_btf->types_data, data_sz);
 
 	for (i = 0; i < cnt; i++) {
+		struct btf_field_iter it;
+		__u32 *type_id, *str_off;
+
 		sz = btf_type_size(t);
 		if (sz < 0) {
 			/* unlikely, has to be corrupted src_btf */
@@ -1869,14 +1864,30 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf)
 		*off = t - btf->types_data;
 
 		/* add, dedup, and remap strings referenced by this BTF type */
-		err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
+		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
 		if (err)
 			goto err_out;
+		while ((str_off = btf_field_iter_next(&it))) {
+			err = btf_rewrite_str(&p, str_off);
+			if (err)
+				goto err_out;
+		}
+
+		/* remap all type IDs referenced from this BTF type */
+		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
+		if (err)
+			goto err_out;
 
-		/* remap all type IDs referenced from this BTF type */
-		err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
-		if (err)
-			goto err_out;
+		while ((type_id = btf_field_iter_next(&it))) {
+			if (!*type_id) /* nothing to do for VOID references */
+				continue;
+
+			/* we haven't updated btf's type count yet, so
+			 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
+			 * add to all newly added BTF types
+			 */
+			*type_id += btf->start_id + btf->nr_types - 1;
+		}
 
 		/* go to next type data and type offset index entry */
 		t += sz;
@@ -3453,11 +3464,19 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
 	int i, r;
 
 	for (i = 0; i < d->btf->nr_types; i++) {
+		struct btf_field_iter it;
 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
+		__u32 *str_off;
 
-		r = btf_type_visit_str_offs(t, fn, ctx);
+		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
 		if (r)
 			return r;
+
+		while ((str_off = btf_field_iter_next(&it))) {
+			r = fn(str_off, ctx);
+			if (r)
+				return r;
+		}
 	}
 
 	if (!d->btf_ext)
@@ -4919,10 +4938,23 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
 
 	for (i = 0; i < d->btf->nr_types; i++) {
 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
+		struct btf_field_iter it;
+		__u32 *type_id;
 
-		r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
+		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
 		if (r)
 			return r;
+
+		while ((type_id = btf_field_iter_next(&it))) {
+			__u32 resolved_id, new_id;
+
+			resolved_id = resolve_type_id(d, *type_id);
+			new_id = d->hypot_map[resolved_id];
+			if (new_id > BTF_MAX_NR_TYPES)
+				return -EINVAL;
+
+			*type_id = new_id;
+		}
 	}
 
 	if (!d->btf_ext)
@@ -5003,134 +5035,166 @@ struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
 	return btf__parse_split(path, vmlinux_btf);
 }
 
-int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
-{
-	int i, n, err;
-
-	switch (btf_kind(t)) {
-	case BTF_KIND_INT:
-	case BTF_KIND_FLOAT:
-	case BTF_KIND_ENUM:
-	case BTF_KIND_ENUM64:
-		return 0;
-
-	case BTF_KIND_FWD:
-	case BTF_KIND_CONST:
-	case BTF_KIND_VOLATILE:
-	case BTF_KIND_RESTRICT:
-	case BTF_KIND_PTR:
-	case BTF_KIND_TYPEDEF:
-	case BTF_KIND_FUNC:
-	case BTF_KIND_VAR:
-	case BTF_KIND_DECL_TAG:
-	case BTF_KIND_TYPE_TAG:
-		return visit(&t->type, ctx);
-
-	case BTF_KIND_ARRAY: {
-		struct btf_array *a = btf_array(t);
-
-		err = visit(&a->type, ctx);
-		err = err ?: visit(&a->index_type, ctx);
-		return err;
-	}
-
-	case BTF_KIND_STRUCT:
-	case BTF_KIND_UNION: {
-		struct btf_member *m = btf_members(t);
-
-		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
-			err = visit(&m->type, ctx);
-			if (err)
-				return err;
-		}
-		return 0;
-	}
-
-	case BTF_KIND_FUNC_PROTO: {
-		struct btf_param *m = btf_params(t);
-
-		err = visit(&t->type, ctx);
-		if (err)
-			return err;
-		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
-			err = visit(&m->type, ctx);
-			if (err)
-				return err;
-		}
-		return 0;
-	}
-
-	case BTF_KIND_DATASEC: {
-		struct btf_var_secinfo *m = btf_var_secinfos(t);
-
-		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
-			err = visit(&m->type, ctx);
-			if (err)
-				return err;
-		}
-		return 0;
-	}
-
-	default:
-		return -EINVAL;
-	}
-}
-
-int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
-{
-	int i, n, err;
-
-	err = visit(&t->name_off, ctx);
-	if (err)
-		return err;
-
-	switch (btf_kind(t)) {
-	case BTF_KIND_STRUCT:
-	case BTF_KIND_UNION: {
-		struct btf_member *m = btf_members(t);
-
-		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
-			err = visit(&m->name_off, ctx);
-			if (err)
-				return err;
-		}
-		break;
-	}
-	case BTF_KIND_ENUM: {
-		struct btf_enum *m = btf_enum(t);
-
-		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
-			err = visit(&m->name_off, ctx);
-			if (err)
-				return err;
-		}
-		break;
-	}
-	case BTF_KIND_ENUM64: {
-		struct btf_enum64 *m = btf_enum64(t);
-
-		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
-			err = visit(&m->name_off, ctx);
-			if (err)
-				return err;
-		}
-		break;
-	}
-	case BTF_KIND_FUNC_PROTO: {
-		struct btf_param *m = btf_params(t);
-
-		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
-			err = visit(&m->name_off, ctx);
-			if (err)
-				return err;
-		}
-		break;
-	}
-	default:
-		break;
-	}
-
-	return 0;
-}
+int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind)
+{
+	it->p = NULL;
+	it->m_idx = -1;
+	it->off_idx = 0;
+	it->vlen = 0;
+
+	switch (iter_kind) {
+	case BTF_FIELD_ITER_IDS:
+		switch (btf_kind(t)) {
+		case BTF_KIND_UNKN:
+		case BTF_KIND_INT:
+		case BTF_KIND_FLOAT:
+		case BTF_KIND_ENUM:
+		case BTF_KIND_ENUM64:
+			it->desc = (struct btf_field_desc) {};
+			break;
+		case BTF_KIND_FWD:
+		case BTF_KIND_CONST:
+		case BTF_KIND_VOLATILE:
+		case BTF_KIND_RESTRICT:
+		case BTF_KIND_PTR:
+		case BTF_KIND_TYPEDEF:
+		case BTF_KIND_FUNC:
+		case BTF_KIND_VAR:
+		case BTF_KIND_DECL_TAG:
+		case BTF_KIND_TYPE_TAG:
+			it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} };
+			break;
+		case BTF_KIND_ARRAY:
+			it->desc = (struct btf_field_desc) {
+				2, {sizeof(struct btf_type) + offsetof(struct btf_array, type),
+				    sizeof(struct btf_type) + offsetof(struct btf_array, index_type)}
+			};
+			break;
+		case BTF_KIND_STRUCT:
+		case BTF_KIND_UNION:
+			it->desc = (struct btf_field_desc) {
+				0, {},
+				sizeof(struct btf_member),
+				1, {offsetof(struct btf_member, type)}
+			};
+			break;
+		case BTF_KIND_FUNC_PROTO:
+			it->desc = (struct btf_field_desc) {
+				1, {offsetof(struct btf_type, type)},
+				sizeof(struct btf_param),
+				1, {offsetof(struct btf_param, type)}
+			};
+			break;
+		case BTF_KIND_DATASEC:
+			it->desc = (struct btf_field_desc) {
+				0, {},
+				sizeof(struct btf_var_secinfo),
+				1, {offsetof(struct btf_var_secinfo, type)}
+			};
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case BTF_FIELD_ITER_STRS:
+		switch (btf_kind(t)) {
+		case BTF_KIND_UNKN:
+			it->desc = (struct btf_field_desc) {};
+			break;
+		case BTF_KIND_INT:
+		case BTF_KIND_FLOAT:
+		case BTF_KIND_FWD:
+		case BTF_KIND_ARRAY:
+		case BTF_KIND_CONST:
+		case BTF_KIND_VOLATILE:
+		case BTF_KIND_RESTRICT:
+		case BTF_KIND_PTR:
+		case BTF_KIND_TYPEDEF:
+		case BTF_KIND_FUNC:
+		case BTF_KIND_VAR:
+		case BTF_KIND_DECL_TAG:
+		case BTF_KIND_TYPE_TAG:
+		case BTF_KIND_DATASEC:
+			it->desc = (struct btf_field_desc) {
+				1, {offsetof(struct btf_type, name_off)}
+			};
+			break;
+		case BTF_KIND_ENUM:
+			it->desc = (struct btf_field_desc) {
+				1, {offsetof(struct btf_type, name_off)},
+				sizeof(struct btf_enum),
+				1, {offsetof(struct btf_enum, name_off)}
+			};
+			break;
+		case BTF_KIND_ENUM64:
+			it->desc = (struct btf_field_desc) {
+				1, {offsetof(struct btf_type, name_off)},
+				sizeof(struct btf_enum64),
+				1, {offsetof(struct btf_enum64, name_off)}
+			};
+			break;
+		case BTF_KIND_STRUCT:
+		case BTF_KIND_UNION:
+			it->desc = (struct btf_field_desc) {
+				1, {offsetof(struct btf_type, name_off)},
+				sizeof(struct btf_member),
+				1, {offsetof(struct btf_member, name_off)}
+			};
+			break;
+		case BTF_KIND_FUNC_PROTO:
+			it->desc = (struct btf_field_desc) {
+				1, {offsetof(struct btf_type, name_off)},
+				sizeof(struct btf_param),
+				1, {offsetof(struct btf_param, name_off)}
+			};
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (it->desc.m_sz)
+		it->vlen = btf_vlen(t);
+
+	it->p = t;
+	return 0;
+}
+
+__u32 *btf_field_iter_next(struct btf_field_iter *it)
+{
+	if (!it->p)
+		return NULL;
+
+	if (it->m_idx < 0) {
+		if (it->off_idx < it->desc.t_off_cnt)
+			return it->p + it->desc.t_offs[it->off_idx++];
+		/* move to per-member iteration */
+		it->m_idx = 0;
+		it->p += sizeof(struct btf_type);
+		it->off_idx = 0;
+	}
+
+	/* if type doesn't have members, stop */
+	if (it->desc.m_sz == 0) {
+		it->p = NULL;
+		return NULL;
+	}
+
+	if (it->off_idx >= it->desc.m_off_cnt) {
+		/* exhausted this member's fields, go to the next member */
+		it->m_idx++;
+		it->p += it->desc.m_sz;
+		it->off_idx = 0;
+	}
+
+	if (it->m_idx < it->vlen)
+		return it->p + it->desc.m_offs[it->off_idx++];
+
+	it->p = NULL;
+	return NULL;
+}
 
 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
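The iterator replaces the callback-based visitors: callers now pull field pointers in their own loop, so no indirect call is made per field and errors flow through ordinary control flow. The canonical usage pattern, matching btf__add_type() above (remap() here is only a placeholder for whatever transformation the caller applies):

	struct btf_field_iter it;
	__u32 *type_id;
	int err;

	err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
	if (err)
		return err;
	while ((type_id = btf_field_iter_next(&it)))
		*type_id = remap(*type_id);	/* placeholder transformation */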
@@ -229,7 +229,30 @@ static const char * const prog_type_name[] = {
static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
	const char *env_var = "LIBBPF_LOG_LEVEL";
	static enum libbpf_print_level min_level = LIBBPF_INFO;
	static bool initialized;

	if (!initialized) {
		char *verbosity;

		initialized = true;
		verbosity = getenv(env_var);
		if (verbosity) {
			if (strcasecmp(verbosity, "warn") == 0)
				min_level = LIBBPF_WARN;
			else if (strcasecmp(verbosity, "debug") == 0)
				min_level = LIBBPF_DEBUG;
			else if (strcasecmp(verbosity, "info") == 0)
				min_level = LIBBPF_INFO;
			else
				fprintf(stderr, "libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n",
					env_var, verbosity);
		}
	}

	/* if too verbose, skip logging */
	if (level > min_level)
		return 0;

	return vfprintf(stderr, format, args);
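With this change the default stderr logger's verbosity can be tuned per run without recompiling. A minimal sketch of the consumer side (the object file name is hypothetical; note the env var is read only once, on the first default-logger print, so it must be set before the first libbpf call):

#include <stdlib.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* Equivalent to running as: LIBBPF_LOG_LEVEL=debug ./prog.
	 * Only honored by the built-in stderr logger, i.e. when no
	 * custom callback was installed via libbpf_set_print(). */
	setenv("LIBBPF_LOG_LEVEL", "debug", 1);

	struct bpf_object *obj = bpf_object__open("prog.bpf.o"); /* hypothetical object */

	if (!obj)
		return 1;
	bpf_object__close(obj);
	return 0;
}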
@@ -549,6 +572,7 @@ struct bpf_map {
	bool pinned;
	bool reused;
	bool autocreate;
	bool autoattach;
	__u64 map_extra;
};

@@ -1377,6 +1401,7 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
	map->def.value_size = type->size;
	map->def.max_entries = 1;
	map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
	map->autoattach = true;

	map->st_ops = calloc(1, sizeof(*map->st_ops));
	if (!map->st_ops)
@@ -4796,6 +4821,20 @@ int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
	return 0;
}

int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach)
{
	if (!bpf_map__is_struct_ops(map))
		return libbpf_err(-EINVAL);

	map->autoattach = autoattach;
	return 0;
}

bool bpf_map__autoattach(const struct bpf_map *map)
{
	return map->autoattach;
}

int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info;
@@ -12877,8 +12916,10 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
	__u32 zero = 0;
	int err, fd;

	if (!bpf_map__is_struct_ops(map))
	if (!bpf_map__is_struct_ops(map)) {
		pr_warn("map '%s': can't attach non-struct_ops map\n", map->name);
		return libbpf_err_ptr(-EINVAL);
	}

	if (map->fd < 0) {
		pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
@@ -13922,6 +13963,35 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
		 */
	}

	/* Skeleton is created with earlier version of bpftool
	 * which does not support auto-attachment
	 */
	if (s->map_skel_sz < sizeof(struct bpf_map_skeleton))
		return 0;

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map *map = *s->maps[i].map;
		struct bpf_link **link = s->maps[i].link;

		if (!map->autocreate || !map->autoattach)
			continue;

		if (*link)
			continue;

		/* only struct_ops maps can be attached */
		if (!bpf_map__is_struct_ops(map))
			continue;
		*link = bpf_map__attach_struct_ops(map);

		if (!*link) {
			err = -errno;
			pr_warn("map '%s': failed to auto-attach: %d\n",
				bpf_map__name(map), err);
			return libbpf_err(err);
		}
	}

	return 0;
}
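For skeletons generated by a bpftool new enough to emit the link field, the practical effect is that the generated *__attach() helper now also attaches struct_ops maps and stashes the resulting links in s->maps[i].link. A hedged sketch of the consumer side (skeleton and map names are hypothetical):

#include "myops.skel.h"	/* hypothetical skeleton */

int run(void)
{
	struct myops *skel;
	int err;

	skel = myops__open_and_load();
	if (!skel)
		return 1;

	/* struct_ops maps in SEC(".struct_ops.link") are attached here
	 * as well; no explicit bpf_map__attach_struct_ops() is needed */
	err = myops__attach(skel);

	/* ... */
	myops__destroy(skel);	/* also destroys the auto-created links */
	return err;
}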
@@ -13935,6 +14005,18 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
		bpf_link__destroy(*link);
		*link = NULL;
	}

	if (s->map_skel_sz < sizeof(struct bpf_map_skeleton))
		return;

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_link **link = s->maps[i].link;

		if (link) {
			bpf_link__destroy(*link);
			*link = NULL;
		}
	}
}

void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
@@ -13942,8 +14024,7 @@ void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
	if (!s)
		return;

	if (s->progs)
		bpf_object__detach_skeleton(s);
	bpf_object__detach_skeleton(s);
	if (s->obj)
		bpf_object__close(*s->obj);
	free(s->maps);
@@ -98,7 +98,10 @@ typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,

/**
 * @brief **libbpf_set_print()** sets user-provided log callback function to
 * be used for libbpf warnings and informational messages.
 * be used for libbpf warnings and informational messages. If the user callback
 * is not set, messages are logged to stderr by default. The verbosity of these
 * messages can be controlled by setting the environment variable
 * LIBBPF_LOG_LEVEL to either warn, info, or debug.
 * @param fn The log print function. If NULL, libbpf won't print anything.
 * @return Pointer to old print function.
 *
@@ -975,6 +978,23 @@ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);

/**
 * @brief **bpf_map__set_autoattach()** sets whether libbpf has to auto-attach
 * map during BPF skeleton attach phase.
 * @param map the BPF map instance
 * @param autoattach whether to attach map during BPF skeleton attach phase
 * @return 0 on success; negative error code, otherwise
 */
LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach);

/**
 * @brief **bpf_map__autoattach()** returns whether BPF map is configured to
 * auto-attach during BPF skeleton attach phase.
 * @param map the BPF map instance
 * @return true if map is set to auto-attach during skeleton attach phase; false, otherwise
 */
LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map);

/**
 * @brief **bpf_map__fd()** gets the file descriptor of the passed
 * BPF map
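Opting a single map out of auto-attach is then a one-liner between load and attach. A minimal sketch (skeleton and map names are hypothetical):

	skel = myops__open_and_load();		/* hypothetical skeleton */
	if (!skel)
		return 1;

	/* keep the map loaded, but let the caller attach it manually;
	 * returns -EINVAL for maps that are not struct_ops */
	err = bpf_map__set_autoattach(skel->maps.my_ops, false);
	if (!err)
		err = myops__attach(skel);	/* attaches everything else */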
@@ -1669,6 +1689,7 @@ struct bpf_map_skeleton {
	const char *name;
	struct bpf_map **map;
	void **mmaped;
	struct bpf_link **link;
};

struct bpf_prog_skeleton {

@@ -419,6 +419,8 @@ LIBBPF_1.4.0 {

LIBBPF_1.5.0 {
	global:
		bpf_map__autoattach;
		bpf_map__set_autoattach;
		bpf_program__attach_sockmap;
		ring__consume_n;
		ring_buffer__consume_n;
@@ -508,11 +508,33 @@ struct bpf_line_info_min {
	__u32 line_col;
};

enum btf_field_iter_kind {
	BTF_FIELD_ITER_IDS,
	BTF_FIELD_ITER_STRS,
};

struct btf_field_desc {
	/* once-per-type offsets */
	int t_off_cnt, t_offs[2];
	/* member struct size, or zero, if no members */
	int m_sz;
	/* repeated per-member offsets */
	int m_off_cnt, m_offs[1];
};

struct btf_field_iter {
	struct btf_field_desc desc;
	void *p;
	int m_idx;
	int off_idx;
	int vlen;
};

int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind);
__u32 *btf_field_iter_next(struct btf_field_iter *it);

typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
|
||||
return fd;
|
||||
}
|
||||
|
||||
static inline int sys_dup2(int oldfd, int newfd)
|
||||
static inline int sys_dup3(int oldfd, int newfd, int flags)
|
||||
{
|
||||
#ifdef __NR_dup2
|
||||
return syscall(__NR_dup2, oldfd, newfd);
|
||||
#else
|
||||
return syscall(__NR_dup3, oldfd, newfd, 0);
|
||||
#endif
|
||||
return syscall(__NR_dup3, oldfd, newfd, flags);
|
||||
}
|
||||
|
||||
/* Point *fixed_fd* to the same file that *tmp_fd* points to.
|
||||
@ -614,7 +632,7 @@ static inline int reuse_fd(int fixed_fd, int tmp_fd)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = sys_dup2(tmp_fd, fixed_fd);
|
||||
err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC);
|
||||
err = err < 0 ? -errno : 0;
|
||||
close(tmp_fd); /* clean up temporary FD */
|
||||
return err;
|
||||
|
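The dup2() to dup3() switch is not just a rename: dup3() applies O_CLOEXEC atomically while duplicating, whereas with dup2() the new descriptor would exist without the flag until a separate fcntl(), leaving a window for it to leak across a concurrent fork()/exec(). Roughly:

	/* racy: fixed_fd briefly exists without CLOEXEC */
	dup2(tmp_fd, fixed_fd);
	fcntl(fixed_fd, F_SETFD, FD_CLOEXEC);

	/* atomic: fixed_fd is created with CLOEXEC already set */
	dup3(tmp_fd, fixed_fd, O_CLOEXEC);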
@@ -957,19 +957,33 @@ static int check_btf_str_off(__u32 *str_off, void *ctx)
static int linker_sanity_check_btf(struct src_obj *obj)
{
	struct btf_type *t;
	int i, n, err = 0;
	int i, n, err;

	if (!obj->btf)
		return 0;

	n = btf__type_cnt(obj->btf);
	for (i = 1; i < n; i++) {
		struct btf_field_iter it;
		__u32 *type_id, *str_off;

		t = btf_type_by_id(obj->btf, i);

		err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
		err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf);
		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
		if (err)
			return err;
		while ((type_id = btf_field_iter_next(&it))) {
			if (*type_id >= n)
				return -EINVAL;
		}

		err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS);
		if (err)
			return err;
		while ((str_off = btf_field_iter_next(&it))) {
			if (!btf__str_by_offset(obj->btf, *str_off))
				return -EINVAL;
		}
	}

	return 0;
@@ -2234,26 +2248,10 @@ static int linker_fixup_btf(struct src_obj *obj)
	return 0;
}

static int remap_type_id(__u32 *type_id, void *ctx)
{
	int *id_map = ctx;
	int new_id = id_map[*type_id];

	/* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
	if (new_id == 0 && *type_id != 0) {
		pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id);
		return -EINVAL;
	}

	*type_id = id_map[*type_id];

	return 0;
}

static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
{
	const struct btf_type *t;
	int i, j, n, start_id, id;
	int i, j, n, start_id, id, err;
	const char *name;

	if (!obj->btf)
@@ -2324,9 +2322,25 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
	n = btf__type_cnt(linker->btf);
	for (i = start_id; i < n; i++) {
		struct btf_type *dst_t = btf_type_by_id(linker->btf, i);
		struct btf_field_iter it;
		__u32 *type_id;

		if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
			return -EINVAL;
		err = btf_field_iter_init(&it, dst_t, BTF_FIELD_ITER_IDS);
		if (err)
			return err;

		while ((type_id = btf_field_iter_next(&it))) {
			int new_id = obj->btf_type_map[*type_id];

			/* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
			if (new_id == 0 && *type_id != 0) {
				pr_warn("failed to find new ID mapping for original BTF type ID %u\n",
					*type_id);
				return -EINVAL;
			}

			*type_id = obj->btf_type_map[*type_id];
		}
	}

	/* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's
@@ -22,12 +22,12 @@ static int dummy_init_member(const struct btf_type *t,
	return 0;
}

static int dummy_reg(void *kdata)
static int dummy_reg(void *kdata, struct bpf_link *link)
{
	return 0;
}

static void dummy_unreg(void *kdata)
static void dummy_unreg(void *kdata, struct bpf_link *link)
{
}

@@ -820,7 +820,7 @@ static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static int bpf_dummy_reg(void *kdata)
static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops *ops = kdata;

@@ -835,7 +835,7 @@ static int bpf_dummy_reg(void *kdata)
	return 0;
}

static void bpf_dummy_unreg(void *kdata)
static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}

@@ -871,7 +871,7 @@ struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.owner = THIS_MODULE,
};

static int bpf_dummy_reg2(void *kdata)
static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops2 *ops = kdata;
@@ -94,7 +94,8 @@ static int __start_server(int type, const struct sockaddr *addr, socklen_t addrl
	if (settimeo(fd, opts->timeout_ms))
		goto error_close;

	if (opts->post_socket_cb && opts->post_socket_cb(fd, NULL)) {
	if (opts->post_socket_cb &&
	    opts->post_socket_cb(fd, opts->cb_opts)) {
		log_err("Failed to call post_socket_cb");
		goto error_close;
	}
@@ -118,22 +119,32 @@ error_close:
	return -1;
}

int start_server_str(int family, int type, const char *addr_str, __u16 port,
		     const struct network_helper_opts *opts)
{
	struct sockaddr_storage addr;
	socklen_t addrlen;

	if (!opts)
		opts = &default_opts;

	if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
		return -1;

	return __start_server(type, (struct sockaddr *)&addr, addrlen, opts);
}

int start_server(int family, int type, const char *addr_str, __u16 port,
		 int timeout_ms)
{
	struct network_helper_opts opts = {
		.timeout_ms = timeout_ms,
	};
	struct sockaddr_storage addr;
	socklen_t addrlen;

	if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
		return -1;

	return __start_server(type, (struct sockaddr *)&addr, addrlen, &opts);
	return start_server_str(family, type, addr_str, port, &opts);
}

static int reuseport_cb(int fd, const struct post_socket_opts *opts)
static int reuseport_cb(int fd, void *opts)
{
	int on = 1;

@@ -338,9 +349,8 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
	if (settimeo(fd, opts->timeout_ms))
		goto error_close;

	if (opts->cc && opts->cc[0] &&
	    setsockopt(fd, SOL_TCP, TCP_CONGESTION, opts->cc,
		       strlen(opts->cc) + 1))
	if (opts->post_socket_cb &&
	    opts->post_socket_cb(fd, opts->cb_opts))
		goto error_close;

	if (!opts->noconnect)

@@ -21,16 +21,14 @@ typedef __u16 __sum16;
#define VIP_NUM 5
#define MAGIC_BYTES 123

struct post_socket_opts {};

struct network_helper_opts {
	const char *cc;
	int timeout_ms;
	bool must_fail;
	bool noconnect;
	int type;
	int proto;
	int (*post_socket_cb)(int fd, const struct post_socket_opts *opts);
	int (*post_socket_cb)(int fd, void *opts);
	void *cb_opts;
};

/* ipv4 test vector */
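With the typed post_socket_opts hook replaced by a void pointer plus the separate cb_opts field, each test supplies its own context. A minimal sketch of a callback in the new style (names are illustrative; the congestion-control variant mirrors the bpf_tcp_ca changes below):

struct my_cb_opts {
	const char *cc;		/* hypothetical per-test context */
};

static int set_cc_cb(int fd, void *opts)
{
	struct my_cb_opts *o = opts;

	return setsockopt(fd, SOL_TCP, TCP_CONGESTION, o->cc,
			  strlen(o->cc) + 1);
}

/* usage: */
struct my_cb_opts cb = { .cc = "cubic" };
struct network_helper_opts opts = {
	.post_socket_cb = set_cc_cb,
	.cb_opts = &cb,
};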
@@ -50,6 +48,8 @@ struct ipv6_packet {
extern struct ipv6_packet pkt_v6;

int settimeo(int fd, int timeout_ms);
int start_server_str(int family, int type, const char *addr_str, __u16 port,
		     const struct network_helper_opts *opts);
int start_server(int family, int type, const char *addr, __u16 port,
		 int timeout_ms);
int *start_reuseport_server(int family, int type, const char *addr_str,

@@ -451,7 +451,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.freq = 1;
	attr.sample_freq = 1000;
	attr.sample_freq = 10000;
	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
	if (!ASSERT_GE(pfd, 0, "perf_fd"))
		goto cleanup;
|
@ -23,6 +23,11 @@
|
||||
static const unsigned int total_bytes = 10 * 1024 * 1024;
|
||||
static int expected_stg = 0xeB9F;
|
||||
|
||||
struct cb_opts {
|
||||
const char *cc;
|
||||
int map_fd;
|
||||
};
|
||||
|
||||
static int settcpca(int fd, const char *tcp_ca)
|
||||
{
|
||||
int err;
|
||||
@ -34,55 +39,66 @@ static int settcpca(int fd, const char *tcp_ca)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
|
||||
static bool start_test(char *addr_str,
|
||||
const struct network_helper_opts *srv_opts,
|
||||
const struct network_helper_opts *cli_opts,
|
||||
int *srv_fd, int *cli_fd)
|
||||
{
|
||||
int lfd = -1, fd = -1;
|
||||
int err;
|
||||
|
||||
lfd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
|
||||
if (!ASSERT_NEQ(lfd, -1, "socket"))
|
||||
return;
|
||||
|
||||
fd = socket(AF_INET6, SOCK_STREAM, 0);
|
||||
if (!ASSERT_NEQ(fd, -1, "socket")) {
|
||||
close(lfd);
|
||||
return;
|
||||
}
|
||||
|
||||
if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca))
|
||||
goto done;
|
||||
|
||||
if (sk_stg_map) {
|
||||
err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd,
|
||||
&expected_stg, BPF_NOEXIST);
|
||||
if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)"))
|
||||
goto done;
|
||||
}
|
||||
*srv_fd = start_server_str(AF_INET6, SOCK_STREAM, addr_str, 0, srv_opts);
|
||||
if (!ASSERT_NEQ(*srv_fd, -1, "start_server_str"))
|
||||
goto err;
|
||||
|
||||
/* connect to server */
|
||||
err = connect_fd_to_fd(fd, lfd, 0);
|
||||
if (!ASSERT_NEQ(err, -1, "connect"))
|
||||
goto done;
|
||||
*cli_fd = connect_to_fd_opts(*srv_fd, cli_opts);
|
||||
if (!ASSERT_NEQ(*cli_fd, -1, "connect_to_fd_opts"))
|
||||
goto err;
|
||||
|
||||
if (sk_stg_map) {
|
||||
int tmp_stg;
|
||||
return true;
|
||||
|
||||
err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd,
|
||||
&tmp_stg);
|
||||
if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") ||
|
||||
!ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)"))
|
||||
goto done;
|
||||
err:
|
||||
if (*srv_fd != -1) {
|
||||
close(*srv_fd);
|
||||
*srv_fd = -1;
|
||||
}
|
||||
if (*cli_fd != -1) {
|
||||
close(*cli_fd);
|
||||
*cli_fd = -1;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static void do_test(const struct network_helper_opts *opts)
|
||||
{
|
||||
int lfd = -1, fd = -1;
|
||||
|
||||
if (!start_test(NULL, opts, opts, &lfd, &fd))
|
||||
goto done;
|
||||
|
||||
ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data");
|
||||
|
||||
done:
|
||||
close(lfd);
|
||||
close(fd);
|
||||
if (lfd != -1)
|
||||
close(lfd);
|
||||
if (fd != -1)
|
||||
close(fd);
|
||||
}
|
||||
|
||||
static int cc_cb(int fd, void *opts)
|
||||
{
|
||||
struct cb_opts *cb_opts = (struct cb_opts *)opts;
|
||||
|
||||
return settcpca(fd, cb_opts->cc);
|
||||
}
|
||||
|
||||
static void test_cubic(void)
|
||||
{
|
||||
struct cb_opts cb_opts = {
|
||||
.cc = "bpf_cubic",
|
||||
};
|
||||
struct network_helper_opts opts = {
|
||||
.post_socket_cb = cc_cb,
|
||||
.cb_opts = &cb_opts,
|
||||
};
|
||||
struct bpf_cubic *cubic_skel;
|
||||
struct bpf_link *link;
|
||||
|
||||
@ -96,7 +112,7 @@ static void test_cubic(void)
|
||||
return;
|
||||
}
|
||||
|
||||
do_test("bpf_cubic", NULL);
|
||||
do_test(&opts);
|
||||
|
||||
ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called");
|
||||
|
||||
@ -104,8 +120,37 @@ static void test_cubic(void)
|
||||
bpf_cubic__destroy(cubic_skel);
|
||||
}
|
||||
|
||||
static int stg_post_socket_cb(int fd, void *opts)
|
||||
{
|
||||
struct cb_opts *cb_opts = (struct cb_opts *)opts;
|
||||
int err;
|
||||
|
||||
err = settcpca(fd, cb_opts->cc);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = bpf_map_update_elem(cb_opts->map_fd, &fd,
|
||||
&expected_stg, BPF_NOEXIST);
|
||||
if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)"))
|
||||
return err;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void test_dctcp(void)
|
||||
{
|
||||
struct cb_opts cb_opts = {
|
||||
.cc = "bpf_dctcp",
|
||||
};
|
||||
struct network_helper_opts opts = {
|
||||
.post_socket_cb = cc_cb,
|
||||
.cb_opts = &cb_opts,
|
||||
};
|
||||
struct network_helper_opts cli_opts = {
|
||||
.post_socket_cb = stg_post_socket_cb,
|
||||
.cb_opts = &cb_opts,
|
||||
};
|
||||
int lfd = -1, fd = -1, tmp_stg, err;
|
||||
struct bpf_dctcp *dctcp_skel;
|
||||
struct bpf_link *link;
|
||||
|
||||
@ -119,11 +164,25 @@ static void test_dctcp(void)
|
||||
return;
|
||||
}
|
||||
|
||||
do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map);
|
||||
cb_opts.map_fd = bpf_map__fd(dctcp_skel->maps.sk_stg_map);
|
||||
if (!start_test(NULL, &opts, &cli_opts, &lfd, &fd))
|
||||
goto done;
|
||||
|
||||
err = bpf_map_lookup_elem(cb_opts.map_fd, &fd, &tmp_stg);
|
||||
if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") ||
|
||||
!ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)"))
|
||||
goto done;
|
||||
|
||||
ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data");
|
||||
ASSERT_EQ(dctcp_skel->bss->stg_result, expected_stg, "stg_result");
|
||||
|
||||
done:
|
||||
bpf_link__destroy(link);
|
||||
bpf_dctcp__destroy(dctcp_skel);
|
||||
if (lfd != -1)
|
||||
close(lfd);
|
||||
if (fd != -1)
|
||||
close(fd);
|
||||
}
|
||||
|
||||
static char *err_str;
|
||||
@ -171,11 +230,22 @@ static void test_invalid_license(void)
|
||||
static void test_dctcp_fallback(void)
|
||||
{
|
||||
int err, lfd = -1, cli_fd = -1, srv_fd = -1;
|
||||
struct network_helper_opts opts = {
|
||||
.cc = "cubic",
|
||||
};
|
||||
struct bpf_dctcp *dctcp_skel;
|
||||
struct bpf_link *link = NULL;
|
||||
struct cb_opts dctcp = {
|
||||
.cc = "bpf_dctcp",
|
||||
};
|
||||
struct network_helper_opts srv_opts = {
|
||||
.post_socket_cb = cc_cb,
|
||||
.cb_opts = &dctcp,
|
||||
};
|
||||
struct cb_opts cubic = {
|
||||
.cc = "cubic",
|
||||
};
|
||||
struct network_helper_opts cli_opts = {
|
||||
.post_socket_cb = cc_cb,
|
||||
.cb_opts = &cubic,
|
||||
};
|
||||
char srv_cc[16];
|
||||
socklen_t cc_len = sizeof(srv_cc);
|
||||
|
||||
@ -190,13 +260,7 @@ static void test_dctcp_fallback(void)
|
||||
if (!ASSERT_OK_PTR(link, "dctcp link"))
|
||||
goto done;
|
||||
|
||||
lfd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
|
||||
if (!ASSERT_GE(lfd, 0, "lfd") ||
|
||||
!ASSERT_OK(settcpca(lfd, "bpf_dctcp"), "lfd=>bpf_dctcp"))
|
||||
goto done;
|
||||
|
||||
cli_fd = connect_to_fd_opts(lfd, &opts);
|
||||
if (!ASSERT_GE(cli_fd, 0, "cli_fd"))
|
||||
if (!start_test("::1", &srv_opts, &cli_opts, &lfd, &cli_fd))
|
||||
goto done;
|
||||
|
||||
srv_fd = accept(lfd, NULL, 0);
|
||||
@ -297,6 +361,13 @@ static void test_unsupp_cong_op(void)
|
||||
|
||||
static void test_update_ca(void)
|
||||
{
|
||||
struct cb_opts cb_opts = {
|
||||
.cc = "tcp_ca_update",
|
||||
};
|
||||
struct network_helper_opts opts = {
|
||||
.post_socket_cb = cc_cb,
|
||||
.cb_opts = &cb_opts,
|
||||
};
|
||||
struct tcp_ca_update *skel;
|
||||
struct bpf_link *link;
|
||||
int saved_ca1_cnt;
|
||||
@ -309,14 +380,14 @@ static void test_update_ca(void)
|
||||
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
|
||||
ASSERT_OK_PTR(link, "attach_struct_ops");
|
||||
|
||||
do_test("tcp_ca_update", NULL);
|
||||
do_test(&opts);
|
||||
saved_ca1_cnt = skel->bss->ca1_cnt;
|
||||
ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");
|
||||
|
||||
err = bpf_link__update_map(link, skel->maps.ca_update_2);
|
||||
ASSERT_OK(err, "update_map");
|
||||
|
||||
do_test("tcp_ca_update", NULL);
|
||||
do_test(&opts);
|
||||
ASSERT_EQ(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
|
||||
ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt");
|
||||
|
||||
@ -326,6 +397,13 @@ static void test_update_ca(void)
|
||||
|
||||
static void test_update_wrong(void)
|
||||
{
|
||||
struct cb_opts cb_opts = {
|
||||
.cc = "tcp_ca_update",
|
||||
};
|
||||
struct network_helper_opts opts = {
|
||||
.post_socket_cb = cc_cb,
|
||||
.cb_opts = &cb_opts,
|
||||
};
|
||||
struct tcp_ca_update *skel;
|
||||
struct bpf_link *link;
|
||||
int saved_ca1_cnt;
|
||||
@ -338,14 +416,14 @@ static void test_update_wrong(void)
|
||||
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
|
||||
ASSERT_OK_PTR(link, "attach_struct_ops");
|
||||
|
||||
do_test("tcp_ca_update", NULL);
|
||||
do_test(&opts);
|
||||
saved_ca1_cnt = skel->bss->ca1_cnt;
|
||||
ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");
|
||||
|
||||
err = bpf_link__update_map(link, skel->maps.ca_wrong);
|
||||
ASSERT_ERR(err, "update_map");
|
||||
|
||||
do_test("tcp_ca_update", NULL);
|
||||
do_test(&opts);
|
||||
ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
|
||||
|
||||
bpf_link__destroy(link);
|
||||
@ -354,6 +432,13 @@ static void test_update_wrong(void)
|
||||
|
||||
static void test_mixed_links(void)
|
||||
{
|
||||
struct cb_opts cb_opts = {
|
||||
.cc = "tcp_ca_update",
|
||||
};
|
||||
struct network_helper_opts opts = {
|
||||
.post_socket_cb = cc_cb,
|
||||
.cb_opts = &cb_opts,
|
||||
};
|
||||
struct tcp_ca_update *skel;
|
||||
struct bpf_link *link, *link_nl;
|
||||
int err;
|
||||
@ -368,7 +453,7 @@ static void test_mixed_links(void)
|
||||
link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
|
||||
ASSERT_OK_PTR(link, "attach_struct_ops");
|
||||
|
||||
do_test("tcp_ca_update", NULL);
|
||||
do_test(&opts);
|
||||
ASSERT_GT(skel->bss->ca1_cnt, 0, "ca1_ca1_cnt");
|
||||
|
||||
err = bpf_link__update_map(link, skel->maps.ca_no_link);
|
||||
@ -455,6 +540,13 @@ static void test_tcp_ca_kfunc(void)
|
||||
|
||||
static void test_cc_cubic(void)
|
||||
{
|
||||
struct cb_opts cb_opts = {
|
||||
.cc = "bpf_cc_cubic",
|
||||
};
|
||||
struct network_helper_opts opts = {
|
||||
.post_socket_cb = cc_cb,
|
||||
.cb_opts = &cb_opts,
|
||||
};
|
||||
struct bpf_cc_cubic *cc_cubic_skel;
|
||||
struct bpf_link *link;
|
||||
|
||||
@ -468,7 +560,7 @@ static void test_cc_cubic(void)
|
||||
return;
|
||||
}
|
||||
|
||||
do_test("bpf_cc_cubic", NULL);
|
||||
do_test(&opts);
|
||||
|
||||
bpf_link__destroy(link);
|
||||
bpf_cc_cubic__destroy(cc_cubic_skel);
|
||||
|
@ -45,12 +45,6 @@ err_out:
|
||||
return err;
|
||||
}
|
||||
|
||||
struct scale_test_def {
|
||||
const char *file;
|
||||
enum bpf_prog_type attach_type;
|
||||
bool fails;
|
||||
};
|
||||
|
||||
static void scale_test(const char *file,
|
||||
enum bpf_prog_type attach_type,
|
||||
bool should_fail)
|
||||
|
tools/testing/selftests/bpf/prog_tests/btf_field_iter.c (new file, 161 lines)
@@ -0,0 +1,161 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024, Oracle and/or its affiliates. */

#include <test_progs.h>
#include <bpf/btf.h>
#include "btf_helpers.h"
#include "bpf/libbpf_internal.h"

struct field_data {
	__u32 ids[5];
	const char *strs[5];
} fields[] = {
	{ .ids = {}, .strs = {} },
	{ .ids = {}, .strs = { "int" } },
	{ .ids = {}, .strs = { "int64" } },
	{ .ids = { 1 }, .strs = { "" } },
	{ .ids = { 2, 1 }, .strs = { "" } },
	{ .ids = { 3, 1 }, .strs = { "s1", "f1", "f2" } },
	{ .ids = { 1, 5 }, .strs = { "u1", "f1", "f2" } },
	{ .ids = {}, .strs = { "e1", "v1", "v2" } },
	{ .ids = {}, .strs = { "fw1" } },
	{ .ids = { 1 }, .strs = { "t" } },
	{ .ids = { 2 }, .strs = { "" } },
	{ .ids = { 1 }, .strs = { "" } },
	{ .ids = { 3 }, .strs = { "" } },
	{ .ids = { 1, 1, 3 }, .strs = { "", "p1", "p2" } },
	{ .ids = { 13 }, .strs = { "func" } },
	{ .ids = { 1 }, .strs = { "var1" } },
	{ .ids = { 3 }, .strs = { "var2" } },
	{ .ids = {}, .strs = { "float" } },
	{ .ids = { 11 }, .strs = { "decltag" } },
	{ .ids = { 6 }, .strs = { "typetag" } },
	{ .ids = {}, .strs = { "e64", "eval1", "eval2", "eval3" } },
	{ .ids = { 15, 16 }, .strs = { "datasec1" } }

};

/* Fabricate BTF with various types and check BTF field iteration finds types,
 * strings expected.
 */
void test_btf_field_iter(void)
{
	struct btf *btf = NULL;
	int id;

	btf = btf__new_empty();
	if (!ASSERT_OK_PTR(btf, "empty_btf"))
		return;

	btf__add_int(btf, "int", 4, BTF_INT_SIGNED);	/* [1] int */
	btf__add_int(btf, "int64", 8, BTF_INT_SIGNED);	/* [2] int64 */
	btf__add_ptr(btf, 1);				/* [3] int * */
	btf__add_array(btf, 1, 2, 3);			/* [4] int64[3] */
	btf__add_struct(btf, "s1", 12);			/* [5] struct s1 { */
	btf__add_field(btf, "f1", 3, 0, 0);		/*	int *f1; */
	btf__add_field(btf, "f2", 1, 0, 0);		/*	int f2; */
							/* } */
	btf__add_union(btf, "u1", 12);			/* [6] union u1 { */
	btf__add_field(btf, "f1", 1, 0, 0);		/*	int f1; */
	btf__add_field(btf, "f2", 5, 0, 0);		/*	struct s1 f2; */
							/* } */
	btf__add_enum(btf, "e1", 4);			/* [7] enum e1 { */
	btf__add_enum_value(btf, "v1", 1);		/*	v1 = 1; */
	btf__add_enum_value(btf, "v2", 2);		/*	v2 = 2; */
							/* } */

	btf__add_fwd(btf, "fw1", BTF_FWD_STRUCT);	/* [8] struct fw1; */
	btf__add_typedef(btf, "t", 1);			/* [9] typedef int t; */
	btf__add_volatile(btf, 2);			/* [10] volatile int64; */
	btf__add_const(btf, 1);				/* [11] const int; */
	btf__add_restrict(btf, 3);			/* [12] restrict int *; */
	btf__add_func_proto(btf, 1);			/* [13] int (*)(int p1, int *p2); */
	btf__add_func_param(btf, "p1", 1);
	btf__add_func_param(btf, "p2", 3);

	btf__add_func(btf, "func", BTF_FUNC_GLOBAL, 13);/* [14] int func(int p1, int *p2); */
	btf__add_var(btf, "var1", BTF_VAR_STATIC, 1);	/* [15] static int var1; */
	btf__add_var(btf, "var2", BTF_VAR_STATIC, 3);	/* [16] static int *var2; */
	btf__add_float(btf, "float", 4);		/* [17] float; */
	btf__add_decl_tag(btf, "decltag", 11, -1);	/* [18] decltag const int; */
	btf__add_type_tag(btf, "typetag", 6);		/* [19] typetag union u1; */
	btf__add_enum64(btf, "e64", 8, true);		/* [20] enum { */
	btf__add_enum64_value(btf, "eval1", 1000);	/*	eval1 = 1000, */
	btf__add_enum64_value(btf, "eval2", 2000);	/*	eval2 = 2000, */
	btf__add_enum64_value(btf, "eval3", 3000);	/*	eval3 = 3000 */
							/* } */
	btf__add_datasec(btf, "datasec1", 12);		/* [21] datasec datasec1 */
	btf__add_datasec_var_info(btf, 15, 0, 4);
	btf__add_datasec_var_info(btf, 16, 4, 8);

	VALIDATE_RAW_BTF(
		btf,
		"[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
		"[2] INT 'int64' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED",
		"[3] PTR '(anon)' type_id=1",
		"[4] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=3",
		"[5] STRUCT 's1' size=12 vlen=2\n"
		"\t'f1' type_id=3 bits_offset=0\n"
		"\t'f2' type_id=1 bits_offset=0",
		"[6] UNION 'u1' size=12 vlen=2\n"
		"\t'f1' type_id=1 bits_offset=0\n"
		"\t'f2' type_id=5 bits_offset=0",
		"[7] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
		"\t'v1' val=1\n"
		"\t'v2' val=2",
		"[8] FWD 'fw1' fwd_kind=struct",
		"[9] TYPEDEF 't' type_id=1",
		"[10] VOLATILE '(anon)' type_id=2",
		"[11] CONST '(anon)' type_id=1",
		"[12] RESTRICT '(anon)' type_id=3",
		"[13] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
		"\t'p1' type_id=1\n"
		"\t'p2' type_id=3",
		"[14] FUNC 'func' type_id=13 linkage=global",
		"[15] VAR 'var1' type_id=1, linkage=static",
		"[16] VAR 'var2' type_id=3, linkage=static",
		"[17] FLOAT 'float' size=4",
		"[18] DECL_TAG 'decltag' type_id=11 component_idx=-1",
		"[19] TYPE_TAG 'typetag' type_id=6",
		"[20] ENUM64 'e64' encoding=SIGNED size=8 vlen=3\n"
		"\t'eval1' val=1000\n"
		"\t'eval2' val=2000\n"
		"\t'eval3' val=3000",
		"[21] DATASEC 'datasec1' size=12 vlen=2\n"
		"\ttype_id=15 offset=0 size=4\n"
		"\ttype_id=16 offset=4 size=8");

	for (id = 1; id < btf__type_cnt(btf); id++) {
		struct btf_type *t = btf_type_by_id(btf, id);
		struct btf_field_iter it_strs, it_ids;
		int str_idx = 0, id_idx = 0;
		__u32 *next_str, *next_id;

		if (!ASSERT_OK_PTR(t, "btf_type_by_id"))
			break;
		if (!ASSERT_OK(btf_field_iter_init(&it_strs, t, BTF_FIELD_ITER_STRS),
			       "iter_init_strs"))
			break;
		if (!ASSERT_OK(btf_field_iter_init(&it_ids, t, BTF_FIELD_ITER_IDS),
			       "iter_init_ids"))
			break;
		while ((next_str = btf_field_iter_next(&it_strs))) {
			const char *str = btf__str_by_offset(btf, *next_str);

			if (!ASSERT_OK(strcmp(fields[id].strs[str_idx], str), "field_str_match"))
				break;
			str_idx++;
		}
		/* ensure no more strings are expected */
		ASSERT_EQ(fields[id].strs[str_idx], NULL, "field_str_cnt");

		while ((next_id = btf_field_iter_next(&it_ids))) {
			if (!ASSERT_EQ(*next_id, fields[id].ids[id_idx], "field_id_match"))
				break;
			id_idx++;
		}
		/* ensure no more ids are expected */
		ASSERT_EQ(fields[id].ids[id_idx], 0, "field_id_cnt");
	}
	btf__free(btf);
}
@@ -18,6 +18,11 @@ static const char * const cpumask_success_testcases[] = {
	"test_insert_leave",
	"test_insert_remove_release",
	"test_global_mask_rcu",
	"test_global_mask_array_one_rcu",
	"test_global_mask_array_rcu",
	"test_global_mask_array_l2_rcu",
	"test_global_mask_nested_rcu",
	"test_global_mask_nested_deep_rcu",
	"test_cpumask_weight",
};

@@ -29,8 +29,8 @@ static int open_pe(void)

	/* create perf event */
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.freq = 1;
	attr.sample_freq = 1000;
	pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
@@ -183,6 +183,18 @@ static void test_linked_list_success(int mode, bool leave_in_map)
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_nested), &opts);
	ASSERT_OK(ret, "global_list_push_pop_nested");
	ASSERT_OK(opts.retval, "global_list_push_pop_nested retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_array_push_pop), &opts);
	ASSERT_OK(ret, "global_list_array_push_pop");
	ASSERT_OK(opts.retval, "global_list_array_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP)
		goto end;
@@ -31,6 +31,28 @@ static void test_rbtree_add_nodes(void)
	rbtree__destroy(skel);
}

static void test_rbtree_add_nodes_nested(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes_nested), &opts);
	ASSERT_OK(ret, "rbtree_add_nodes_nested run");
	ASSERT_OK(opts.retval, "rbtree_add_nodes_nested retval");
	ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes_nested less_callback_ran");

	rbtree__destroy(skel);
}

static void test_rbtree_add_and_remove(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
@@ -53,6 +75,27 @@ static void test_rbtree_add_and_remove(void)
	rbtree__destroy(skel);
}

static void test_rbtree_add_and_remove_array(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove_array), &opts);
	ASSERT_OK(ret, "rbtree_add_and_remove_array");
	ASSERT_OK(opts.retval, "rbtree_add_and_remove_array retval");

	rbtree__destroy(skel);
}

static void test_rbtree_first_and_remove(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
@@ -104,8 +147,12 @@ void test_rbtree_success(void)
{
	if (test__start_subtest("rbtree_add_nodes"))
		test_rbtree_add_nodes();
	if (test__start_subtest("rbtree_add_nodes_nested"))
		test_rbtree_add_nodes_nested();
	if (test__start_subtest("rbtree_add_and_remove"))
		test_rbtree_add_and_remove();
	if (test__start_subtest("rbtree_add_and_remove_array"))
		test_rbtree_add_and_remove_array();
	if (test__start_subtest("rbtree_first_and_remove"))
		test_rbtree_first_and_remove();
	if (test__start_subtest("rbtree_api_release_aliasing"))
@@ -156,7 +156,8 @@ static void test_send_signal_tracepoint(bool signal_thread)
static void test_send_signal_perf(bool signal_thread)
{
	struct perf_event_attr attr = {
		.sample_period = 1,
		.freq = 1,
		.sample_freq = 1000,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};

@@ -70,7 +70,7 @@ static void *server_thread(void *arg)
	return (void *)(long)err;
}

static int custom_cb(int fd, const struct post_socket_opts *opts)
static int custom_cb(int fd, void *opts)
{
	char buf;
	int err;
@@ -3,9 +3,12 @@
#include <test_progs.h>
#include <time.h>

#include <sys/epoll.h>

#include "struct_ops_module.skel.h"
#include "struct_ops_nulled_out_cb.skel.h"
#include "struct_ops_forgotten_cb.skel.h"
#include "struct_ops_detach.skel.h"

static void check_map_info(struct bpf_map_info *info)
{
@@ -242,6 +245,58 @@ cleanup:
	struct_ops_forgotten_cb__destroy(skel);
}

/* Detach a link from a user space program */
static void test_detach_link(void)
{
	struct epoll_event ev, events[2];
	struct struct_ops_detach *skel;
	struct bpf_link *link = NULL;
	int fd, epollfd = -1, nfds;
	int err;

	skel = struct_ops_detach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "struct_ops_detach__open_and_load"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach);
	if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
		goto cleanup;

	fd = bpf_link__fd(link);
	if (!ASSERT_GE(fd, 0, "link_fd"))
		goto cleanup;

	epollfd = epoll_create1(0);
	if (!ASSERT_GE(epollfd, 0, "epoll_create1"))
		goto cleanup;

	ev.events = EPOLLHUP;
	ev.data.fd = fd;
	err = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev);
	if (!ASSERT_OK(err, "epoll_ctl"))
		goto cleanup;

	err = bpf_link__detach(link);
	if (!ASSERT_OK(err, "detach_link"))
		goto cleanup;

	/* Wait for EPOLLHUP */
	nfds = epoll_wait(epollfd, events, 2, 500);
	if (!ASSERT_EQ(nfds, 1, "epoll_wait"))
		goto cleanup;

	if (!ASSERT_EQ(events[0].data.fd, fd, "epoll_wait_fd"))
		goto cleanup;
	if (!ASSERT_TRUE(events[0].events & EPOLLHUP, "events[0].events"))
		goto cleanup;

cleanup:
	if (epollfd >= 0)
		close(epollfd);
	bpf_link__destroy(link);
	struct_ops_detach__destroy(skel);
}

void serial_test_struct_ops_module(void)
{
	if (test__start_subtest("struct_ops_load"))
@@ -254,5 +309,7 @@ void serial_test_struct_ops_module(void)
		test_struct_ops_nulled_out_cb();
	if (test__start_subtest("struct_ops_forgotten_cb"))
		test_struct_ops_forgotten_cb();
	if (test__start_subtest("test_detach_link"))
		test_detach_link();
}
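Since the notification is delivered through the link FD's ->poll hook, plain poll(2) should observe the same event as the epoll-based test above. A hedged sketch:

	/* fd = bpf_link__fd(link); block until detach/unregister */
	struct pollfd pfd = { .fd = fd, .events = POLLHUP };

	if (poll(&pfd, 1, 500) == 1 && (pfd.revents & POLLHUP)) {
		/* struct_ops map was detached; drop user-space state */
	}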
@@ -86,6 +86,7 @@
#include "verifier_xadd.skel.h"
#include "verifier_xdp.skel.h"
#include "verifier_xdp_direct_packet_access.skel.h"
#include "verifier_bits_iter.skel.h"

#define MAX_ENTRIES 11

@@ -202,6 +203,7 @@ void test_verifier_var_off(void) { RUN(verifier_var_off); }
void test_verifier_xadd(void) { RUN(verifier_xadd); }
void test_verifier_xdp(void) { RUN(verifier_xdp); }
void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); }
void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }

static int init_test_val_map(struct bpf_object *obj, char *map_name)
{
@@ -6,12 +6,6 @@

char _license[] SEC("license") = "GPL";

struct key_t {
	int a;
	int b;
	int c;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 3);

@@ -6,12 +6,6 @@

char _license[] SEC("license") = "GPL";

struct key_t {
	int a;
	int b;
	int c;
};

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 3);
@@ -12,6 +12,31 @@ char _license[] SEC("license") = "GPL";

int pid, nr_cpus;

struct kptr_nested {
	struct bpf_cpumask __kptr * mask;
};

struct kptr_nested_pair {
	struct bpf_cpumask __kptr * mask_1;
	struct bpf_cpumask __kptr * mask_2;
};

struct kptr_nested_mid {
	int dummy;
	struct kptr_nested m;
};

struct kptr_nested_deep {
	struct kptr_nested_mid ptrs[2];
	struct kptr_nested_pair ptr_pairs[3];
};

private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
private(MASK) static struct kptr_nested global_mask_nested[2];
private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep;

static bool is_test_task(void)
{
	int cur_pid = bpf_get_current_pid_tgid() >> 32;
@@ -460,6 +485,152 @@ int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local, *prev;

	if (!is_test_task())
		return 0;

	/* Kptr arrays with one element are special cased, being treated
	 * just like a single pointer.
	 */

	local = create_cpumask();
	if (!local)
		return 0;

	prev = bpf_kptr_xchg(&global_mask_array_one[0], local);
	if (prev) {
		bpf_cpumask_release(prev);
		err = 3;
		return 0;
	}

	bpf_rcu_read_lock();
	local = global_mask_array_one[0];
	if (!local) {
		err = 4;
		bpf_rcu_read_unlock();
		return 0;
	}

	bpf_rcu_read_unlock();

	return 0;
}

static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
				  struct bpf_cpumask **mask1)
{
	struct bpf_cpumask *local;

	if (!is_test_task())
		return 0;

	/* Check if two kptrs in the array work and independently */

	local = create_cpumask();
	if (!local)
		return 0;

	bpf_rcu_read_lock();

	local = bpf_kptr_xchg(mask0, local);
	if (local) {
		err = 1;
		goto err_exit;
	}

	/* [<mask 0>, NULL] */
	if (!*mask0 || *mask1) {
		err = 2;
		goto err_exit;
	}

	local = create_cpumask();
	if (!local) {
		err = 9;
		goto err_exit;
	}

	local = bpf_kptr_xchg(mask1, local);
	if (local) {
		err = 10;
		goto err_exit;
	}

	/* [<mask 0>, <mask 1>] */
	if (!*mask0 || !*mask1 || *mask0 == *mask1) {
		err = 11;
		goto err_exit;
	}

err_exit:
	if (local)
		bpf_cpumask_release(local);
	bpf_rcu_read_unlock();
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
{
	return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
{
	return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags)
{
	return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask);
}

/* Ensure that the field->offset has been correctly advanced from one
 * nested struct or array sub-tree to another. In the case of
 * kptr_nested_deep, it comprises two sub-trees: ktpr_1 and kptr_2. By
 * calling bpf_kptr_xchg() on every single kptr in both nested sub-trees,
 * the verifier should reject the program if the field->offset of any kptr
 * is incorrect.
 *
 * For instance, if we have 10 kptrs in a nested struct and a program that
 * accesses each kptr individually with bpf_kptr_xchg(), the compiler
 * should emit instructions to access 10 different offsets if it works
 * correctly. If the field->offset values of any pair of them are
 * incorrectly the same, the number of unique offsets in btf_record for
 * this nested struct should be less than 10. The verifier should fail to
 * discover some of the offsets emitted by the compiler.
 *
 * Even if the field->offset values of kptrs are not duplicated, the
 * verifier should fail to find a btf_field for the instruction accessing a
 * kptr if the corresponding field->offset is pointing to a random
 * incorrect offset.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
{
	int r, i;

	r = _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask,
				   &global_mask_nested_deep.ptrs[1].m.mask);
	if (r)
		return r;

	for (i = 0; i < 3; i++) {
		r = _global_mask_array_rcu(&global_mask_nested_deep.ptr_pairs[i].mask_1,
					   &global_mask_nested_deep.ptr_pairs[i].mask_2);
		if (r)
			return r;
	}
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
{
@@ -11,6 +11,22 @@

#include "linked_list.h"

struct head_nested_inner {
	struct bpf_spin_lock lock;
	struct bpf_list_head head __contains(foo, node2);
};

struct head_nested {
	int dummy;
	struct head_nested_inner inner;
};

private(C) struct bpf_spin_lock glock_c;
private(C) struct bpf_list_head ghead_array[2] __contains(foo, node2);
private(C) struct bpf_list_head ghead_array_one[1] __contains(foo, node2);

private(D) struct head_nested ghead_nested;

static __always_inline
int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
{
@@ -309,6 +325,32 @@ int global_list_push_pop(void *ctx)
	return test_list_push_pop(&glock, &ghead);
}

SEC("tc")
int global_list_push_pop_nested(void *ctx)
{
	return test_list_push_pop(&ghead_nested.inner.lock, &ghead_nested.inner.head);
}

SEC("tc")
int global_list_array_push_pop(void *ctx)
{
	int r;

	r = test_list_push_pop(&glock_c, &ghead_array[0]);
	if (r)
		return r;

	r = test_list_push_pop(&glock_c, &ghead_array[1]);
	if (r)
		return r;

	/* Arrays with only one element is a special case, being treated
	 * just like a bpf_list_head variable by the verifier, not an
	 * array.
	 */
	return test_list_push_pop(&glock_c, &ghead_array_one[0]);
}

SEC("tc")
int map_list_push_pop_multiple(void *ctx)
{
@@ -13,6 +13,15 @@ struct node_data {
	struct bpf_rb_node node;
};

struct root_nested_inner {
	struct bpf_spin_lock glock;
	struct bpf_rb_root root __contains(node_data, node);
};

struct root_nested {
	struct root_nested_inner inner;
};

long less_callback_ran = -1;
long removed_key = -1;
long first_data[2] = {-1, -1};
@@ -20,6 +29,9 @@ long first_data[2] = {-1, -1};
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);
private(A) struct bpf_rb_root groot_array[2] __contains(node_data, node);
private(A) struct bpf_rb_root groot_array_one[1] __contains(node_data, node);
private(B) struct root_nested groot_nested;

static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
@@ -71,6 +83,12 @@ long rbtree_add_nodes(void *ctx)
	return __add_three(&groot, &glock);
}

SEC("tc")
long rbtree_add_nodes_nested(void *ctx)
{
	return __add_three(&groot_nested.inner.root, &groot_nested.inner.glock);
}

SEC("tc")
long rbtree_add_and_remove(void *ctx)
{
@@ -109,6 +127,65 @@ err_out:
	return 1;
}

SEC("tc")
long rbtree_add_and_remove_array(void *ctx)
{
	struct bpf_rb_node *res1 = NULL, *res2 = NULL, *res3 = NULL;
	struct node_data *nodes[3][2] = {{NULL, NULL}, {NULL, NULL}, {NULL, NULL}};
	struct node_data *n;
	long k1 = -1, k2 = -1, k3 = -1;
	int i, j;

	for (i = 0; i < 3; i++) {
		for (j = 0; j < 2; j++) {
			nodes[i][j] = bpf_obj_new(typeof(*nodes[i][j]));
			if (!nodes[i][j])
				goto err_out;
			nodes[i][j]->key = i * 2 + j;
		}
	}

	bpf_spin_lock(&glock);
	for (i = 0; i < 2; i++)
		for (j = 0; j < 2; j++)
			bpf_rbtree_add(&groot_array[i], &nodes[i][j]->node, less);
	for (j = 0; j < 2; j++)
		bpf_rbtree_add(&groot_array_one[0], &nodes[2][j]->node, less);
	res1 = bpf_rbtree_remove(&groot_array[0], &nodes[0][0]->node);
	res2 = bpf_rbtree_remove(&groot_array[1], &nodes[1][0]->node);
	res3 = bpf_rbtree_remove(&groot_array_one[0], &nodes[2][0]->node);
	bpf_spin_unlock(&glock);

	if (res1) {
		n = container_of(res1, struct node_data, node);
		k1 = n->key;
		bpf_obj_drop(n);
	}
	if (res2) {
		n = container_of(res2, struct node_data, node);
		k2 = n->key;
		bpf_obj_drop(n);
	}
	if (res3) {
		n = container_of(res3, struct node_data, node);
		k3 = n->key;
		bpf_obj_drop(n);
	}
	if (k1 != 0 || k2 != 2 || k3 != 4)
		return 2;

	return 0;

err_out:
	for (i = 0; i < 3; i++) {
		for (j = 0; j < 2; j++) {
			if (nodes[i][j])
				bpf_obj_drop(nodes[i][j]);
		}
	}
	return 1;
}

SEC("tc")
long rbtree_first_and_remove(void *ctx)
{
tools/testing/selftests/bpf/progs/struct_ops_detach.c (new file, 10 lines)
@@ -0,0 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod.h"

char _license[] SEC("license") = "GPL";

SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_do_detach;

@@ -92,7 +92,7 @@ struct {
	__uint(value_size, sizeof(int));
} tls_sock_map SEC(".maps");

SEC("sk_skb1")
SEC("sk_skb/stream_parser")
int bpf_prog1(struct __sk_buff *skb)
{
	int *f, two = 2;
@@ -104,7 +104,7 @@ int bpf_prog1(struct __sk_buff *skb)
	return skb->len;
}

SEC("sk_skb2")
SEC("sk_skb/stream_verdict")
int bpf_prog2(struct __sk_buff *skb)
{
	__u32 lport = skb->local_port;
@@ -151,7 +151,7 @@ static inline void bpf_write_pass(struct __sk_buff *skb, int offset)
	memcpy(c + offset, "PASS", 4);
}

SEC("sk_skb3")
SEC("sk_skb/stream_verdict")
int bpf_prog3(struct __sk_buff *skb)
{
	int err, *f, ret = SK_PASS;
@@ -177,9 +177,6 @@ int bpf_prog3(struct __sk_buff *skb)
		return bpf_sk_redirect_hash(skb, &tls_sock_map, &ret, flags);
#endif
	}
	f = bpf_map_lookup_elem(&sock_skb_opts, &one);
	if (f && *f)
		ret = SK_DROP;
	err = bpf_skb_adjust_room(skb, 4, 0, 0);
	if (err)
		return SK_DROP;
@@ -233,7 +230,7 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
	return 0;
}

SEC("sk_msg1")
SEC("sk_msg")
int bpf_prog4(struct sk_msg_md *msg)
{
	int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
@@ -263,7 +260,7 @@ int bpf_prog4(struct sk_msg_md *msg)
	return SK_PASS;
}

SEC("sk_msg2")
SEC("sk_msg")
int bpf_prog6(struct sk_msg_md *msg)
{
	int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
@@ -308,7 +305,7 @@ int bpf_prog6(struct sk_msg_md *msg)
#endif
}

SEC("sk_msg3")
SEC("sk_msg")
int bpf_prog8(struct sk_msg_md *msg)
{
	void *data_end = (void *)(long) msg->data_end;
@@ -329,7 +326,8 @@ int bpf_prog8(struct sk_msg_md *msg)

	return SK_PASS;
}
SEC("sk_msg4")

SEC("sk_msg")
int bpf_prog9(struct sk_msg_md *msg)
{
	void *data_end = (void *)(long) msg->data_end;
@@ -347,7 +345,7 @@ int bpf_prog9(struct sk_msg_md *msg)
	return SK_PASS;
}

SEC("sk_msg5")
SEC("sk_msg")
int bpf_prog10(struct sk_msg_md *msg)
{
	int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
tools/testing/selftests/bpf/progs/verifier_bits_iter.c (new file, 153 lines)
@@ -0,0 +1,153 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Yafang Shao <laoar.shao@gmail.com> */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#include "bpf_misc.h"
#include "task_kfunc_common.h"

char _license[] SEC("license") = "GPL";

int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign,
		      u32 nr_bits) __ksym __weak;
int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym __weak;
void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym __weak;

SEC("iter.s/cgroup")
__description("bits iter without destroy")
__failure __msg("Unreleased reference")
int BPF_PROG(no_destroy, struct bpf_iter_meta *meta, struct cgroup *cgrp)
{
	struct bpf_iter_bits it;
	u64 data = 1;

	bpf_iter_bits_new(&it, &data, 1);
	bpf_iter_bits_next(&it);
	return 0;
}

SEC("iter/cgroup")
__description("uninitialized iter in ->next()")
__failure __msg("expected an initialized iter_bits as arg #1")
int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
{
	struct bpf_iter_bits *it = NULL;

	bpf_iter_bits_next(it);
	return 0;
}

SEC("iter/cgroup")
__description("uninitialized iter in ->destroy()")
__failure __msg("expected an initialized iter_bits as arg #1")
int BPF_PROG(destroy_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp)
{
	struct bpf_iter_bits it = {};

	bpf_iter_bits_destroy(&it);
	return 0;
}

SEC("syscall")
__description("null pointer")
__success __retval(0)
int null_pointer(void)
{
	int nr = 0;
	int *bit;

	bpf_for_each(bits, bit, NULL, 1)
		nr++;
	return nr;
}

SEC("syscall")
__description("bits copy")
__success __retval(10)
int bits_copy(void)
{
	u64 data = 0xf7310UL; /* 4 + 3 + 2 + 1 + 0 */
	int nr = 0;
	int *bit;

	bpf_for_each(bits, bit, &data, 1)
		nr++;
	return nr;
}

SEC("syscall")
__description("bits memalloc")
__success __retval(64)
int bits_memalloc(void)
{
	u64 data[2];
	int nr = 0;
	int *bit;

	__builtin_memset(&data, 0xf0, sizeof(data)); /* 4 * 16 */
	bpf_for_each(bits, bit, &data[0], sizeof(data) / sizeof(u64))
		nr++;
	return nr;
}

SEC("syscall")
__description("bit index")
__success __retval(8)
int bit_index(void)
{
	u64 data = 0x100;
	int bit_idx = 0;
	int *bit;

	bpf_for_each(bits, bit, &data, 1) {
		if (*bit == 0)
			continue;
		bit_idx = *bit;
	}
	return bit_idx;
}

SEC("syscall")
__description("bits nomem")
__success __retval(0)
int bits_nomem(void)
{
	u64 data[4];
	int nr = 0;
	int *bit;

	__builtin_memset(&data, 0xff, sizeof(data));
	bpf_for_each(bits, bit, &data[0], 513) /* Be greater than 512 */
		nr++;
	return nr;
}

SEC("syscall")
__description("fewer words")
__success __retval(1)
int fewer_words(void)
{
	u64 data[2] = {0x1, 0xff};
	int nr = 0;
	int *bit;

	bpf_for_each(bits, bit, &data[0], 1)
		nr++;
	return nr;
}

SEC("syscall")
__description("zero words")
__success __retval(0)
int zero_words(void)
{
	u64 data[2] = {0x1, 0xff};
	int nr = 0;
	int *bit;

	bpf_for_each(bits, bit, &data[0], 0)
		nr++;
	return nr;
}
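
These tests drive the new bits iterator through bpf_for_each(), the open-coded-iterator convenience macro from the selftests' bpf_misc.h. Roughly, bpf_for_each(bits, ...) wraps the bpf_iter_bits_new()/_next()/_destroy() kfunc triple declared at the top of the file; a hand-expanded sketch of the bits_copy loop, under that assumption (not the literal macro output):

/* Hand-expanded equivalent of "bpf_for_each(bits, bit, &data, 1)" from
 * bits_copy above; illustrates the new/next/destroy contract the verifier
 * enforces in the failure tests.
 */
SEC("syscall")
int bits_copy_expanded(void)
{
	u64 data = 0xf7310UL;
	struct bpf_iter_bits it;
	int nr = 0;
	int *bit;

	bpf_iter_bits_new(&it, &data, 1);	/* acquire the iterator */
	while ((bit = bpf_iter_bits_next(&it)))	/* yields each set-bit index */
		nr++;
	bpf_iter_bits_destroy(&it);	/* skipping this trips "Unreleased reference" */
	return nr;	/* 10, matching the bits_copy retval */
}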
@@ -63,7 +63,8 @@ int passed;
 int failed;
 int map_fd[9];
 struct bpf_map *maps[9];
-int prog_fd[9];
+struct bpf_program *progs[9];
+struct bpf_link *links[9];
 
 int txmsg_pass;
 int txmsg_redir;
@@ -680,7 +681,8 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
 			}
 		}
 
-		s->bytes_recvd += recv;
+		if (recv > 0)
+			s->bytes_recvd += recv;
 
 		if (opt->check_recved_len && s->bytes_recvd > total_bytes) {
 			errno = EMSGSIZE;
@@ -952,7 +954,8 @@ enum {
 
 static int run_options(struct sockmap_options *options, int cg_fd, int test)
 {
-	int i, key, next_key, err, tx_prog_fd = -1, zero = 0;
+	int i, key, next_key, err, zero = 0;
+	struct bpf_program *tx_prog;
 
 	/* If base test skip BPF setup */
 	if (test == BASE || test == BASE_SENDPAGE)
@@ -960,48 +963,44 @@ static int run_options(struct sockmap_options *options, int cg_fd, int test)
 
 	/* Attach programs to sockmap */
 	if (!txmsg_omit_skb_parser) {
-		err = bpf_prog_attach(prog_fd[0], map_fd[0],
-				      BPF_SK_SKB_STREAM_PARSER, 0);
-		if (err) {
+		links[0] = bpf_program__attach_sockmap(progs[0], map_fd[0]);
+		if (!links[0]) {
 			fprintf(stderr,
-				"ERROR: bpf_prog_attach (sockmap %i->%i): %d (%s)\n",
-				prog_fd[0], map_fd[0], err, strerror(errno));
-			return err;
+				"ERROR: bpf_program__attach_sockmap (sockmap %i->%i): (%s)\n",
+				bpf_program__fd(progs[0]), map_fd[0], strerror(errno));
+			return -1;
 		}
 	}
 
-	err = bpf_prog_attach(prog_fd[1], map_fd[0],
-			      BPF_SK_SKB_STREAM_VERDICT, 0);
-	if (err) {
-		fprintf(stderr, "ERROR: bpf_prog_attach (sockmap): %d (%s)\n",
-			err, strerror(errno));
-		return err;
+	links[1] = bpf_program__attach_sockmap(progs[1], map_fd[0]);
+	if (!links[1]) {
+		fprintf(stderr, "ERROR: bpf_program__attach_sockmap (sockmap): (%s)\n",
+			strerror(errno));
+		return -1;
 	}
 
 	/* Attach programs to TLS sockmap */
 	if (txmsg_ktls_skb) {
 		if (!txmsg_omit_skb_parser) {
-			err = bpf_prog_attach(prog_fd[0], map_fd[8],
-					      BPF_SK_SKB_STREAM_PARSER, 0);
-			if (err) {
+			links[2] = bpf_program__attach_sockmap(progs[0], map_fd[8]);
+			if (!links[2]) {
 				fprintf(stderr,
-					"ERROR: bpf_prog_attach (TLS sockmap %i->%i): %d (%s)\n",
-					prog_fd[0], map_fd[8], err, strerror(errno));
-				return err;
+					"ERROR: bpf_program__attach_sockmap (TLS sockmap %i->%i): (%s)\n",
+					bpf_program__fd(progs[0]), map_fd[8], strerror(errno));
+				return -1;
 			}
 		}
 
-		err = bpf_prog_attach(prog_fd[2], map_fd[8],
-				      BPF_SK_SKB_STREAM_VERDICT, 0);
-		if (err) {
-			fprintf(stderr, "ERROR: bpf_prog_attach (TLS sockmap): %d (%s)\n",
-				err, strerror(errno));
-			return err;
+		links[3] = bpf_program__attach_sockmap(progs[2], map_fd[8]);
+		if (!links[3]) {
+			fprintf(stderr, "ERROR: bpf_program__attach_sockmap (TLS sockmap): (%s)\n",
+				strerror(errno));
+			return -1;
 		}
 	}
 
 	/* Attach to cgroups */
-	err = bpf_prog_attach(prog_fd[3], cg_fd, BPF_CGROUP_SOCK_OPS, 0);
+	err = bpf_prog_attach(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS, 0);
 	if (err) {
 		fprintf(stderr, "ERROR: bpf_prog_attach (groups): %d (%s)\n",
 			err, strerror(errno));
@@ -1017,30 +1016,31 @@ run:
 
 	/* Attach txmsg program to sockmap */
 	if (txmsg_pass)
-		tx_prog_fd = prog_fd[4];
+		tx_prog = progs[4];
 	else if (txmsg_redir)
-		tx_prog_fd = prog_fd[5];
+		tx_prog = progs[5];
 	else if (txmsg_apply)
-		tx_prog_fd = prog_fd[6];
+		tx_prog = progs[6];
 	else if (txmsg_cork)
-		tx_prog_fd = prog_fd[7];
+		tx_prog = progs[7];
 	else if (txmsg_drop)
-		tx_prog_fd = prog_fd[8];
+		tx_prog = progs[8];
 	else
-		tx_prog_fd = 0;
+		tx_prog = NULL;
 
-	if (tx_prog_fd) {
-		int redir_fd, i = 0;
+	if (tx_prog) {
+		int redir_fd;
 
-		err = bpf_prog_attach(tx_prog_fd,
-				      map_fd[1], BPF_SK_MSG_VERDICT, 0);
-		if (err) {
+		links[4] = bpf_program__attach_sockmap(tx_prog, map_fd[1]);
+		if (!links[4]) {
 			fprintf(stderr,
-				"ERROR: bpf_prog_attach (txmsg): %d (%s)\n",
-				err, strerror(errno));
+				"ERROR: bpf_program__attach_sockmap (txmsg): (%s)\n",
+				strerror(errno));
+			err = -1;
 			goto out;
 		}
 
+		i = 0;
 		err = bpf_map_update_elem(map_fd[1], &i, &c1, BPF_ANY);
 		if (err) {
 			fprintf(stderr,
@@ -1279,16 +1279,14 @@ run:
 		fprintf(stderr, "unknown test\n");
out:
 	/* Detatch and zero all the maps */
-	bpf_prog_detach2(prog_fd[3], cg_fd, BPF_CGROUP_SOCK_OPS);
-	bpf_prog_detach2(prog_fd[0], map_fd[0], BPF_SK_SKB_STREAM_PARSER);
-	bpf_prog_detach2(prog_fd[1], map_fd[0], BPF_SK_SKB_STREAM_VERDICT);
-	bpf_prog_detach2(prog_fd[0], map_fd[8], BPF_SK_SKB_STREAM_PARSER);
-	bpf_prog_detach2(prog_fd[2], map_fd[8], BPF_SK_SKB_STREAM_VERDICT);
+	bpf_prog_detach2(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS);
 
-	if (tx_prog_fd >= 0)
-		bpf_prog_detach2(tx_prog_fd, map_fd[1], BPF_SK_MSG_VERDICT);
+	for (i = 0; i < ARRAY_SIZE(links); i++) {
+		if (links[i])
+			bpf_link__detach(links[i]);
+	}
 
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < ARRAY_SIZE(map_fd); i++) {
 		key = next_key = 0;
 		bpf_map_update_elem(map_fd[i], &key, &zero, BPF_ANY);
 		while (bpf_map_get_next_key(map_fd[i], &key, &next_key) == 0) {
@@ -1783,30 +1781,6 @@ char *map_names[] = {
 	"tls_sock_map",
 };
 
-int prog_attach_type[] = {
-	BPF_SK_SKB_STREAM_PARSER,
-	BPF_SK_SKB_STREAM_VERDICT,
-	BPF_SK_SKB_STREAM_VERDICT,
-	BPF_CGROUP_SOCK_OPS,
-	BPF_SK_MSG_VERDICT,
-	BPF_SK_MSG_VERDICT,
-	BPF_SK_MSG_VERDICT,
-	BPF_SK_MSG_VERDICT,
-	BPF_SK_MSG_VERDICT,
-};
-
-int prog_type[] = {
-	BPF_PROG_TYPE_SK_SKB,
-	BPF_PROG_TYPE_SK_SKB,
-	BPF_PROG_TYPE_SK_SKB,
-	BPF_PROG_TYPE_SOCK_OPS,
-	BPF_PROG_TYPE_SK_MSG,
-	BPF_PROG_TYPE_SK_MSG,
-	BPF_PROG_TYPE_SK_MSG,
-	BPF_PROG_TYPE_SK_MSG,
-	BPF_PROG_TYPE_SK_MSG,
-};
-
 static int populate_progs(char *bpf_file)
 {
 	struct bpf_program *prog;
@@ -1825,17 +1799,10 @@ static int populate_progs(char *bpf_file)
 		return -1;
 	}
 
-	bpf_object__for_each_program(prog, obj) {
-		bpf_program__set_type(prog, prog_type[i]);
-		bpf_program__set_expected_attach_type(prog,
-						      prog_attach_type[i]);
-		i++;
-	}
-
 	i = bpf_object__load(obj);
 	i = 0;
 	bpf_object__for_each_program(prog, obj) {
-		prog_fd[i] = bpf_program__fd(prog);
+		progs[i] = prog;
 		i++;
 	}
 
@@ -1849,6 +1816,9 @@ static int populate_progs(char *bpf_file)
 		}
 	}
 
+	for (i = 0; i < ARRAY_SIZE(links); i++)
+		links[i] = NULL;
+
 	return 0;
 }
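
These hunks swap fd-based bpf_prog_attach()/bpf_prog_detach2() calls for libbpf bpf_link objects: bpf_program__attach_sockmap() derives the attach type from the program's section name and targets the map fd, and every attachment can then be torn down uniformly through the links array. A condensed sketch of the pattern (object and program names are hypothetical, error handling trimmed):

/* Sketch of the link-based attach/detach lifecycle adopted above. */
#include <bpf/libbpf.h>

static int attach_demo(struct bpf_object *obj, int sock_map_fd)
{
	struct bpf_program *prog;
	struct bpf_link *link;

	prog = bpf_object__find_program_by_name(obj, "demo_verdict");
	if (!prog)
		return -1;

	/* One call replaces bpf_prog_attach(): attach type comes from the
	 * program's section name, the target from the sockmap fd.
	 */
	link = bpf_program__attach_sockmap(prog, sock_map_fd);
	if (!link)
		return -1;

	/* ... run the test ... */

	bpf_link__detach(link);	/* or bpf_link__destroy() to also free the link */
	return 0;
}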
@@ -139,14 +139,14 @@ out:
 	return ret;
 }
 
-static int v6only_true(int fd, const struct post_socket_opts *opts)
+static int v6only_true(int fd, void *opts)
 {
 	int mode = true;
 
 	return setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &mode, sizeof(mode));
 }
 
-static int v6only_false(int fd, const struct post_socket_opts *opts)
+static int v6only_false(int fd, void *opts)
 {
 	int mode = false;
 
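
Dropping the typed const struct post_socket_opts * parameter in favor of an opaque void * lets one callback signature serve helpers that pass different (or no) option structs. A sketch of the shape this enables (typedef and helper names are illustrative, not from the patch):

/* Illustrative only: one callback type now fits callers with different
 * option payloads, since the parameter is opaque.
 */
typedef int (*post_socket_cb)(int fd, void *opts);

static int run_post_socket_cb(int fd, post_socket_cb cb, void *opts)
{
	return cb ? cb(fd, opts) : 0;	/* e.g. cb = v6only_true, opts = NULL */
}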
@@ -1237,11 +1237,6 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 	fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id);
 }
 
-struct libcap {
-	struct __user_cap_header_struct hdr;
-	struct __user_cap_data_struct data[2];
-};
-
 static int set_admin(bool admin)
 {
 	int err;
@@ -211,7 +211,7 @@ long ksym_get_addr(const char *name)
  */
 int kallsyms_find(const char *sym, unsigned long long *addr)
 {
-	char type, name[500];
+	char type, name[500], *match;
 	unsigned long long value;
 	int err = 0;
 	FILE *f;
@@ -221,6 +221,17 @@ int kallsyms_find(const char *sym, unsigned long long *addr)
 		return -EINVAL;
 
 	while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
+		/* If CONFIG_LTO_CLANG_THIN is enabled, static variable/function
+		 * symbols could be promoted to global due to cross-file inlining.
+		 * For such cases, clang compiler will add .llvm.<hash> suffix
+		 * to those symbols to avoid potential naming conflict.
+		 * Let us ignore .llvm.<hash> suffix during symbol comparison.
+		 */
+		if (type == 'd') {
+			match = strstr(name, ".llvm.");
+			if (match)
+				*match = '\0';
+		}
 		if (strcmp(name, sym) == 0) {
 			*addr = value;
 			goto out;
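
A standalone sketch of the suffix handling added above: with CONFIG_LTO_CLANG_THIN, a static symbol promoted to global can appear in /proc/kallsyms as <name>.llvm.<hash>, and truncating at ".llvm." restores the name the tests expect (the function below is an illustration, not the kernel helper itself):

#include <string.h>

static void strip_llvm_suffix(char *name)
{
	char *match = strstr(name, ".llvm.");

	if (match)
		*match = '\0';	/* "foo.llvm.123456" -> "foo" */
}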