diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 4ccfae30e681..17bc922f543f 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -498,6 +498,10 @@ struct bpf_object {
 	 * it at load time.
 	 */
 	struct btf *btf_vmlinux;
+	/* Path to the custom BTF to be used for BPF CO-RE relocations as an
+	 * override for vmlinux BTF.
+	 */
+	char *btf_custom_path;
 	/* vmlinux BTF override for CO-RE relocations */
 	struct btf *btf_vmlinux_override;
 	/* Lazily initialized kernel module BTFs */
@@ -2645,8 +2649,10 @@ static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
 	struct bpf_program *prog;
 	int i;
 
-	/* CO-RE relocations need kernel BTF */
-	if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
+	/* CO-RE relocations need kernel BTF, only when btf_custom_path
+	 * is not specified
+	 */
+	if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
 		return true;
 
 	/* Support for typed ksyms needs kernel BTF */
@@ -7596,7 +7602,7 @@ static struct bpf_object *
 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 		   const struct bpf_object_open_opts *opts)
 {
-	const char *obj_name, *kconfig;
+	const char *obj_name, *kconfig, *btf_tmp_path;
 	struct bpf_program *prog;
 	struct bpf_object *obj;
 	char tmp_name[64];
@@ -7627,11 +7633,26 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 	if (IS_ERR(obj))
 		return obj;
 
+	btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
+	if (btf_tmp_path) {
+		if (strlen(btf_tmp_path) >= PATH_MAX) {
+			err = -ENAMETOOLONG;
+			goto out;
+		}
+		obj->btf_custom_path = strdup(btf_tmp_path);
+		if (!obj->btf_custom_path) {
+			err = -ENOMEM;
+			goto out;
+		}
+	}
+
 	kconfig = OPTS_GET(opts, kconfig, NULL);
 	if (kconfig) {
 		obj->kconfig = strdup(kconfig);
-		if (!obj->kconfig)
-			return ERR_PTR(-ENOMEM);
+		if (!obj->kconfig) {
+			err = -ENOMEM;
+			goto out;
+		}
 	}
 
 	err = bpf_object__elf_init(obj);
@@ -8097,7 +8118,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 	err = err ? : bpf_object__sanitize_maps(obj);
 	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
 	err = err ? : bpf_object__create_maps(obj);
-	err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
+	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : attr->target_btf_path);
 	err = err ? : bpf_object__load_progs(obj, attr->log_level);
 
 	if (obj->gen_loader) {
@@ -8744,6 +8765,7 @@ void bpf_object__close(struct bpf_object *obj)
 	for (i = 0; i < obj->nr_maps; i++)
 		bpf_map__destroy(&obj->maps[i]);
 
+	zfree(&obj->btf_custom_path);
 	zfree(&obj->kconfig);
 	zfree(&obj->externs);
 	obj->nr_extern = 0;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 6e61342ba56c..6b08c1023609 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -94,8 +94,15 @@ struct bpf_object_open_opts {
 	 * system Kconfig for CONFIG_xxx externs.
 	 */
 	const char *kconfig;
+	/* Path to the custom BTF to be used for BPF CO-RE relocations.
+	 * This custom BTF completely replaces the use of vmlinux BTF
+	 * for the purpose of CO-RE relocations.
+	 * NOTE: any other BPF feature (e.g., fentry/fexit programs,
+	 * struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux.
+	 */
+	const char *btf_custom_path;
 };
-#define bpf_object_open_opts__last_field kconfig
+#define bpf_object_open_opts__last_field btf_custom_path
 
 LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
 LIBBPF_API struct bpf_object *
diff --git a/tools/testing/selftests/bpf/prog_tests/core_autosize.c b/tools/testing/selftests/bpf/prog_tests/core_autosize.c
index 981c251453d9..3d4b2a358d47 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_autosize.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_autosize.c
@@ -53,8 +53,8 @@ void test_core_autosize(void)
 	char btf_file[] = "/tmp/core_autosize.btf.XXXXXX";
 	int err, fd = -1, zero = 0;
 	int char_id, short_id, int_id, long_long_id, void_ptr_id, id;
+	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
 	struct test_core_autosize* skel = NULL;
-	struct bpf_object_load_attr load_attr = {};
 	struct bpf_program *prog;
 	struct bpf_map *bss_map;
 	struct btf *btf = NULL;
@@ -125,9 +125,10 @@ void test_core_autosize(void)
 	fd = -1;
 
 	/* open and load BPF program with custom BTF as the kernel BTF */
-	skel = test_core_autosize__open();
+	open_opts.btf_custom_path = btf_file;
+	skel = test_core_autosize__open_opts(&open_opts);
 	if (!ASSERT_OK_PTR(skel, "skel_open"))
-		return;
+		goto cleanup;
 
 	/* disable handle_signed() for now */
 	prog = bpf_object__find_program_by_name(skel->obj, "handle_signed");
@@ -135,9 +136,7 @@ void test_core_autosize(void)
 		goto cleanup;
 	bpf_program__set_autoload(prog, false);
 
-	load_attr.obj = skel->obj;
-	load_attr.target_btf_path = btf_file;
-	err = bpf_object__load_xattr(&load_attr);
+	err = bpf_object__load(skel->obj);
 	if (!ASSERT_OK(err, "prog_load"))
 		goto cleanup;
 
@@ -204,14 +203,13 @@ void test_core_autosize(void)
 	skel = NULL;
 
 	/* now re-load with handle_signed() enabled, it should fail loading */
-	skel = test_core_autosize__open();
+	open_opts.btf_custom_path = btf_file;
+	skel = test_core_autosize__open_opts(&open_opts);
 	if (!ASSERT_OK_PTR(skel, "skel_open"))
-		return;
+		goto cleanup;
 
-	load_attr.obj = skel->obj;
-	load_attr.target_btf_path = btf_file;
-	err = bpf_object__load_xattr(&load_attr);
-	if (!ASSERT_ERR(err, "bad_prog_load"))
+	err = test_core_autosize__load(skel);
+	if (!ASSERT_ERR(err, "skel_load"))
 		goto cleanup;
 
 cleanup:
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index d02e064c535f..4739b15b2a97 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -816,7 +816,7 @@ static size_t roundup_page(size_t sz)
 void test_core_reloc(void)
 {
 	const size_t mmap_sz = roundup_page(sizeof(struct data));
-	struct bpf_object_load_attr load_attr = {};
+	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
 	struct core_reloc_test_case *test_case;
 	const char *tp_name, *probe_name;
 	int err, i, equal;
@@ -846,9 +846,16 @@ void test_core_reloc(void)
 			continue;
 		}
 
-		obj = bpf_object__open_file(test_case->bpf_obj_file, NULL);
+		if (test_case->btf_src_file) {
+			err = access(test_case->btf_src_file, R_OK);
+			if (!ASSERT_OK(err, "btf_src_file"))
+				goto cleanup;
+		}
+
+		open_opts.btf_custom_path = test_case->btf_src_file;
+		obj = bpf_object__open_file(test_case->bpf_obj_file, &open_opts);
 		if (!ASSERT_OK_PTR(obj, "obj_open"))
-			continue;
+			goto cleanup;
 
 		probe_name = "raw_tracepoint/sys_enter";
 		tp_name = "sys_enter";
@@ -862,17 +869,7 @@ void test_core_reloc(void)
 			  "prog '%s' not found\n", probe_name))
 			goto cleanup;
 
-
-		if (test_case->btf_src_file) {
-			err = access(test_case->btf_src_file, R_OK);
-			if (!ASSERT_OK(err, "btf_src_file"))
-				goto cleanup;
-		}
-
-		load_attr.obj = obj;
-		load_attr.log_level = 0;
-		load_attr.target_btf_path = test_case->btf_src_file;
-		err = bpf_object__load_xattr(&load_attr);
+		err = bpf_object__load(obj);
 		if (err) {
 			if (!test_case->fails)
 				ASSERT_OK(err, "obj_load");
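
For context, a minimal usage sketch of the new option from an application's point of view (illustrative only, not part of the patch; "prog.bpf.o" and "/tmp/custom.btf" are placeholder paths):

#include <bpf/libbpf.h>

/* Hypothetical helper: open a BPF object and resolve its CO-RE relocations
 * against a custom BTF file instead of /sys/kernel/btf/vmlinux.
 */
static int open_with_custom_btf(void)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
			    .btf_custom_path = "/tmp/custom.btf");
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file("prog.bpf.o", &opts);
	err = libbpf_get_error(obj);
	if (err)
		return err;

	/* CO-RE relocations use the custom BTF when the object is loaded */
	err = bpf_object__load(obj);

	bpf_object__close(obj);
	return err;
}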