From 4fa5bcfe07f7e97d9a140c465e1056c91651c41d Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Fri, 4 Mar 2022 17:01:27 -0800
Subject: [PATCH 001/137] libbpf: Allow BPF program auto-attach handlers to bail out

Allow some BPF program types to support auto-attach only in a subset of cases. Currently, if some BPF program type specifies an attach callback, it is assumed that during the skeleton attach operation all such programs either successfully attach or the entire skeleton attachment fails. If some program doesn't support auto-attachment from a skeleton, such BPF program types shouldn't have an attach callback specified.

This is limiting for cases when, depending on how full the SEC("") definition is, there could either be enough details to support auto-attach or there might not be, and the user has to use some specific API to provide more details at runtime.

One specific example of such desired behavior might be SEC("uprobe"). If it's specified as just SEC("uprobe"), auto-attach isn't possible. But if it's SEC("uprobe/<binary>:<function>") then there are enough details to support auto-attach.

Note that there is a somewhat subtle difference between the auto-attach behavior of a BPF skeleton and using "generic" bpf_program__attach(prog) (which uses the same attach handlers under the covers). A skeleton allows some programs within a bpf_object to not have auto-attach implemented and doesn't treat that as an error. Instead, such BPF programs are just skipped during the skeleton's (optional) attach step. bpf_program__attach(), on the other hand, is called when the user *expects* auto-attach to work, so if the specified program doesn't implement or doesn't support auto-attach functionality, that will be treated as an error.

Another improvement to the way libbpf is handling SEC()s would be to not require providing a dummy kernel function name for kprobe. Currently, SEC("kprobe/whatever") is necessary even if the actual kernel function is determined by the user at runtime and bpf_program__attach_kprobe() is used to specify it.
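As an illustration of the intended usage pattern (a sketch only; the skeleton and function names are made up for this example):

	/* BPF side: no dummy kernel function name needed */
	SEC("kprobe")
	int handle_entry(struct pt_regs *ctx)
	{
		return 0;
	}

	/* user-space side: target function is known only at runtime */
	struct bpf_link *link;

	link = bpf_program__attach_kprobe(skel->progs.handle_entry,
					  false /* retprobe */, func_name);

The skeleton's (optional) attach step would simply skip such a program, while calling bpf_program__attach() on it directly would report an error.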
With changes in this patch, it's possible to support both SEC("kprobe") and SEC("kprobe/<kernel_function>") forms.

Signed-off-by: Andrii Nakryiko
Signed-off-by: Alexei Starovoitov
Tested-by: Alan Maguire
Reviewed-by: Alan Maguire
Link: https://lore.kernel.org/bpf/20220305010129.1549719-2-andrii@kernel.org
---
 tools/lib/bpf/libbpf.c | 140 +++++++++++++++++++++++++----------------
 1 file changed, 85 insertions(+), 55 deletions(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 81bf01d67671..1da4a438ba00 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -201,11 +201,12 @@ struct reloc_desc {
 	};
 };

-struct bpf_sec_def;
-
-typedef int (*init_fn_t)(struct bpf_program *prog, long cookie);
-typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_opts *opts, long cookie);
-typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog, long cookie);
+typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie);
+typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog,
+					     struct bpf_prog_load_opts *opts, long cookie);
+/* If auto-attach is not supported, callback should return 0 and set link to NULL */
+typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie,
+				       struct bpf_link **link);

 /* stored as sec_def->cookie for all libbpf-supported SEC()s */
 enum sec_def_flags {
@@ -239,9 +240,9 @@ struct bpf_sec_def {
 	enum bpf_attach_type expected_attach_type;
 	long cookie;

-	init_fn_t init_fn;
-	preload_fn_t preload_fn;
-	attach_fn_t attach_fn;
+	libbpf_prog_setup_fn_t prog_setup_fn;
+	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
+	libbpf_prog_attach_fn_t prog_attach_fn;
 };

 /*
@@ -6572,9 +6573,9 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
 				     int *btf_obj_fd, int *btf_type_id);

-/* this is called as prog->sec_def->preload_fn for libbpf-supported sec_defs */
-static int libbpf_preload_prog(struct bpf_program *prog,
-			       struct bpf_prog_load_opts *opts, long cookie)
+/* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
+static int libbpf_prepare_prog_load(struct bpf_program *prog,
+				    struct bpf_prog_load_opts *opts, long cookie)
 {
 	enum sec_def_flags def = cookie;

@@ -6670,8 +6671,8 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog
 	load_attr.fd_array = obj->fd_array;

 	/* adjust load_attr if sec_def provides custom preload callback */
-	if (prog->sec_def && prog->sec_def->preload_fn) {
-		err = prog->sec_def->preload_fn(prog, &load_attr, prog->sec_def->cookie);
+	if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
+		err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
 		if (err < 0) {
 			pr_warn("prog '%s': failed to prepare load attributes: %d\n",
 				prog->name, err);
@@ -6971,8 +6972,8 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
 		/* sec_def can have custom callback which should be called
 		 * after bpf_program is initialized to adjust its properties
 		 */
-		if (prog->sec_def->init_fn) {
-			err = prog->sec_def->init_fn(prog, prog->sec_def->cookie);
+		if (prog->sec_def->prog_setup_fn) {
+			err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
 			if (err < 0) {
 				pr_warn("prog '%s': failed to initialize: %d\n",
 					prog->name, err);
@@ -8593,16 +8594,16 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log
 		.prog_type = BPF_PROG_TYPE_##ptype,			    \
 		.expected_attach_type = atype,				    \
 		.cookie = (long)(flags),				    \
-		.preload_fn = libbpf_preload_prog,			    \
+		.prog_prepare_load_fn = libbpf_prepare_prog_load,	    \
 		__VA_ARGS__						    \
 	}

-static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie);
-static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie);
-static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie);
-static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie);
-static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie);
-static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie);
+static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);

 static const struct bpf_sec_def section_defs[] = {
 	SEC_DEF("socket",		SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX),
@@ -8752,7 +8753,7 @@ static char *libbpf_get_type_names(bool attach_type)
 		const struct bpf_sec_def *sec_def = &section_defs[i];

 		if (attach_type) {
-			if (sec_def->preload_fn != libbpf_preload_prog)
+			if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
 				continue;

 			if (!(sec_def->cookie & SEC_ATTACHABLE))
@@ -9135,7 +9136,7 @@ int libbpf_attach_type_by_name(const char *name,
 		return libbpf_err(-EINVAL);
 	}

-	if (sec_def->preload_fn != libbpf_preload_prog)
+	if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
 		return libbpf_err(-EINVAL);
 	if (!(sec_def->cookie & SEC_ATTACHABLE))
 		return libbpf_err(-EINVAL);
@@ -10109,14 +10110,13 @@ struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
 	return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
 }

-static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie)
+static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
 {
 	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
 	unsigned long offset = 0;
-	struct bpf_link *link;
 	const char *func_name;
 	char *func;
-	int n, err;
+	int n;

 	opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
 	if (opts.retprobe)
@@ -10126,21 +10126,19 @@ static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cooki

 	n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
 	if (n < 1) {
-		err = -EINVAL;
 		pr_warn("kprobe name is invalid: %s\n", func_name);
-		return libbpf_err_ptr(err);
+		return -EINVAL;
 	}
 	if (opts.retprobe && offset != 0) {
 		free(func);
-		err = -EINVAL;
 		pr_warn("kretprobes do not support offset specification\n");
-		return libbpf_err_ptr(err);
+		return -EINVAL;
 	}

 	opts.offset = offset;
-	link = bpf_program__attach_kprobe_opts(prog, func, &opts);
+	*link = bpf_program__attach_kprobe_opts(prog, func, &opts);
 	free(func);
-	return link;
+	return libbpf_get_error(*link);
 }

 static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
@@ -10395,14 +10393,13 @@ struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
 	return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
 }

-static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie)
+static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
 {
 	char *sec_name, *tp_cat, *tp_name;
-	struct bpf_link *link;

 	sec_name = strdup(prog->sec_name);
 	if (!sec_name)
-		return libbpf_err_ptr(-ENOMEM);
+		return -ENOMEM;

 	/* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
 	if (str_has_pfx(prog->sec_name, "tp/"))
@@ -10412,14 +10409,14 @@ static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie)
 	tp_name = strchr(tp_cat, '/');
 	if (!tp_name) {
 		free(sec_name);
-		return libbpf_err_ptr(-EINVAL);
+		return -EINVAL;
 	}
 	*tp_name = '\0';
 	tp_name++;

-	link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
+	*link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
 	free(sec_name);
-	return link;
+	return libbpf_get_error(*link);
 }

 struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
@@ -10452,7 +10449,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *pr
 	return link;
 }

-static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie)
+static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
 {
 	static const char *const prefixes[] = {
 		"raw_tp/",
@@ -10472,10 +10469,11 @@ static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cooki
 	if (!tp_name) {
 		pr_warn("prog '%s': invalid section name '%s'\n",
 			prog->name, prog->sec_name);
-		return libbpf_err_ptr(-EINVAL);
+		return -EINVAL;
 	}

-	return bpf_program__attach_raw_tracepoint(prog, tp_name);
+	*link = bpf_program__attach_raw_tracepoint(prog, tp_name);
+	return libbpf_get_error(*link);
 }

 /* Common logic for all BPF program types that attach to a btf_id */
@@ -10518,14 +10516,16 @@ struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
 	return bpf_program__attach_btf_id(prog);
 }

-static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie)
+static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
 {
-	return bpf_program__attach_trace(prog);
+	*link = bpf_program__attach_trace(prog);
+	return libbpf_get_error(*link);
 }

-static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie)
+static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
 {
-	return bpf_program__attach_lsm(prog);
+	*link = bpf_program__attach_lsm(prog);
+	return libbpf_get_error(*link);
 }

 static struct bpf_link *
@@ -10654,17 +10654,33 @@ bpf_program__attach_iter(const struct bpf_program *prog,
 	return link;
 }

-static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie)
+static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
 {
-	return bpf_program__attach_iter(prog, NULL);
+	*link = bpf_program__attach_iter(prog, NULL);
+	return libbpf_get_error(*link);
 }

 struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
 {
-	if (!prog->sec_def || !prog->sec_def->attach_fn)
-		return libbpf_err_ptr(-ESRCH);
+	struct bpf_link *link = NULL;
+	int err;

-	return prog->sec_def->attach_fn(prog, prog->sec_def->cookie);
+	if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
+		return libbpf_err_ptr(-EOPNOTSUPP);
+
+	err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
+	if (err)
+		return libbpf_err_ptr(err);
+
+	/* When calling bpf_program__attach() explicitly, auto-attach support
+	 * is expected to work, so NULL returned link is considered an error.
+	 * This is different for skeleton's attach, see comment in
+	 * bpf_object__attach_skeleton().
+	 */
+	if (!link)
+		return libbpf_err_ptr(-EOPNOTSUPP);
+
+	return link;
 }

 static int bpf_link__detach_struct_ops(struct bpf_link *link)
@@ -11805,16 +11821,30 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
 			continue;

 		/* auto-attaching not supported for this program */
-		if (!prog->sec_def || !prog->sec_def->attach_fn)
+		if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
 			continue;

-		*link = bpf_program__attach(prog);
-		err = libbpf_get_error(*link);
+		/* if user already set the link manually, don't attempt auto-attach */
+		if (*link)
+			continue;
+
+		err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
 		if (err) {
-			pr_warn("failed to auto-attach program '%s': %d\n",
+			pr_warn("prog '%s': failed to auto-attach: %d\n",
 				bpf_program__name(prog), err);
 			return libbpf_err(err);
 		}
+
+		/* It's possible that for some SEC() definitions auto-attach
+		 * is supported in some cases (e.g., if definition completely
+		 * specifies target information), but is not in other cases.
+		 * SEC("uprobe") is one such case. If user specified target
+		 * binary and function name, such BPF program can be
+		 * auto-attached. But if not, it shouldn't trigger skeleton's
+		 * attach to fail. It should just be skipped.
+		 * prog_attach_fn signals such case with returning 0 (no error)
+		 * and setting link to NULL.
+		 */
 	}

 	return 0;

From 697f104db8a6177daa5e1ffadda09c2e9285ad18 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Fri, 4 Mar 2022 17:01:28 -0800
Subject: [PATCH 002/137] libbpf: Support custom SEC() handlers

Allow registering and unregistering custom handlers for BPF program SEC() definitions. This allows user applications and libraries to plug into libbpf's declarative SEC() definition handling logic. This allows offloading complex and intricate custom logic into external libraries, while still providing a great user experience. One such example is a USDT handling library, which has a lot of code and complexity which doesn't make sense to put into libbpf directly, but it would be really great for users to be able to specify BPF programs with something like SEC("usdt/<path>:<provider>:<name>") and have the correct BPF program type set (BPF_PROG_TYPE_KPROBE, as it is a uprobe) and even support BPF skeleton's auto-attach logic.

In some cases, it might even be a good idea to override libbpf's default handling, like for SEC("perf_event") programs. With a custom library, it's possible to extend the logic to support specifying a perf event specification right there in the SEC() definition without burdening libbpf with lots of custom logic or extra library dependencies (e.g., libpfm4). With the current patch it's possible to override libbpf's SEC("perf_event") handling and specify a completely custom one.

Further, it's possible to specify a generic fallback handling for any SEC() that doesn't match any other custom or standard libbpf handlers. This allows accommodating whatever legacy use cases there might be, if necessary.

See doc comments for libbpf_register_prog_handler() and libbpf_unregister_prog_handler() for detailed semantics.

This patch also bumps libbpf development version to v0.8 and adds new APIs there.
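To sketch how a library could plug into this (the "usdt" handler below is purely illustrative and not part of this patch):

	static int usdt_attach(const struct bpf_program *prog, long cookie,
			       struct bpf_link **link)
	{
		/* attach if SEC() carries enough details; otherwise signal
		 * "no auto-attach" by returning 0 with *link set to NULL
		 */
		*link = NULL;
		return 0;
	}

	LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
		.prog_attach_fn = usdt_attach,
	);
	int handler_id;

	handler_id = libbpf_register_prog_handler("usdt/", BPF_PROG_TYPE_KPROBE,
						  0, &opts);
	/* ... */
	libbpf_unregister_prog_handler(handler_id);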
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Tested-by: Alan Maguire Reviewed-by: Alan Maguire Link: https://lore.kernel.org/bpf/20220305010129.1549719-3-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 206 ++++++++++++++++++++++++--------- tools/lib/bpf/libbpf.h | 109 +++++++++++++++++ tools/lib/bpf/libbpf.map | 6 + tools/lib/bpf/libbpf_version.h | 2 +- 4 files changed, 269 insertions(+), 54 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 1da4a438ba00..43161fdd44bb 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -201,13 +201,6 @@ struct reloc_desc { }; }; -typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie); -typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog, - struct bpf_prog_load_opts *opts, long cookie); -/* If auto-attach is not supported, callback should return 0 and set link to NULL */ -typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie, - struct bpf_link **link); - /* stored as sec_def->cookie for all libbpf-supported SEC()s */ enum sec_def_flags { SEC_NONE = 0, @@ -235,10 +228,11 @@ enum sec_def_flags { }; struct bpf_sec_def { - const char *sec; + char *sec; enum bpf_prog_type prog_type; enum bpf_attach_type expected_attach_type; long cookie; + int handler_id; libbpf_prog_setup_fn_t prog_setup_fn; libbpf_prog_prepare_load_fn_t prog_prepare_load_fn; @@ -8590,7 +8584,7 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log } #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \ - .sec = sec_pfx, \ + .sec = (char *)sec_pfx, \ .prog_type = BPF_PROG_TYPE_##ptype, \ .expected_attach_type = atype, \ .cookie = (long)(flags), \ @@ -8683,61 +8677,167 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX), }; -#define MAX_TYPE_NAME_SIZE 32 +static size_t custom_sec_def_cnt; +static struct bpf_sec_def *custom_sec_defs; +static struct bpf_sec_def custom_fallback_def; +static bool has_custom_fallback_def; + +static int last_custom_sec_def_handler_id; + +int libbpf_register_prog_handler(const char *sec, + enum bpf_prog_type prog_type, + enum bpf_attach_type exp_attach_type, + const struct libbpf_prog_handler_opts *opts) +{ + struct bpf_sec_def *sec_def; + + if (!OPTS_VALID(opts, libbpf_prog_handler_opts)) + return libbpf_err(-EINVAL); + + if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */ + return libbpf_err(-E2BIG); + + if (sec) { + sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1, + sizeof(*sec_def)); + if (!sec_def) + return libbpf_err(-ENOMEM); + + custom_sec_defs = sec_def; + sec_def = &custom_sec_defs[custom_sec_def_cnt]; + } else { + if (has_custom_fallback_def) + return libbpf_err(-EBUSY); + + sec_def = &custom_fallback_def; + } + + sec_def->sec = sec ? 
strdup(sec) : NULL; + if (sec && !sec_def->sec) + return libbpf_err(-ENOMEM); + + sec_def->prog_type = prog_type; + sec_def->expected_attach_type = exp_attach_type; + sec_def->cookie = OPTS_GET(opts, cookie, 0); + + sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL); + sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL); + sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL); + + sec_def->handler_id = ++last_custom_sec_def_handler_id; + + if (sec) + custom_sec_def_cnt++; + else + has_custom_fallback_def = true; + + return sec_def->handler_id; +} + +int libbpf_unregister_prog_handler(int handler_id) +{ + struct bpf_sec_def *sec_defs; + int i; + + if (handler_id <= 0) + return libbpf_err(-EINVAL); + + if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) { + memset(&custom_fallback_def, 0, sizeof(custom_fallback_def)); + has_custom_fallback_def = false; + return 0; + } + + for (i = 0; i < custom_sec_def_cnt; i++) { + if (custom_sec_defs[i].handler_id == handler_id) + break; + } + + if (i == custom_sec_def_cnt) + return libbpf_err(-ENOENT); + + free(custom_sec_defs[i].sec); + for (i = i + 1; i < custom_sec_def_cnt; i++) + custom_sec_defs[i - 1] = custom_sec_defs[i]; + custom_sec_def_cnt--; + + /* try to shrink the array, but it's ok if we couldn't */ + sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs)); + if (sec_defs) + custom_sec_defs = sec_defs; + + return 0; +} + +static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name, + bool allow_sloppy) +{ + size_t len = strlen(sec_def->sec); + + /* "type/" always has to have proper SEC("type/extras") form */ + if (sec_def->sec[len - 1] == '/') { + if (str_has_pfx(sec_name, sec_def->sec)) + return true; + return false; + } + + /* "type+" means it can be either exact SEC("type") or + * well-formed SEC("type/extras") with proper '/' separator + */ + if (sec_def->sec[len - 1] == '+') { + len--; + /* not even a prefix */ + if (strncmp(sec_name, sec_def->sec, len) != 0) + return false; + /* exact match or has '/' separator */ + if (sec_name[len] == '\0' || sec_name[len] == '/') + return true; + return false; + } + + /* SEC_SLOPPY_PFX definitions are allowed to be just prefix + * matches, unless strict section name mode + * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the + * match has to be exact. + */ + if (allow_sloppy && str_has_pfx(sec_name, sec_def->sec)) + return true; + + /* Definitions not marked SEC_SLOPPY_PFX (e.g., + * SEC("syscall")) are exact matches in both modes. 
+	 */
+	return strcmp(sec_name, sec_def->sec) == 0;
+}

 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
 {
 	const struct bpf_sec_def *sec_def;
-	enum sec_def_flags sec_flags;
-	int i, n = ARRAY_SIZE(section_defs), len;
-	bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME;
+	int i, n;
+	bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME, allow_sloppy;

+	n = custom_sec_def_cnt;
 	for (i = 0; i < n; i++) {
-		sec_def = &section_defs[i];
-		sec_flags = sec_def->cookie;
-		len = strlen(sec_def->sec);
-
-		/* "type/" always has to have proper SEC("type/extras") form */
-		if (sec_def->sec[len - 1] == '/') {
-			if (str_has_pfx(sec_name, sec_def->sec))
-				return sec_def;
-			continue;
-		}
-
-		/* "type+" means it can be either exact SEC("type") or
-		 * well-formed SEC("type/extras") with proper '/' separator
-		 */
-		if (sec_def->sec[len - 1] == '+') {
-			len--;
-			/* not even a prefix */
-			if (strncmp(sec_name, sec_def->sec, len) != 0)
-				continue;
-			/* exact match or has '/' separator */
-			if (sec_name[len] == '\0' || sec_name[len] == '/')
-				return sec_def;
-			continue;
-		}
-
-		/* SEC_SLOPPY_PFX definitions are allowed to be just prefix
-		 * matches, unless strict section name mode
-		 * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the
-		 * match has to be exact.
-		 */
-		if ((sec_flags & SEC_SLOPPY_PFX) && !strict) {
-			if (str_has_pfx(sec_name, sec_def->sec))
-				return sec_def;
-			continue;
-		}
-
-		/* Definitions not marked SEC_SLOPPY_PFX (e.g.,
-		 * SEC("syscall")) are exact matches in both modes.
-		 */
-		if (strcmp(sec_name, sec_def->sec) == 0)
+		sec_def = &custom_sec_defs[i];
+		if (sec_def_matches(sec_def, sec_name, false))
 			return sec_def;
 	}
+
+	n = ARRAY_SIZE(section_defs);
+	for (i = 0; i < n; i++) {
+		sec_def = &section_defs[i];
+		allow_sloppy = (sec_def->cookie & SEC_SLOPPY_PFX) && !strict;
+		if (sec_def_matches(sec_def, sec_name, allow_sloppy))
+			return sec_def;
+	}
+
+	if (has_custom_fallback_def)
+		return &custom_fallback_def;
+
 	return NULL;
 }

+#define MAX_TYPE_NAME_SIZE 32
+
 static char *libbpf_get_type_names(bool attach_type)
 {
 	int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c8d8daad212e..c1b0c2ef14d8 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -1328,6 +1328,115 @@ LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
 LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
 LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);

+/*
+ * Custom handling of BPF program's SEC() definitions
+ */
+
+struct bpf_prog_load_opts; /* defined in bpf.h */
+
+/* Called during bpf_object__open() for each recognized BPF program. Callback
+ * can use various bpf_program__set_*() setters to adjust whatever properties
+ * are necessary.
+ */
+typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie);
+
+/* Called right before libbpf performs bpf_prog_load() to load BPF program
+ * into the kernel. Callback can adjust opts as necessary.
+ */
+typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog,
+					     struct bpf_prog_load_opts *opts, long cookie);
+
+/* Called during skeleton attach or through bpf_program__attach(). If
+ * auto-attach is not supported, callback should return 0 and set link to
+ * NULL (it's not considered an error during skeleton attach, but it will be
+ * an error for bpf_program__attach() calls). On error, error should be
+ * returned directly and link set to NULL. On success, return 0 and set link
+ * to a valid struct bpf_link.
+ */
+typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie,
+				       struct bpf_link **link);
+
+struct libbpf_prog_handler_opts {
+	/* size of this struct, for forward/backward compatibility */
+	size_t sz;
+	/* User-provided value that is passed to prog_setup_fn,
+	 * prog_prepare_load_fn, and prog_attach_fn callbacks. Allows user to
+	 * register one set of callbacks for multiple SEC() definitions and
+	 * still be able to distinguish them, if necessary. For example,
+	 * libbpf itself is using this to pass necessary flags (e.g.,
+	 * sleepable flag) to a common internal SEC() handler.
+	 */
+	long cookie;
+	/* BPF program initialization callback (see libbpf_prog_setup_fn_t).
+	 * Callback is optional, pass NULL if it's not necessary.
+	 */
+	libbpf_prog_setup_fn_t prog_setup_fn;
+	/* BPF program loading callback (see libbpf_prog_prepare_load_fn_t).
+	 * Callback is optional, pass NULL if it's not necessary.
+	 */
+	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
+	/* BPF program attach callback (see libbpf_prog_attach_fn_t).
+	 * Callback is optional, pass NULL if it's not necessary.
+	 */
+	libbpf_prog_attach_fn_t prog_attach_fn;
+};
+#define libbpf_prog_handler_opts__last_field prog_attach_fn
+
+/**
+ * @brief **libbpf_register_prog_handler()** registers a custom BPF program
+ * SEC() handler.
+ * @param sec section prefix for which custom handler is registered
+ * @param prog_type BPF program type associated with specified section
+ * @param exp_attach_type Expected BPF attach type associated with specified section
+ * @param opts optional cookie, callbacks, and other extra options
+ * @return Non-negative handler ID is returned on success. This handler ID has
+ * to be passed to *libbpf_unregister_prog_handler()* to unregister such
+ * custom handler. Negative error code is returned on error.
+ *
+ * *sec* defines which SEC() definitions are handled by this custom handler
+ * registration. *sec* can have few different forms:
+ *   - if *sec* is just a plain string (e.g., "abc"), it will match only
+ *   SEC("abc"). If BPF program specifies SEC("abc/whatever") it will result
+ *   in an error;
+ *   - if *sec* is of the form "abc/", proper SEC() form is
+ *   SEC("abc/something"), where acceptable "something" should be checked by
+ *   *prog_setup_fn* callback, if there are additional restrictions;
+ *   - if *sec* is of the form "abc+", it will successfully match both
+ *   SEC("abc") and SEC("abc/whatever") forms;
+ *   - if *sec* is NULL, custom handler is registered for any BPF program that
+ *   doesn't match any of the registered (custom or libbpf's own) SEC()
+ *   handlers. There could be only one such generic custom handler registered
+ *   at any given time.
+ *
+ * All custom handlers (except the one with *sec* == NULL) are processed
+ * before libbpf's own SEC() handlers. It is allowed to "override" libbpf's
+ * SEC() handlers by registering custom ones for the same section prefix
+ * (i.e., it's possible to have custom SEC("perf_event/LLC-load-misses")
+ * handler).
+ *
+ * Note that, like other global libbpf APIs (e.g., libbpf_set_print(),
+ * libbpf_set_strict_mode(), etc.), these APIs are not thread-safe. User needs
+ * to ensure synchronization if there is a risk of running this API from
+ * multiple threads simultaneously.
+ */
+LIBBPF_API int libbpf_register_prog_handler(const char *sec,
+					    enum bpf_prog_type prog_type,
+					    enum bpf_attach_type exp_attach_type,
+					    const struct libbpf_prog_handler_opts *opts);
+/**
+ * @brief *libbpf_unregister_prog_handler()* unregisters previously registered
+ * custom BPF program SEC() handler.
+ * @param handler_id handler ID returned by *libbpf_register_prog_handler()*
+ * after successful registration
+ * @return 0 on success, negative error code if handler isn't found
+ *
+ * Note that, like other global libbpf APIs (e.g., libbpf_set_print(),
+ * libbpf_set_strict_mode(), etc.), these APIs are not thread-safe. User needs
+ * to ensure synchronization if there is a risk of running this API from
+ * multiple threads simultaneously.
+ */
+LIBBPF_API int libbpf_unregister_prog_handler(int handler_id);
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 47e70c9058d9..df1b947792c8 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -439,3 +439,9 @@ LIBBPF_0.7.0 {
 	libbpf_probe_bpf_prog_type;
 	libbpf_set_memlock_rlim_max;
 } LIBBPF_0.6.0;
+
+LIBBPF_0.8.0 {
+	global:
+		libbpf_register_prog_handler;
+		libbpf_unregister_prog_handler;
+} LIBBPF_0.7.0;
diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h
index 0fefefc3500b..61f2039404b6 100644
--- a/tools/lib/bpf/libbpf_version.h
+++ b/tools/lib/bpf/libbpf_version.h
@@ -4,6 +4,6 @@
 #define __LIBBPF_VERSION_H

 #define LIBBPF_MAJOR_VERSION 0
-#define LIBBPF_MINOR_VERSION 7
+#define LIBBPF_MINOR_VERSION 8

 #endif /* __LIBBPF_VERSION_H */

From aa963bcb0adc1adb79a97260fae55461359d1ed2 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Fri, 4 Mar 2022 17:01:29 -0800
Subject: [PATCH 003/137] selftests/bpf: Add custom SEC() handling selftest

Add a selftest validating various aspects of libbpf's handling of custom SEC() handlers. It also demonstrates how libraries can ensure very early registration and unregistration of callbacks using __attribute__((constructor))/__attribute__((destructor)) functions.
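E.g., a library can pair registration/unregistration with its lifetime roughly like this (a sketch; the "mylib+" prefix is hypothetical):

	static int my_handler_id;

	__attribute__((constructor))
	static void my_lib_register(void)
	{
		LIBBPF_OPTS(libbpf_prog_handler_opts, opts, .cookie = 1);

		my_handler_id = libbpf_register_prog_handler("mylib+",
					BPF_PROG_TYPE_KPROBE, 0, &opts);
	}

	__attribute__((destructor))
	static void my_lib_unregister(void)
	{
		libbpf_unregister_prog_handler(my_handler_id);
	}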
Signed-off-by: Andrii Nakryiko
Signed-off-by: Alexei Starovoitov
Tested-by: Alan Maguire
Reviewed-by: Alan Maguire
Link: https://lore.kernel.org/bpf/20220305010129.1549719-4-andrii@kernel.org
---
 .../bpf/prog_tests/custom_sec_handlers.c | 176 ++++++++++++++++++
 .../bpf/progs/test_custom_sec_handlers.c |  63 +++++++
 2 files changed, 239 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c
 create mode 100644 tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c

diff --git a/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c b/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c
new file mode 100644
index 000000000000..b2dfc5954aea
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <test_progs.h>
+#include "test_custom_sec_handlers.skel.h"
+
+#define COOKIE_ABC1 1
+#define COOKIE_ABC2 2
+#define COOKIE_CUSTOM 3
+#define COOKIE_FALLBACK 4
+#define COOKIE_KPROBE 5
+
+static int custom_setup_prog(struct bpf_program *prog, long cookie)
+{
+	if (cookie == COOKIE_ABC1)
+		bpf_program__set_autoload(prog, false);
+
+	return 0;
+}
+
+static int custom_prepare_load_prog(struct bpf_program *prog,
+				    struct bpf_prog_load_opts *opts, long cookie)
+{
+	if (cookie == COOKIE_FALLBACK)
+		opts->prog_flags |= BPF_F_SLEEPABLE;
+	else if (cookie == COOKIE_ABC1)
+		ASSERT_FALSE(true, "unexpected preload for abc");
+
+	return 0;
+}
+
+static int custom_attach_prog(const struct bpf_program *prog, long cookie,
+			      struct bpf_link **link)
+{
+	switch (cookie) {
+	case COOKIE_ABC2:
+		*link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+		return libbpf_get_error(*link);
+	case COOKIE_CUSTOM:
+		*link = bpf_program__attach_tracepoint(prog, "syscalls", "sys_enter_nanosleep");
+		return libbpf_get_error(*link);
+	case COOKIE_KPROBE:
+	case COOKIE_FALLBACK:
+		/* no auto-attach for SEC("xyz") and SEC("kprobe") */
+		*link = NULL;
+		return 0;
+	default:
+		ASSERT_FALSE(true, "unexpected cookie");
+		return -EINVAL;
+	}
+}
+
+static int abc1_id;
+static int abc2_id;
+static int custom_id;
+static int fallback_id;
+static int kprobe_id;
+
+__attribute__((constructor))
+static void register_sec_handlers(void)
+{
+	LIBBPF_OPTS(libbpf_prog_handler_opts, abc1_opts,
+		.cookie = COOKIE_ABC1,
+		.prog_setup_fn = custom_setup_prog,
+		.prog_prepare_load_fn = custom_prepare_load_prog,
+		.prog_attach_fn = NULL,
+	);
+	LIBBPF_OPTS(libbpf_prog_handler_opts, abc2_opts,
+		.cookie = COOKIE_ABC2,
+		.prog_setup_fn = custom_setup_prog,
+		.prog_prepare_load_fn = custom_prepare_load_prog,
+		.prog_attach_fn = custom_attach_prog,
+	);
+	LIBBPF_OPTS(libbpf_prog_handler_opts, custom_opts,
+		.cookie = COOKIE_CUSTOM,
+		.prog_setup_fn = NULL,
+		.prog_prepare_load_fn = NULL,
+		.prog_attach_fn = custom_attach_prog,
+	);
+
+	abc1_id = libbpf_register_prog_handler("abc", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc1_opts);
+	abc2_id = libbpf_register_prog_handler("abc/", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc2_opts);
+	custom_id = libbpf_register_prog_handler("custom+", BPF_PROG_TYPE_TRACEPOINT, 0, &custom_opts);
+}
+
+__attribute__((destructor))
+static void unregister_sec_handlers(void)
+{
+	libbpf_unregister_prog_handler(abc1_id);
+	libbpf_unregister_prog_handler(abc2_id);
+	libbpf_unregister_prog_handler(custom_id);
+}
+
+void test_custom_sec_handlers(void)
+{
+	LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
+		.prog_setup_fn = custom_setup_prog,
+		.prog_prepare_load_fn = custom_prepare_load_prog,
+		.prog_attach_fn = custom_attach_prog,
+	);
+	struct test_custom_sec_handlers *skel;
+	int err;
+
+	ASSERT_GT(abc1_id, 0, "abc1_id");
+	ASSERT_GT(abc2_id, 0, "abc2_id");
+	ASSERT_GT(custom_id, 0, "custom_id");
+
+	/* override libbpf's handling of SEC("kprobe/...") but also allow pure
+	 * SEC("kprobe") due to "kprobe+" specifier. Register it as
+	 * TRACEPOINT, just for fun.
+	 */
+	opts.cookie = COOKIE_KPROBE;
+	kprobe_id = libbpf_register_prog_handler("kprobe+", BPF_PROG_TYPE_TRACEPOINT, 0, &opts);
+	/* fallback treats everything as BPF_PROG_TYPE_SYSCALL program to test
+	 * setting custom BPF_F_SLEEPABLE bit in preload handler
+	 */
+	opts.cookie = COOKIE_FALLBACK;
+	fallback_id = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_SYSCALL, 0, &opts);
+
+	if (!ASSERT_GT(fallback_id, 0, "fallback_id") /* || !ASSERT_GT(kprobe_id, 0, "kprobe_id")*/) {
+		if (fallback_id > 0)
+			libbpf_unregister_prog_handler(fallback_id);
+		if (kprobe_id > 0)
+			libbpf_unregister_prog_handler(kprobe_id);
+		return;
+	}
+
+	/* open skeleton and validate assumptions */
+	skel = test_custom_sec_handlers__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		goto cleanup;
+
+	ASSERT_EQ(bpf_program__type(skel->progs.abc1), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc1_type");
+	ASSERT_FALSE(bpf_program__autoload(skel->progs.abc1), "abc1_autoload");
+
+	ASSERT_EQ(bpf_program__type(skel->progs.abc2), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc2_type");
+	ASSERT_EQ(bpf_program__type(skel->progs.custom1), BPF_PROG_TYPE_TRACEPOINT, "custom1_type");
+	ASSERT_EQ(bpf_program__type(skel->progs.custom2), BPF_PROG_TYPE_TRACEPOINT, "custom2_type");
+	ASSERT_EQ(bpf_program__type(skel->progs.kprobe1), BPF_PROG_TYPE_TRACEPOINT, "kprobe1_type");
+	ASSERT_EQ(bpf_program__type(skel->progs.xyz), BPF_PROG_TYPE_SYSCALL, "xyz_type");
+
+	skel->rodata->my_pid = getpid();
+
+	/* now attempt to load everything */
+	err = test_custom_sec_handlers__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto cleanup;
+
+	/* now try to auto-attach everything */
+	err = test_custom_sec_handlers__attach(skel);
+	if (!ASSERT_OK(err, "skel_attach"))
+		goto cleanup;
+
+	skel->links.xyz = bpf_program__attach(skel->progs.kprobe1);
+	ASSERT_EQ(errno, EOPNOTSUPP, "xyz_attach_err");
+	ASSERT_ERR_PTR(skel->links.xyz, "xyz_attach");
+
+	/* trigger programs */
+	usleep(1);
+
+	/* SEC("abc") is set to not auto-loaded */
+	ASSERT_FALSE(skel->bss->abc1_called, "abc1_called");
+	ASSERT_TRUE(skel->bss->abc2_called, "abc2_called");
+	ASSERT_TRUE(skel->bss->custom1_called, "custom1_called");
+	ASSERT_TRUE(skel->bss->custom2_called, "custom2_called");
+	/* SEC("kprobe") shouldn't be auto-attached */
+	ASSERT_FALSE(skel->bss->kprobe1_called, "kprobe1_called");
+	/* SEC("xyz") shouldn't be auto-attached */
+	ASSERT_FALSE(skel->bss->xyz_called, "xyz_called");
+
+cleanup:
+	test_custom_sec_handlers__destroy(skel);
+
+	ASSERT_OK(libbpf_unregister_prog_handler(fallback_id), "unregister_fallback");
+	ASSERT_OK(libbpf_unregister_prog_handler(kprobe_id), "unregister_kprobe");
+}
diff --git a/tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c b/tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c
new file mode 100644
index 000000000000..4061f701ca50
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+const volatile int my_pid;
+
+bool abc1_called;
+bool abc2_called;
+bool custom1_called;
+bool
custom2_called; +bool kprobe1_called; +bool xyz_called; + +SEC("abc") +int abc1(void *ctx) +{ + abc1_called = true; + return 0; +} + +SEC("abc/whatever") +int abc2(void *ctx) +{ + abc2_called = true; + return 0; +} + +SEC("custom") +int custom1(void *ctx) +{ + custom1_called = true; + return 0; +} + +SEC("custom/something") +int custom2(void *ctx) +{ + custom2_called = true; + return 0; +} + +SEC("kprobe") +int kprobe1(void *ctx) +{ + kprobe1_called = true; + return 0; +} + +SEC("xyz/blah") +int xyz(void *ctx) +{ + int whatever; + + /* use sleepable helper, custom handler should set sleepable flag */ + bpf_copy_from_user(&whatever, sizeof(whatever), NULL); + xyz_called = true; + return 0; +} + +char _license[] SEC("license") = "GPL"; From 25b35dd28138f61f9a0fb8b76c0483761fd228bd Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 5 Mar 2022 04:16:38 +0530 Subject: [PATCH 004/137] bpf: Add check_func_arg_reg_off function Lift the list of register types allowed for having fixed and variable offsets when passed as helper function arguments into a common helper, so that they can be reused for kfunc checks in later commits. Keeping a common helper aids maintainability and allows us to follow the same consistent rules across helpers and kfuncs. Also, convert check_func_arg to use this function. Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220304224645.3677453-2-memxor@gmail.com --- include/linux/bpf_verifier.h | 3 ++ kernel/bpf/verifier.c | 69 +++++++++++++++++++++--------------- 2 files changed, 44 insertions(+), 28 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 7a7be8c057f2..38b24ee8d8c2 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -521,6 +521,9 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); int check_ptr_off_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno); +int check_func_arg_reg_off(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, int regno, + enum bpf_arg_type arg_type); int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno); int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a57db4b2803c..e37eb6020253 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5359,6 +5359,44 @@ found: return 0; } +int check_func_arg_reg_off(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, int regno, + enum bpf_arg_type arg_type) +{ + enum bpf_reg_type type = reg->type; + bool fixed_off_ok = false; + + switch ((u32)type) { + case SCALAR_VALUE: + /* Pointer types where reg offset is explicitly allowed: */ + case PTR_TO_PACKET: + case PTR_TO_PACKET_META: + case PTR_TO_MAP_KEY: + case PTR_TO_MAP_VALUE: + case PTR_TO_MEM: + case PTR_TO_MEM | MEM_RDONLY: + case PTR_TO_MEM | MEM_ALLOC: + case PTR_TO_BUF: + case PTR_TO_BUF | MEM_RDONLY: + case PTR_TO_STACK: + /* Some of the argument types nevertheless require a + * zero register offset. + */ + if (arg_type != ARG_PTR_TO_ALLOC_MEM) + return 0; + break; + /* All the rest must be rejected, except PTR_TO_BTF_ID which allows + * fixed offset. 
+	 */
+	case PTR_TO_BTF_ID:
+		fixed_off_ok = true;
+		break;
+	default:
+		break;
+	}
+	return __check_ptr_off_reg(env, reg, regno, fixed_off_ok);
+}
+
 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 			  struct bpf_call_arg_meta *meta,
 			  const struct bpf_func_proto *fn)
@@ -5408,34 +5446,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 	if (err)
 		return err;

-	switch ((u32)type) {
-	case SCALAR_VALUE:
-	/* Pointer types where reg offset is explicitly allowed: */
-	case PTR_TO_PACKET:
-	case PTR_TO_PACKET_META:
-	case PTR_TO_MAP_KEY:
-	case PTR_TO_MAP_VALUE:
-	case PTR_TO_MEM:
-	case PTR_TO_MEM | MEM_RDONLY:
-	case PTR_TO_MEM | MEM_ALLOC:
-	case PTR_TO_BUF:
-	case PTR_TO_BUF | MEM_RDONLY:
-	case PTR_TO_STACK:
-		/* Some of the argument types nevertheless require a
-		 * zero register offset.
-		 */
-		if (arg_type == ARG_PTR_TO_ALLOC_MEM)
-			goto force_off_check;
-		break;
-	/* All the rest must be rejected: */
-	default:
-force_off_check:
-		err = __check_ptr_off_reg(env, reg, regno,
-					  type == PTR_TO_BTF_ID);
-		if (err < 0)
-			return err;
-		break;
-	}
+	err = check_func_arg_reg_off(env, reg, regno, arg_type);
+	if (err)
+		return err;

 skip_type_check:
 	if (reg->ref_obj_id) {

From 655efe5089f077485eec848272bd7e26b1a5a735 Mon Sep 17 00:00:00 2001
From: Kumar Kartikeya Dwivedi
Date: Sat, 5 Mar 2022 04:16:39 +0530
Subject: [PATCH 005/137] bpf: Fix PTR_TO_BTF_ID var_off check

When kfunc support was added, check_ctx_reg was called for the PTR_TO_CTX register, but no offset checks were made for PTR_TO_BTF_ID. Only reg->off was taken into account by btf_struct_ids_match, which protected against type mismatch due to non-zero reg->off, but when reg->off was zero, a user could set the variable offset of the register and allow it to be passed to a kfunc, leading to a bad pointer being passed into the kernel.

Fix this by reusing the extracted helper check_func_arg_reg_off from the previous commit, and make one call before checking all supported register types. Since the list is maintained, any future changes will be taken into account by updating check_func_arg_reg_off. This function prevents non-zero var_off from being set for PTR_TO_BTF_ID, but still allows a fixed non-zero reg->off, which is needed for type matching to work correctly when using pointer arithmetic.

ARG_DONTCARE is passed as arg_type, since kfuncs don't support accepting ARG_PTR_TO_ALLOC_MEM without relying on the size of the parameter type from BTF (in case of a pointer), or using a mem, len pair. The forcing of the offset check for ARG_PTR_TO_ALLOC_MEM is done because ringbuf helpers obtain the size from the header located at the beginning of the memory region, hence any changes to the original pointer shouldn't be allowed. In case of kfuncs, the size is always known, either at verification time, or using the length parameter, hence this forcing is not required.

Since this check will happen once already for PTR_TO_CTX, remove the check_ptr_off_reg call inside its block.
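The kind of situation this guards against looks roughly like this (hypothetical acquire/release kfuncs, shown only for illustration):

	struct foo *f = bpf_get_foo();	/* acquire kfunc, PTR_TO_BTF_ID */
	if (!f)
		return 0;
	/* adding an unbounded scalar gives the register a non-zero var_off */
	f += get_untrusted_index();
	bpf_put_foo(f);			/* now rejected by the verifier */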
Fixes: e6ac2450d6de ("bpf: Support bpf program calling kernel function")
Signed-off-by: Kumar Kartikeya Dwivedi
Signed-off-by: Alexei Starovoitov
Link: https://lore.kernel.org/bpf/20220304224645.3677453-3-memxor@gmail.com
---
 kernel/bpf/btf.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index b472cf0c8fdb..7f6a0ae5028b 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -5726,7 +5726,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
 	const char *func_name, *ref_tname;
 	const struct btf_type *t, *ref_t;
 	const struct btf_param *args;
-	int ref_regno = 0;
+	int ref_regno = 0, ret;
 	bool rel = false;

 	t = btf_type_by_id(btf, func_id);
@@ -5776,6 +5776,11 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,

 		ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
 		ref_tname = btf_name_by_offset(btf, ref_t->name_off);
+
+		ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE);
+		if (ret < 0)
+			return ret;
+
 		if (btf_get_prog_ctx_type(log, btf, t, env->prog->type, i)) {
 			/* If function expects ctx type in BTF check that caller
@@ -5787,8 +5792,6 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
 					i, btf_type_str(t));
 				return -EINVAL;
 			}
-			if (check_ptr_off_reg(env, reg, regno))
-				return -EINVAL;
 		} else if (is_kfunc && (reg->type == PTR_TO_BTF_ID ||
 			   (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) {
 			const struct btf_type *reg_ref_t;

From e1fad0ff46b32819d30cb487f1d39ba24e515843 Mon Sep 17 00:00:00 2001
From: Kumar Kartikeya Dwivedi
Date: Sat, 5 Mar 2022 04:16:40 +0530
Subject: [PATCH 006/137] bpf: Disallow negative offset in check_ptr_off_reg

check_ptr_off_reg only allows fixed offset to be set for PTR_TO_BTF_ID, where reg->off < 0 doesn't make sense. This would shift the pointer backwards and fail later in btf_struct_ids_match or btf_struct_walk due to out-of-bounds access (since the offset is interpreted as unsigned).

Improve the verifier by rejecting this case with a better error message for BPF helpers and kfuncs, by putting a check inside the check_func_arg_reg_off function. Also, update existing verifier selftests to work with the new error string.

Signed-off-by: Kumar Kartikeya Dwivedi
Signed-off-by: Alexei Starovoitov
Link: https://lore.kernel.org/bpf/20220304224645.3677453-4-memxor@gmail.com
---
 kernel/bpf/verifier.c | 6 ++++++
 tools/testing/selftests/bpf/verifier/bounds_deduction.c | 2 +-
 tools/testing/selftests/bpf/verifier/ctx.c | 8 ++++----
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e37eb6020253..455b4ab69e47 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3990,6 +3990,12 @@ static int __check_ptr_off_reg(struct bpf_verifier_env *env,
 	 * is only allowed in its original, unmodified form.
 	 */

+	if (reg->off < 0) {
+		verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
+			reg_type_str(env, reg->type), regno, reg->off);
+		return -EACCES;
+	}
+
 	if (!fixed_off_ok && reg->off) {
 		verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
 			reg_type_str(env, reg->type), regno, reg->off);
diff --git a/tools/testing/selftests/bpf/verifier/bounds_deduction.c b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
index 91869aea6d64..3931c481e30c 100644
--- a/tools/testing/selftests/bpf/verifier/bounds_deduction.c
+++ b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
@@ -105,7 +105,7 @@
 		BPF_EXIT_INSN(),
 	},
 	.errstr_unpriv = "R1 has pointer with unsupported alu operation",
-	.errstr = "dereference of modified ctx ptr",
+	.errstr = "negative offset ctx ptr R1 off=-1 disallowed",
 	.result = REJECT,
 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c
index 60f6fbe03f19..c8eaf0536c24 100644
--- a/tools/testing/selftests/bpf/verifier/ctx.c
+++ b/tools/testing/selftests/bpf/verifier/ctx.c
@@ -58,7 +58,7 @@
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	.result = REJECT,
-	.errstr = "dereference of modified ctx ptr",
+	.errstr = "negative offset ctx ptr R1 off=-612 disallowed",
 },
 {
 	"pass modified ctx pointer to helper, 2",
@@ -71,8 +71,8 @@
 	},
 	.result_unpriv = REJECT,
 	.result = REJECT,
-	.errstr_unpriv = "dereference of modified ctx ptr",
-	.errstr = "dereference of modified ctx ptr",
+	.errstr_unpriv = "negative offset ctx ptr R1 off=-612 disallowed",
+	.errstr = "negative offset ctx ptr R1 off=-612 disallowed",
 },
 {
 	"pass modified ctx pointer to helper, 3",
@@ -141,7 +141,7 @@
 	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 	.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
 	.result = REJECT,
-	.errstr = "dereference of modified ctx ptr",
+	.errstr = "negative offset ctx ptr R1 off=-612 disallowed",
 },
 {
 	"pass ctx or null check, 5: null (connect)",

From 24d5bb806c7e2c0b9972564fd493069f612d90dd Mon Sep 17 00:00:00 2001
From: Kumar Kartikeya Dwivedi
Date: Sat, 5 Mar 2022 04:16:41 +0530
Subject: [PATCH 007/137] bpf: Harden register offset checks for release helpers and kfuncs

Let's ensure that the PTR_TO_BTF_ID reg being passed in to release BPF helpers and kfuncs always has its offset set to 0. While not a real problem now, there's a very real possibility this will become a problem when more and more kfuncs are exposed, and more BPF helpers are added which can release PTR_TO_BTF_ID.

Previous commits already protected against non-zero var_off. One of the cases we are concerned about now is when we have a type that can be returned by e.g. an acquire kfunc:

	struct foo {
		int a;
		int b;
		struct bar bar;
	};

... and struct bar is also a type that can be returned by another acquire kfunc. Then, doing the following sequence:

	struct foo *f = bpf_get_foo(); // acquire kfunc
	if (!f)
		return 0;
	bpf_put_bar(&f->bar); // release kfunc

... would work with the current code, since btf_struct_ids_match takes reg->off into account for matching the pointer type with the release kfunc argument type, but would obviously be incorrect, and most likely lead to a kernel crash. A test has been included later to prevent regressions in this area.
Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220304224645.3677453-5-memxor@gmail.com --- include/linux/bpf_verifier.h | 3 ++- kernel/bpf/btf.c | 33 +++++++++++++++++++-------------- kernel/bpf/verifier.c | 25 ++++++++++++++++++++++--- 3 files changed, 43 insertions(+), 18 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 38b24ee8d8c2..c1fc4af47f69 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -523,7 +523,8 @@ int check_ptr_off_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno); int check_func_arg_reg_off(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno, - enum bpf_arg_type arg_type); + enum bpf_arg_type arg_type, + bool is_release_func); int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno); int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 7f6a0ae5028b..162807e3b4a5 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -5753,6 +5753,10 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, return -EINVAL; } + /* Only kfunc can be release func */ + if (is_kfunc) + rel = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog), + BTF_KFUNC_TYPE_RELEASE, func_id); /* check that BTF function arguments match actual types that the * verifier sees. */ @@ -5777,7 +5781,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); ref_tname = btf_name_by_offset(btf, ref_t->name_off); - ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE); + ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE, rel); if (ret < 0) return ret; @@ -5809,7 +5813,11 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, if (reg->type == PTR_TO_BTF_ID) { reg_btf = reg->btf; reg_ref_id = reg->btf_id; - /* Ensure only one argument is referenced PTR_TO_BTF_ID */ + /* Ensure only one argument is referenced + * PTR_TO_BTF_ID, check_func_arg_reg_off relies + * on only one referenced register being allowed + * for kfuncs. + */ if (reg->ref_obj_id) { if (ref_obj_id) { bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", @@ -5891,18 +5899,15 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, /* Either both are set, or neither */ WARN_ON_ONCE((ref_obj_id && !ref_regno) || (!ref_obj_id && ref_regno)); - if (is_kfunc) { - rel = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog), - BTF_KFUNC_TYPE_RELEASE, func_id); - /* We already made sure ref_obj_id is set only for one argument */ - if (rel && !ref_obj_id) { - bpf_log(log, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", - func_name); - return -EINVAL; - } - /* Allow (!rel && ref_obj_id), so that passing such referenced PTR_TO_BTF_ID to - * other kfuncs works - */ + /* We already made sure ref_obj_id is set only for one argument. We do + * allow (!rel && ref_obj_id), so that passing such referenced + * PTR_TO_BTF_ID to other kfuncs works. Note that rel is only true when + * is_kfunc is true. + */ + if (rel && !ref_obj_id) { + bpf_log(log, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", + func_name); + return -EINVAL; } /* returns argument register number > 0 in case of reference release kfunc */ return rel ? 
ref_regno : 0; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 455b4ab69e47..fe9a513e2314 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5367,10 +5367,11 @@ found: int check_func_arg_reg_off(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno, - enum bpf_arg_type arg_type) + enum bpf_arg_type arg_type, + bool is_release_func) { + bool fixed_off_ok = false, release_reg; enum bpf_reg_type type = reg->type; - bool fixed_off_ok = false; switch ((u32)type) { case SCALAR_VALUE: @@ -5395,6 +5396,21 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env, * fixed offset. */ case PTR_TO_BTF_ID: + /* When referenced PTR_TO_BTF_ID is passed to release function, + * it's fixed offset must be 0. We rely on the property that + * only one referenced register can be passed to BPF helpers and + * kfuncs. In the other cases, fixed offset can be non-zero. + */ + release_reg = is_release_func && reg->ref_obj_id; + if (release_reg && reg->off) { + verbose(env, "R%d must have zero offset when passed to release func\n", + regno); + return -EINVAL; + } + /* For release_reg == true, fixed_off_ok must be false, but we + * already checked and rejected reg->off != 0 above, so set to + * true to allow fixed offset for all other cases. + */ fixed_off_ok = true; break; default: @@ -5452,11 +5468,14 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, if (err) return err; - err = check_func_arg_reg_off(env, reg, regno, arg_type); + err = check_func_arg_reg_off(env, reg, regno, arg_type, is_release_function(meta->func_id)); if (err) return err; skip_type_check: + /* check_func_arg_reg_off relies on only one referenced register being + * allowed for BPF helpers. + */ if (reg->ref_obj_id) { if (meta->ref_obj_id) { verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", From f014a00bbeb09cea16017b82448d32a468a6b96f Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Sat, 5 Mar 2022 04:16:42 +0530 Subject: [PATCH 008/137] compiler-clang.h: Add __diag infrastructure for clang Add __diag macros similar to those in compiler-gcc.h, so that warnings that need to be adjusted for specific cases but not globally can be ignored when building with clang. Signed-off-by: Nathan Chancellor Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220304224645.3677453-6-memxor@gmail.com [ Kartikeya: wrote commit message ] --- include/linux/compiler-clang.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 3c4de9b6c6e3..f1aa41d520bd 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -68,3 +68,25 @@ #define __nocfi __attribute__((__no_sanitize__("cfi"))) #define __cficanonical __attribute__((__cfi_canonical_jump_table__)) + +/* + * Turn individual warnings and errors on and off locally, depending + * on version. 
+ */ +#define __diag_clang(version, severity, s) \ + __diag_clang_ ## version(__diag_clang_ ## severity s) + +/* Severity used in pragma directives */ +#define __diag_clang_ignore ignored +#define __diag_clang_warn warning +#define __diag_clang_error error + +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(clang diagnostic s)) + +#if CONFIG_CLANG_VERSION >= 110000 +#define __diag_clang_11(s) __diag(s) +#else +#define __diag_clang_11(s) +#endif From 4d1ea705d797e66edd70ffa708b83888a210a437 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 5 Mar 2022 04:16:43 +0530 Subject: [PATCH 009/137] compiler_types.h: Add unified __diag_ignore_all for GCC/LLVM Add a __diag_ignore_all macro, to ignore warnings for both GCC and LLVM, without having to specify the compiler type and version. By default, GCC 8 and clang 11 are used. This will be used by bpf subsystem to ignore -Wmissing-prototypes warning for functions that are meant to be global functions so that they are in vmlinux BTF, but don't have a prototype. Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220304224645.3677453-7-memxor@gmail.com --- include/linux/compiler-clang.h | 3 +++ include/linux/compiler-gcc.h | 3 +++ include/linux/compiler_types.h | 4 ++++ 3 files changed, 10 insertions(+) diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index f1aa41d520bd..babb1347148c 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -90,3 +90,6 @@ #else #define __diag_clang_11(s) #endif + +#define __diag_ignore_all(option, comment) \ + __diag_clang(11, ignore, option) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index ccbbd31b3aae..d364c98a4a80 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -151,6 +151,9 @@ #define __diag_GCC_8(s) #endif +#define __diag_ignore_all(option, comment) \ + __diag_GCC(8, ignore, option) + /* * Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size" * attribute) do not work, and must be disabled. diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 3f31ff400432..8e5d2f50f951 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -371,4 +371,8 @@ struct ftrace_likely_data { #define __diag_error(compiler, version, option, comment) \ __diag_ ## compiler(version, error, option) +#ifndef __diag_ignore_all +#define __diag_ignore_all(option, comment) +#endif + #endif /* __LINUX_COMPILER_TYPES_H */ From 0b206c6d1066f1ee85e4238a1e34d7437148a9a3 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 5 Mar 2022 04:16:44 +0530 Subject: [PATCH 010/137] bpf: Replace __diag_ignore with unified __diag_ignore_all Currently, -Wmissing-prototypes warning is ignored for GCC, but not clang. This leads to clang build warning in W=1 mode. Since the flag used by both compilers is same, we can use the unified __diag_ignore_all macro that works for all supported versions and compilers which have __diag macro support (currently GCC >= 8.0, and Clang >= 11.0). Also add nf_conntrack_bpf.h include to prevent missing prototype warning for register_nf_conntrack_bpf. 
Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220304224645.3677453-8-memxor@gmail.com --- net/bpf/test_run.c | 4 ++-- net/netfilter/nf_conntrack_bpf.c | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index eb129e48f90b..fcc83017cd03 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -201,8 +201,8 @@ out: * future. */ __diag_push(); -__diag_ignore(GCC, 8, "-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__diag_ignore_all("-Wmissing-prototypes", + "Global functions as their definitions will be in vmlinux BTF"); int noinline bpf_fentry_test1(int a) { return a + 1; diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c index 8ad3f52579f3..fe98673dd5ac 100644 --- a/net/netfilter/nf_conntrack_bpf.c +++ b/net/netfilter/nf_conntrack_bpf.c @@ -12,6 +12,7 @@ #include #include #include +#include #include /* bpf_ct_opts - Options for CT lookup helpers @@ -102,8 +103,8 @@ static struct nf_conn *__bpf_nf_ct_lookup(struct net *net, } __diag_push(); -__diag_ignore(GCC, 8, "-Wmissing-prototypes", - "Global functions as their definitions will be in nf_conntrack BTF"); +__diag_ignore_all("-Wmissing-prototypes", + "Global functions as their definitions will be in nf_conntrack BTF"); /* bpf_xdp_ct_lookup - Lookup CT entry for the given tuple, and acquire a * reference to it From 8218ccb5bd68976ed5d75028ef50c13a857eee25 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 5 Mar 2022 04:16:45 +0530 Subject: [PATCH 011/137] selftests/bpf: Add tests for kfunc register offset checks Include a few verifier selftests that test against the problems being fixed by the previous commits, i.e. that release kfuncs always require the PTR_TO_BTF_ID's fixed and variable offsets to be 0, and that a negative offset is not permitted and produces a helpful error message.
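In rough C terms, the first new test below exercises the following pattern (a sketch; struct prog_test_ref_kfunc and the kfuncs are the test stubs from net/bpf/test_run.c, and the exact stub signatures may differ):

    struct prog_test_ref_kfunc *p;
    unsigned long sd = 0;

    p = bpf_kfunc_call_test_acquire(&sd);
    if (!p)
            return 0;
    /* &p->memb is p + 8: a non-zero fixed offset on a referenced
     * register, so the release call must be rejected
     */
    bpf_kfunc_call_memb_release(&p->memb);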
Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220304224645.3677453-9-memxor@gmail.com --- net/bpf/test_run.c | 11 +++ tools/testing/selftests/bpf/verifier/calls.c | 83 ++++++++++++++++++++ 2 files changed, 94 insertions(+) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index fcc83017cd03..ba410b069824 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -270,9 +270,14 @@ struct sock * noinline bpf_kfunc_call_test3(struct sock *sk) return sk; } +struct prog_test_member { + u64 c; +}; + struct prog_test_ref_kfunc { int a; int b; + struct prog_test_member memb; struct prog_test_ref_kfunc *next; }; @@ -295,6 +300,10 @@ noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) { } +noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p) +{ +} + struct prog_test_pass1 { int x0; struct { @@ -379,6 +388,7 @@ BTF_ID(func, bpf_kfunc_call_test2) BTF_ID(func, bpf_kfunc_call_test3) BTF_ID(func, bpf_kfunc_call_test_acquire) BTF_ID(func, bpf_kfunc_call_test_release) +BTF_ID(func, bpf_kfunc_call_memb_release) BTF_ID(func, bpf_kfunc_call_test_pass_ctx) BTF_ID(func, bpf_kfunc_call_test_pass1) BTF_ID(func, bpf_kfunc_call_test_pass2) @@ -396,6 +406,7 @@ BTF_SET_END(test_sk_acquire_kfunc_ids) BTF_SET_START(test_sk_release_kfunc_ids) BTF_ID(func, bpf_kfunc_call_test_release) +BTF_ID(func, bpf_kfunc_call_memb_release) BTF_SET_END(test_sk_release_kfunc_ids) BTF_SET_START(test_sk_ret_null_kfunc_ids) diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index f890333259ad..2e03decb11b6 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -115,6 +115,89 @@ { "bpf_kfunc_call_test_release", 5 }, }, }, +{ + "calls: invalid kfunc call: reg->off must be zero when passed to release kfunc", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = REJECT, + .errstr = "R1 must have zero offset when passed to release func", + .fixup_kfunc_btf_id = { + { "bpf_kfunc_call_test_acquire", 3 }, + { "bpf_kfunc_call_memb_release", 8 }, + }, +}, +{ + "calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 16), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .fixup_kfunc_btf_id = { + { "bpf_kfunc_call_test_acquire", 3 }, + { "bpf_kfunc_call_test_release", 9 }, + }, + .result_unpriv = REJECT, + .result = REJECT, + .errstr = "negative offset ptr_ ptr R1 off=-4 disallowed", +}, +{ + "calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset", + .insns = { + 
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), + BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .fixup_kfunc_btf_id = { + { "bpf_kfunc_call_test_acquire", 3 }, + { "bpf_kfunc_call_test_release", 9 }, + { "bpf_kfunc_call_test_release", 13 }, + { "bpf_kfunc_call_test_release", 17 }, + }, + .result_unpriv = REJECT, + .result = REJECT, + .errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed", +}, { "calls: basic sanity", .insns = { From bff61f6faedb36db6b135da898840d29aa74cbbb Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Fri, 4 Mar 2022 11:16:54 -0800 Subject: [PATCH 012/137] bpf: Fix checking PTR_TO_BTF_ID in check_mem_access With the introduction of MEM_USER in commit c6f1bfe89ac9 ("bpf: reject program if a __user tagged memory accessed in kernel way") PTR_TO_BTF_ID can be combined with a MEM_USER tag. Therefore, most likely, when we compare reg_type against PTR_TO_BTF_ID, we want to use the reg's base_type. Previously, the check in check_mem_access() wanted to say: if the reg is BTF_ID but not NULL, the execution flow falls into the 'then' branch. But now a reg of (BTF_ID | MEM_USER), which should go into the 'then' branch, goes into the 'else'. The end results before and after this patch are the same: regs tagged with MEM_USER get rejected, but not in a way we intended. So fix the condition; the error message is now correct. Before (log from commit 696c39011538): $ ./test_progs -v -n 22/3 ... libbpf: prog 'test_user1': BPF program load failed: Permission denied libbpf: prog 'test_user1': -- BEGIN PROG LOAD LOG -- R1 type=ctx expected=fp 0: R1=ctx(id=0,off=0,imm=0) R10=fp0 ; int BPF_PROG(test_user1, struct bpf_testmod_btf_type_tag_1 *arg) 0: (79) r1 = *(u64 *)(r1 +0) func 'bpf_testmod_test_btf_type_tag_user_1' arg0 has btf_id 136561 type STRUCT 'bpf_testmod_btf_type_tag_1' 1: R1_w=user_ptr_bpf_testmod_btf_type_tag_1(id=0,off=0,imm=0) ; g = arg->a; 1: (61) r1 = *(u32 *)(r1 +0) R1 invalid mem access 'user_ptr_' Now: libbpf: prog 'test_user1': BPF program load failed: Permission denied libbpf: prog 'test_user1': -- BEGIN PROG LOAD LOG -- R1 type=ctx expected=fp 0: R1=ctx(id=0,off=0,imm=0) R10=fp0 ; int BPF_PROG(test_user1, struct bpf_testmod_btf_type_tag_1 *arg) 0: (79) r1 = *(u64 *)(r1 +0) func 'bpf_testmod_test_btf_type_tag_user_1' arg0 has btf_id 104036 type STRUCT 'bpf_testmod_btf_type_tag_1' 1: R1_w=user_ptr_bpf_testmod_btf_type_tag_1(id=0,ref_obj_id=0,off=0,imm=0) ; g = arg->a; 1: (61) r1 = *(u32 *)(r1 +0) R1 is ptr_bpf_testmod_btf_type_tag_1 access user memory: off=0 Note the error message for the reason of rejection.
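As a sketch of the mechanics (mirroring the helpers in include/linux/bpf_verifier.h): modifier flags such as MEM_USER live in the bits above BPF_BASE_TYPE_BITS, so a plain equality test on reg->type no longer matches a tagged register, while masking with base_type() does:

    #define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

    static inline u32 base_type(u32 type)
    {
            return type & BPF_BASE_TYPE_MASK;
    }

    /* (PTR_TO_BTF_ID | MEM_USER) == PTR_TO_BTF_ID is false, but
     * base_type(PTR_TO_BTF_ID | MEM_USER) == PTR_TO_BTF_ID is true.
     */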
Fixes: c6f1bfe89ac9 ("bpf: reject program if a __user tagged memory accessed in kernel way") Signed-off-by: Hao Luo Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220304191657.981240-2-haoluo@google.com --- kernel/bpf/verifier.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index fe9a513e2314..7a6b58fea37d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4562,7 +4562,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn err = check_tp_buffer_access(env, reg, regno, off, size); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); - } else if (reg->type == PTR_TO_BTF_ID) { + } else if (base_type(reg->type) == PTR_TO_BTF_ID && + !type_may_be_null(reg->type)) { err = check_ptr_to_btf_access(env, regs, regno, off, size, t, value_regno); } else if (reg->type == CONST_PTR_TO_MAP) { From 9216c916237805c93d054ed022afb172ddbc3ed1 Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Fri, 4 Mar 2022 11:16:55 -0800 Subject: [PATCH 013/137] compiler_types: Define __percpu as __attribute__((btf_type_tag("percpu"))) This is similar to commit 7472d5a642c9 ("compiler_types: define __user as __attribute__((btf_type_tag("user")))"), where a type tag "user" was introduced to identify the pointers that point to user memory. With that change, the newest compile toolchain can encode __user information into vmlinux BTF, which can be used by the BPF verifier to enforce safe program behaviors. Similarly, we have __percpu attribute, which is mainly used to indicate memory is allocated in percpu region. The __percpu pointers in kernel are supposed to be used together with functions like per_cpu_ptr() and this_cpu_ptr(), which perform necessary calculation on the pointer's base address. Without the btf_type_tag introduced in this patch, __percpu pointers will be treated as regular memory pointers in vmlinux BTF and BPF programs are allowed to directly dereference them, generating incorrect behaviors. Now with "percpu" btf_type_tag, the BPF verifier is able to differentiate __percpu pointers from regular pointers and forbids unexpected behaviors like direct load. The following is an example similar to the one given in commit 7472d5a642c9: [$ ~] cat test.c #define __percpu __attribute__((btf_type_tag("percpu"))) int foo(int __percpu *arg) { return *arg; } [$ ~] clang -O2 -g -c test.c [$ ~] pahole -JV test.o ... File test.o: [1] INT int size=4 nr_bits=32 encoding=SIGNED [2] TYPE_TAG percpu type_id=1 [3] PTR (anon) type_id=2 [4] FUNC_PROTO (anon) return=1 args=(3 arg) [5] FUNC foo type_id=4 [$ ~] for the function argument "int __percpu *arg", its type is described as PTR -> TYPE_TAG(percpu) -> INT The kernel can use this information for bpf verification or other use cases. Like commit 7472d5a642c9, this feature requires clang (>= clang14) and pahole (>= 1.23). 
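On the BPF program side, the intended consumption pattern would look roughly like this (a sketch; the struct and field names are illustrative, bpf_per_cpu_ptr() is the existing helper, and the verifier enforcement lands in the following patches of this series):

    struct counters __percpu *pcpu = obj->counters;	/* tagged in BTF */
    struct counters *c;

    c = bpf_per_cpu_ptr(pcpu, bpf_get_smp_processor_id());
    if (c)
            total += c->events;	/* a direct pcpu->events load gets rejected */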
Signed-off-by: Hao Luo Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220304191657.981240-3-haoluo@google.com --- include/linux/compiler_types.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 8e5d2f50f951..b9a8ae9440c7 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -38,7 +38,12 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } # define __user # endif # define __iomem -# define __percpu +# if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \ + __has_attribute(btf_type_tag) +# define __percpu __attribute__((btf_type_tag("percpu"))) +# else +# define __percpu +# endif # define __rcu # define __chk_user_ptr(x) (void)0 # define __chk_io_ptr(x) (void)0 From 5844101a1be9b8636024cb31c865ef13c7cc6db3 Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Fri, 4 Mar 2022 11:16:56 -0800 Subject: [PATCH 014/137] bpf: Reject programs that try to load __percpu memory. With the introduction of the btf_type_tag "percpu", we can add a MEM_PERCPU to identify those pointers that point to percpu memory. The ability to differentiate percpu pointers from regular memory pointers has two benefits: 1. It forbids unexpected use of percpu pointers, such as direct loads. In the kernel, there are special functions used for accessing percpu memory. Directly loading percpu memory is meaningless. We already have BPF helpers like bpf_per_cpu_ptr() and bpf_this_cpu_ptr() that wrap the kernel percpu functions. So we can now convert percpu pointers into regular pointers in a safe way. 2. Previously, bpf_per_cpu_ptr() and bpf_this_cpu_ptr() only worked on PTR_TO_PERCPU_BTF_ID, a special reg_type which describes static percpu variables in the kernel (we rely on pahole to encode them into vmlinux BTF). Now, since we can identify __percpu tagged pointers, we can also identify dynamically allocated percpu memory. It means we can use bpf_xxx_cpu_ptr() on dynamic percpu memory. This would be very convenient when accessing fields like "cgroup->rstat_cpu". Signed-off-by: Hao Luo Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220304191657.981240-4-haoluo@google.com --- include/linux/bpf.h | 11 +++++++++-- kernel/bpf/btf.c | 8 +++++++- kernel/bpf/verifier.c | 24 ++++++++++++++---------- 3 files changed, 30 insertions(+), 13 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f19abc59b6cd..88449fbbe063 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -334,7 +334,15 @@ enum bpf_type_flag { /* MEM is in user address space. */ MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS), - __BPF_TYPE_LAST_FLAG = MEM_USER, + /* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged + * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In + * order to drop this tag, it must be passed into bpf_per_cpu_ptr() + * or bpf_this_cpu_ptr(), which will return the pointer corresponding + * to the specified cpu. + */ + MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS), + + __BPF_TYPE_LAST_FLAG = MEM_PERCPU, }; /* Max number of base types.
*/ @@ -516,7 +524,6 @@ enum bpf_reg_type { */ PTR_TO_MEM, /* reg points to valid memory region */ PTR_TO_BUF, /* reg points to a read/write buffer */ - PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */ PTR_TO_FUNC, /* reg points to a bpf program function */ __BPF_REG_TYPE_MAX, diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 162807e3b4a5..8b34563a832e 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -5057,6 +5057,8 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, tag_value = __btf_name_by_offset(btf, t->name_off); if (strcmp(tag_value, "user") == 0) info->reg_type |= MEM_USER; + if (strcmp(tag_value, "percpu") == 0) + info->reg_type |= MEM_PERCPU; } /* skip modifiers */ @@ -5285,12 +5287,16 @@ error: return -EACCES; } - /* check __user tag */ + /* check type tag */ t = btf_type_by_id(btf, mtype->type); if (btf_type_is_type_tag(t)) { tag_value = __btf_name_by_offset(btf, t->name_off); + /* check __user tag */ if (strcmp(tag_value, "user") == 0) tmp_flag = MEM_USER; + /* check __percpu tag */ + if (strcmp(tag_value, "percpu") == 0) + tmp_flag = MEM_PERCPU; } stype = btf_type_skip_modifiers(btf, mtype->type, &id); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7a6b58fea37d..ec3a7b6c9515 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -554,7 +554,6 @@ static const char *reg_type_str(struct bpf_verifier_env *env, [PTR_TO_TP_BUFFER] = "tp_buffer", [PTR_TO_XDP_SOCK] = "xdp_sock", [PTR_TO_BTF_ID] = "ptr_", - [PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_", [PTR_TO_MEM] = "mem", [PTR_TO_BUF] = "buf", [PTR_TO_FUNC] = "func", @@ -562,8 +561,7 @@ static const char *reg_type_str(struct bpf_verifier_env *env, }; if (type & PTR_MAYBE_NULL) { - if (base_type(type) == PTR_TO_BTF_ID || - base_type(type) == PTR_TO_PERCPU_BTF_ID) + if (base_type(type) == PTR_TO_BTF_ID) strncpy(postfix, "or_null_", 16); else strncpy(postfix, "_or_null", 16); @@ -575,6 +573,8 @@ static const char *reg_type_str(struct bpf_verifier_env *env, strncpy(prefix, "alloc_", 32); if (type & MEM_USER) strncpy(prefix, "user_", 32); + if (type & MEM_PERCPU) + strncpy(prefix, "percpu_", 32); snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s", prefix, str[base_type(type)], postfix); @@ -697,8 +697,7 @@ static void print_verifier_state(struct bpf_verifier_env *env, const char *sep = ""; verbose(env, "%s", reg_type_str(env, t)); - if (base_type(t) == PTR_TO_BTF_ID || - base_type(t) == PTR_TO_PERCPU_BTF_ID) + if (base_type(t) == PTR_TO_BTF_ID) verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id)); verbose(env, "("); /* @@ -2783,7 +2782,6 @@ static bool is_spillable_regtype(enum bpf_reg_type type) case PTR_TO_XDP_SOCK: case PTR_TO_BTF_ID: case PTR_TO_BUF: - case PTR_TO_PERCPU_BTF_ID: case PTR_TO_MEM: case PTR_TO_FUNC: case PTR_TO_MAP_KEY: @@ -4203,6 +4201,13 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, return -EACCES; } + if (reg->type & MEM_PERCPU) { + verbose(env, + "R%d is ptr_%s access percpu memory: off=%d\n", + regno, tname, off); + return -EACCES; + } + if (env->ops->btf_struct_access) { ret = env->ops->btf_struct_access(&env->log, reg->btf, t, off, size, atype, &btf_id, &flag); @@ -4809,7 +4814,7 @@ static int check_stack_range_initialized( } if (is_spilled_reg(&state->stack[spi]) && - state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) + base_type(state->stack[spi].spilled_ptr.type) == PTR_TO_BTF_ID) goto mark; if (is_spilled_reg(&state->stack[spi]) && @@ -5265,7 +5270,7 @@ static const struct bpf_reg_types alloc_mem_types = { 
.types = { PTR_TO_MEM | ME static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } }; static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } }; static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } }; -static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_PERCPU_BTF_ID } }; +static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_BTF_ID | MEM_PERCPU } }; static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; @@ -9677,7 +9682,6 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) dst_reg->mem_size = aux->btf_var.mem_size; break; case PTR_TO_BTF_ID: - case PTR_TO_PERCPU_BTF_ID: dst_reg->btf = aux->btf_var.btf; dst_reg->btf_id = aux->btf_var.btf_id; break; @@ -11877,7 +11881,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env, type = t->type; t = btf_type_skip_modifiers(btf, type, NULL); if (percpu) { - aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID; + aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU; aux->btf_var.btf = btf; aux->btf_var.btf_id = type; } else if (!btf_type_is_struct(t)) { From 50c6b8a9aea2b8dc6c4ffb0cc502b94f7f57a0dc Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Fri, 4 Mar 2022 11:16:57 -0800 Subject: [PATCH 015/137] selftests/bpf: Add a test for btf_type_tag "percpu" Add test for percpu btf_type_tag. Similar to the "user" tag, we test the following cases: 1. __percpu struct field. 2. __percpu as function parameter. 3. per_cpu_ptr() accepts dynamically allocated __percpu memory. Because the test for "user" and the test for "percpu" are very similar, a little bit of refactoring has been done in btf_tag.c. Basically, both tests share the same function for loading vmlinux and module btf. Example output from log: > ./test_progs -v -t btf_tag libbpf: prog 'test_percpu1': BPF program load failed: Permission denied libbpf: prog 'test_percpu1': -- BEGIN PROG LOAD LOG -- ... ; g = arg->a; 1: (61) r1 = *(u32 *)(r1 +0) R1 is ptr_bpf_testmod_btf_type_tag_1 access percpu memory: off=0 ... test_btf_type_tag_mod_percpu:PASS:btf_type_tag_percpu 0 nsec #26/6 btf_tag/btf_type_tag_percpu_mod1:OK libbpf: prog 'test_percpu2': BPF program load failed: Permission denied libbpf: prog 'test_percpu2': -- BEGIN PROG LOAD LOG -- ... ; g = arg->p->a; 2: (61) r1 = *(u32 *)(r1 +0) R1 is ptr_bpf_testmod_btf_type_tag_1 access percpu memory: off=0 ... test_btf_type_tag_mod_percpu:PASS:btf_type_tag_percpu 0 nsec #26/7 btf_tag/btf_type_tag_percpu_mod2:OK libbpf: prog 'test_percpu_load': BPF program load failed: Permission denied libbpf: prog 'test_percpu_load': -- BEGIN PROG LOAD LOG -- ... ; g = (__u64)cgrp->rstat_cpu->updated_children; 2: (79) r1 = *(u64 *)(r1 +48) R1 is ptr_cgroup_rstat_cpu access percpu memory: off=48 ... 
test_btf_type_tag_vmlinux_percpu:PASS:btf_type_tag_percpu_load 0 nsec #26/8 btf_tag/btf_type_tag_percpu_vmlinux_load:OK load_btfs:PASS:could not load vmlinux BTF 0 nsec test_btf_type_tag_vmlinux_percpu:PASS:btf_type_tag_percpu 0 nsec test_btf_type_tag_vmlinux_percpu:PASS:btf_type_tag_percpu_helper 0 nsec #26/9 btf_tag/btf_type_tag_percpu_vmlinux_helper:OK Signed-off-by: Hao Luo Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220304191657.981240-5-haoluo@google.com --- .../selftests/bpf/bpf_testmod/bpf_testmod.c | 14 ++ .../selftests/bpf/prog_tests/btf_tag.c | 164 ++++++++++++++---- .../selftests/bpf/progs/btf_type_tag_percpu.c | 66 +++++++ 3 files changed, 215 insertions(+), 29 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 27d63be47b95..e585e1cefc77 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -33,6 +33,10 @@ struct bpf_testmod_btf_type_tag_2 { struct bpf_testmod_btf_type_tag_1 __user *p; }; +struct bpf_testmod_btf_type_tag_3 { + struct bpf_testmod_btf_type_tag_1 __percpu *p; +}; + noinline int bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) { BTF_TYPE_EMIT(func_proto_typedef); @@ -46,6 +50,16 @@ bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) { return arg->p->a; } +noinline int +bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) { + return arg->a; +} + +noinline int +bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) { + return arg->p->a; +} + noinline int bpf_testmod_loop_test(int n) { int i, sum = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/btf_tag.c b/tools/testing/selftests/bpf/prog_tests/btf_tag.c index f7560b54a6bb..071430cd54de 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_tag.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_tag.c @@ -10,6 +10,7 @@ struct btf_type_tag_test { }; #include "btf_type_tag.skel.h" #include "btf_type_tag_user.skel.h" +#include "btf_type_tag_percpu.skel.h" static void test_btf_decl_tag(void) { @@ -43,38 +44,81 @@ static void test_btf_type_tag(void) btf_type_tag__destroy(skel); } -static void test_btf_type_tag_mod_user(bool load_test_user1) +/* loads vmlinux_btf as well as module_btf. If the caller passes NULL as + * module_btf, it will not load module btf. + * + * Returns 0 on success. + * Returns -1 on error. In case of error, the loaded btf will be freed and the + * input parameters will be set to NULL.
+ */ +static int load_btfs(struct btf **vmlinux_btf, struct btf **module_btf, + bool needs_vmlinux_tag) { const char *module_name = "bpf_testmod"; - struct btf *vmlinux_btf, *module_btf; - struct btf_type_tag_user *skel; __s32 type_id; - int err; if (!env.has_testmod) { test__skip(); - return; + return -1; } - /* skip the test if the module does not have __user tags */ - vmlinux_btf = btf__load_vmlinux_btf(); - if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF")) - return; + *vmlinux_btf = btf__load_vmlinux_btf(); + if (!ASSERT_OK_PTR(*vmlinux_btf, "could not load vmlinux BTF")) + return -1; - module_btf = btf__load_module_btf(module_name, vmlinux_btf); - if (!ASSERT_OK_PTR(module_btf, "could not load module BTF")) + if (!needs_vmlinux_tag) + goto load_module_btf; + + /* skip the test if the vmlinux does not have __user tags */ + type_id = btf__find_by_name_kind(*vmlinux_btf, "user", BTF_KIND_TYPE_TAG); + if (type_id <= 0) { + printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__); + test__skip(); + goto free_vmlinux_btf; + } + +load_module_btf: + /* skip loading module_btf, if not requested by caller */ + if (!module_btf) + return 0; + + *module_btf = btf__load_module_btf(module_name, *vmlinux_btf); + if (!ASSERT_OK_PTR(*module_btf, "could not load module BTF")) goto free_vmlinux_btf; - type_id = btf__find_by_name_kind(module_btf, "user", BTF_KIND_TYPE_TAG); + /* skip the test if the module does not have __user tags */ + type_id = btf__find_by_name_kind(*module_btf, "user", BTF_KIND_TYPE_TAG); if (type_id <= 0) { printf("%s:SKIP: btf_type_tag attribute not in %s", __func__, module_name); test__skip(); goto free_module_btf; } + return 0; + +free_module_btf: + btf__free(*module_btf); +free_vmlinux_btf: + btf__free(*vmlinux_btf); + + *vmlinux_btf = NULL; + if (module_btf) + *module_btf = NULL; + return -1; +} + +static void test_btf_type_tag_mod_user(bool load_test_user1) +{ + struct btf *vmlinux_btf = NULL, *module_btf = NULL; + struct btf_type_tag_user *skel; + int err; + + if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false)) + return; + skel = btf_type_tag_user__open(); if (!ASSERT_OK_PTR(skel, "btf_type_tag_user")) - goto free_module_btf; + goto cleanup; bpf_program__set_autoload(skel->progs.test_sys_getsockname, false); if (load_test_user1) @@ -87,34 +131,23 @@ static void test_btf_type_tag_mod_user(bool load_test_user1) btf_type_tag_user__destroy(skel); -free_module_btf: +cleanup: btf__free(module_btf); -free_vmlinux_btf: btf__free(vmlinux_btf); } static void test_btf_type_tag_vmlinux_user(void) { struct btf_type_tag_user *skel; - struct btf *vmlinux_btf; - __s32 type_id; + struct btf *vmlinux_btf = NULL; int err; - /* skip the test if the vmlinux does not have __user tags */ - vmlinux_btf = btf__load_vmlinux_btf(); - if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF")) + if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true)) return; - type_id = btf__find_by_name_kind(vmlinux_btf, "user", BTF_KIND_TYPE_TAG); - if (type_id <= 0) { - printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__); - test__skip(); - goto free_vmlinux_btf; - } - skel = btf_type_tag_user__open(); if (!ASSERT_OK_PTR(skel, "btf_type_tag_user")) - goto free_vmlinux_btf; + goto cleanup; bpf_program__set_autoload(skel->progs.test_user2, false); bpf_program__set_autoload(skel->progs.test_user1, false); @@ -124,7 +157,70 @@ static void test_btf_type_tag_vmlinux_user(void) btf_type_tag_user__destroy(skel); -free_vmlinux_btf: +cleanup: + 
btf__free(vmlinux_btf); +} + +static void test_btf_type_tag_mod_percpu(bool load_test_percpu1) +{ + struct btf *vmlinux_btf, *module_btf; + struct btf_type_tag_percpu *skel; + int err; + + if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false)) + return; + + skel = btf_type_tag_percpu__open(); + if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu")) + goto cleanup; + + bpf_program__set_autoload(skel->progs.test_percpu_load, false); + bpf_program__set_autoload(skel->progs.test_percpu_helper, false); + if (load_test_percpu1) + bpf_program__set_autoload(skel->progs.test_percpu2, false); + else + bpf_program__set_autoload(skel->progs.test_percpu1, false); + + err = btf_type_tag_percpu__load(skel); + ASSERT_ERR(err, "btf_type_tag_percpu"); + + btf_type_tag_percpu__destroy(skel); + +cleanup: + btf__free(module_btf); + btf__free(vmlinux_btf); +} + +static void test_btf_type_tag_vmlinux_percpu(bool load_test) +{ + struct btf_type_tag_percpu *skel; + struct btf *vmlinux_btf = NULL; + int err; + + if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true)) + return; + + skel = btf_type_tag_percpu__open(); + if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu")) + goto cleanup; + + bpf_program__set_autoload(skel->progs.test_percpu2, false); + bpf_program__set_autoload(skel->progs.test_percpu1, false); + if (load_test) { + bpf_program__set_autoload(skel->progs.test_percpu_helper, false); + + err = btf_type_tag_percpu__load(skel); + ASSERT_ERR(err, "btf_type_tag_percpu_load"); + } else { + bpf_program__set_autoload(skel->progs.test_percpu_load, false); + + err = btf_type_tag_percpu__load(skel); + ASSERT_OK(err, "btf_type_tag_percpu_helper"); + } + + btf_type_tag_percpu__destroy(skel); + +cleanup: btf__free(vmlinux_btf); } @@ -134,10 +230,20 @@ void test_btf_tag(void) test_btf_decl_tag(); if (test__start_subtest("btf_type_tag")) test_btf_type_tag(); + if (test__start_subtest("btf_type_tag_user_mod1")) test_btf_type_tag_mod_user(true); if (test__start_subtest("btf_type_tag_user_mod2")) test_btf_type_tag_mod_user(false); if (test__start_subtest("btf_type_tag_sys_user_vmlinux")) test_btf_type_tag_vmlinux_user(); + + if (test__start_subtest("btf_type_tag_percpu_mod1")) + test_btf_type_tag_mod_percpu(true); + if (test__start_subtest("btf_type_tag_percpu_mod2")) + test_btf_type_tag_mod_percpu(false); + if (test__start_subtest("btf_type_tag_percpu_vmlinux_load")) + test_btf_type_tag_vmlinux_percpu(true); + if (test__start_subtest("btf_type_tag_percpu_vmlinux_helper")) + test_btf_type_tag_vmlinux_percpu(false); } diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c new file mode 100644 index 000000000000..8feddb8289cf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Google */ +#include "vmlinux.h" +#include +#include + +struct bpf_testmod_btf_type_tag_1 { + int a; +}; + +struct bpf_testmod_btf_type_tag_2 { + struct bpf_testmod_btf_type_tag_1 *p; +}; + +__u64 g; + +SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_1") +int BPF_PROG(test_percpu1, struct bpf_testmod_btf_type_tag_1 *arg) +{ + g = arg->a; + return 0; +} + +SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_2") +int BPF_PROG(test_percpu2, struct bpf_testmod_btf_type_tag_2 *arg) +{ + g = arg->p->a; + return 0; +} + +/* trace_cgroup_mkdir(struct cgroup *cgrp, const char *path) + * + * struct cgroup_rstat_cpu { + * ... + * struct cgroup *updated_children; + * ... 
+ * }; + * + * struct cgroup { + * ... + * struct cgroup_rstat_cpu __percpu *rstat_cpu; + * ... + * }; + */ +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(test_percpu_load, struct cgroup *cgrp, const char *path) +{ + g = (__u64)cgrp->rstat_cpu->updated_children; + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(test_percpu_helper, struct cgroup *cgrp, const char *path) +{ + struct cgroup_rstat_cpu *rstat; + __u32 cpu; + + cpu = bpf_get_smp_processor_id(); + rstat = (struct cgroup_rstat_cpu *)bpf_per_cpu_ptr(cgrp->rstat_cpu, cpu); + if (rstat) { + /* READ_ONCE */ + *(volatile int *)rstat; + } + + return 0; +} From 9c6e6a80ee741adf6cb3cfd8eef7d1554f91fceb Mon Sep 17 00:00:00 2001 From: lic121 Date: Tue, 1 Mar 2022 13:26:23 +0000 Subject: [PATCH 016/137] libbpf: Unmap rings when umem deleted xsk_umem__create() does mmap for fill/comp rings, but xsk_umem__delete() doesn't do the unmap. This works fine for regular cases, because xsk_socket__delete() does unmap for the rings. But for the case that xsk_socket__create_shared() fails, umem rings are not unmapped. fill_save/comp_save are checked to determine if rings have already been unmapped by xsk. If fill_save and comp_save are NULL, it means that the rings have already been used by xsk. Then they are supposed to be unmapped by xsk_socket__delete(). Otherwise, xsk_umem__delete() does the unmap. Fixes: 2f6324a3937f ("libbpf: Support shared umems between queues and devices") Signed-off-by: Cheng Li Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220301132623.GA19995@vscode.7~ --- tools/lib/bpf/xsk.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index edafe56664f3..32a2f5749c71 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -1193,12 +1193,23 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, int xsk_umem__delete(struct xsk_umem *umem) { + struct xdp_mmap_offsets off; + int err; + if (!umem) return 0; if (umem->refcount) return -EBUSY; + err = xsk_get_mmap_offsets(umem->fd, &off); + if (!err && umem->fill_save && umem->comp_save) { + munmap(umem->fill_save->ring - off.fr.desc, + off.fr.desc + umem->config.fill_size * sizeof(__u64)); + munmap(umem->comp_save->ring - off.cr.desc, + off.cr.desc + umem->config.comp_size * sizeof(__u64)); + } + close(umem->fd); free(umem); From 03b9c7fa3f15f51bcd07f3828c2a01311e7746c4 Mon Sep 17 00:00:00 2001 From: Yuntao Wang Date: Fri, 4 Mar 2022 15:04:08 +0800 Subject: [PATCH 017/137] bpf: Replace strncpy() with strscpy() Using strncpy() on NUL-terminated strings is considered deprecated[1]. Moreover, if the length of 'task->comm' is less than the destination buffer size, strncpy() will NUL-pad the destination buffer, which is a needless performance penalty. Replacing strncpy() with strscpy() fixes all these issues. [1] https://www.kernel.org/doc/html/latest/process/deprecated.html#strncpy-on-nul-terminated-strings Signed-off-by: Yuntao Wang Signed-off-by: Andrii Nakryiko Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220304070408.233658-1-ytcoode@gmail.com --- kernel/bpf/helpers.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index ae64110a98b5..315053ef6a75 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -225,13 +225,8 @@ BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size) if (unlikely(!task)) goto err_clear; - strncpy(buf, task->comm, size); - - /* Verifier guarantees that size > 0.
For task->comm exceeding - * size, guarantee that buf is %NUL-terminated. Unconditionally - * done here to save the size test. - */ - buf[size - 1] = 0; + /* Verifier guarantees that size > 0 */ + strscpy(buf, task->comm, size); return 0; err_clear: memset(buf, 0, size); From 04b6de649e124f2ea9693a25a744e646c1203ff9 Mon Sep 17 00:00:00 2001 From: Guo Zhengkui Date: Sun, 6 Mar 2022 10:34:26 +0800 Subject: [PATCH 018/137] libbpf: Fix array_size.cocci warning Fix the following coccicheck warning: tools/lib/bpf/bpf.c:114:31-32: WARNING: Use ARRAY_SIZE tools/lib/bpf/xsk.c:484:34-35: WARNING: Use ARRAY_SIZE tools/lib/bpf/xsk.c:485:35-36: WARNING: Use ARRAY_SIZE It has been tested with gcc (Debian 8.3.0-6) 8.3.0 on x86_64. Signed-off-by: Guo Zhengkui Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220306023426.19324-1-guozhengkui@vivo.com --- tools/lib/bpf/bpf.c | 3 ++- tools/lib/bpf/xsk.c | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 418b259166f8..3c7c180294fa 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include "bpf.h" @@ -111,7 +112,7 @@ int probe_memcg_account(void) BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns), BPF_EXIT_INSN(), }; - size_t insn_cnt = sizeof(insns) / sizeof(insns[0]); + size_t insn_cnt = ARRAY_SIZE(insns); union bpf_attr attr; int prog_fd; diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index 32a2f5749c71..af136f73b09d 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -481,8 +481,8 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk) BPF_EMIT_CALL(BPF_FUNC_redirect_map), BPF_EXIT_INSN(), }; - size_t insns_cnt[] = {sizeof(prog) / sizeof(struct bpf_insn), - sizeof(prog_redirect_flags) / sizeof(struct bpf_insn), + size_t insns_cnt[] = {ARRAY_SIZE(prog), + ARRAY_SIZE(prog_redirect_flags), }; struct bpf_insn *progs[] = {prog, prog_redirect_flags}; enum xsk_prog option = get_xsk_prog(); From 4989135a85334337ac8b7e42e7ee1a569ad5f7f5 Mon Sep 17 00:00:00 2001 From: Yuntao Wang Date: Sun, 6 Mar 2022 00:10:13 +0800 Subject: [PATCH 019/137] bpf: Remove redundant slash The trailing slash of LIBBPF_SRCS is redundant, remove it. Also inline it as it's only used in LIBBPF_INCLUDE. Signed-off-by: Yuntao Wang Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220305161013.361646-1-ytcoode@gmail.com --- kernel/bpf/preload/Makefile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/preload/Makefile b/kernel/bpf/preload/Makefile index 167534e3b0b4..20f89cc0a0a6 100644 --- a/kernel/bpf/preload/Makefile +++ b/kernel/bpf/preload/Makefile @@ -1,8 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 -LIBBPF_SRCS = $(srctree)/tools/lib/bpf/ -LIBBPF_INCLUDE = $(LIBBPF_SRCS)/.. +LIBBPF_INCLUDE = $(srctree)/tools/lib obj-$(CONFIG_BPF_PRELOAD_UMD) += bpf_preload.o -CFLAGS_bpf_preload_kern.o += -I $(LIBBPF_INCLUDE) +CFLAGS_bpf_preload_kern.o += -I$(LIBBPF_INCLUDE) bpf_preload-objs += bpf_preload_kern.o From 5ad0a415da6be8c13ed45c655e5acc9fa93557a9 Mon Sep 17 00:00:00 2001 From: KP Singh Date: Mon, 7 Mar 2022 13:30:47 +0000 Subject: [PATCH 020/137] bpf/docs: Update vmtest docs for static linking Dynamic linking when compiling on the host can cause issues when the libc version does not match the one in the VM image. Update the docs to explain how to use static linking to avoid this.
Before: ./vmtest.sh -- ./test_progs -t test_ima ./test_progs: /usr/lib/libc.so.6: version `GLIBC_2.33' not found (required by ./test_progs) After: LDLIBS=-static ./vmtest.sh -- ./test_progs -t test_ima test_ima:OK Summary: 1/0 PASSED, 0 SKIPPED, 0 FAILED Reported-by: "Geyslan G. Bem" Signed-off-by: KP Singh Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220307133048.1287644-1-kpsingh@kernel.org --- tools/testing/selftests/bpf/README.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst index d099d91adc3b..afe5ab2763bf 100644 --- a/tools/testing/selftests/bpf/README.rst +++ b/tools/testing/selftests/bpf/README.rst @@ -32,6 +32,14 @@ For more information on about using the script, run: $ tools/testing/selftests/bpf/vmtest.sh -h +In case of linker errors when running selftests, try using static linking: + +.. code-block:: console + + $ LDLIBS=-static vmtest.sh + +.. note:: Some distros may not support static linking. + .. note:: The script uses pahole and clang based on host environment setting. If you want to change pahole and llvm, you can change `PATH` environment variable in the beginning of script. From e878ae2d1df5de4ea36e6d96c7d3ebe789aab9a5 Mon Sep 17 00:00:00 2001 From: KP Singh Date: Mon, 7 Mar 2022 13:30:48 +0000 Subject: [PATCH 021/137] bpf/docs: Update list of architectures supported. vmtest.sh also supports s390x now. Signed-off-by: KP Singh Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220307133048.1287644-2-kpsingh@kernel.org --- tools/testing/selftests/bpf/README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst index afe5ab2763bf..eb1b7541f39d 100644 --- a/tools/testing/selftests/bpf/README.rst +++ b/tools/testing/selftests/bpf/README.rst @@ -44,7 +44,7 @@ In case of linker errors when running selftests, try using static linking: If you want to change pahole and llvm, you can change `PATH` environment variable in the beginning of script. -.. note:: The script currently only supports x86_64. +.. note:: The script currently only supports x86_64 and s390x architectures. Additional information about selftest failures are documented here. From 44e9a741cad824f45112b79c21d88c201d5aec13 Mon Sep 17 00:00:00 2001 From: Shung-Hsi Yu Date: Mon, 7 Mar 2022 13:29:18 +0800 Subject: [PATCH 022/137] bpf: Determine buf_info inside check_buffer_access() Instead of determining buf_info string in the caller of check_buffer_access(), we can determine whether the register type is read-only through type_is_rdonly_mem() helper inside check_buffer_access() and construct buf_info, making the code slightly cleaner. Signed-off-by: Shung-Hsi Yu Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/YiWYLnAkEZXBP/gH@syu-laptop --- kernel/bpf/verifier.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ec3a7b6c9515..e34264200e09 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4062,9 +4062,9 @@ static int check_buffer_access(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno, int off, int size, bool zero_size_allowed, - const char *buf_info, u32 *max_access) { + const char *buf_info = type_is_rdonly_mem(reg->type) ? 
"rdonly" : "rdwr"; int err; err = __check_buffer_access(env, buf_info, reg, regno, off, size); @@ -4576,7 +4576,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn value_regno); } else if (base_type(reg->type) == PTR_TO_BUF) { bool rdonly_mem = type_is_rdonly_mem(reg->type); - const char *buf_info; u32 *max_access; if (rdonly_mem) { @@ -4585,15 +4584,13 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn regno, reg_type_str(env, reg->type)); return -EACCES; } - buf_info = "rdonly"; max_access = &env->prog->aux->max_rdonly_access; } else { - buf_info = "rdwr"; max_access = &env->prog->aux->max_rdwr_access; } err = check_buffer_access(env, reg, regno, off, size, false, - buf_info, max_access); + max_access); if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ)) mark_reg_unknown(env, regs, value_regno); @@ -4856,7 +4853,6 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; - const char *buf_info; u32 *max_access; switch (base_type(reg->type)) { @@ -4883,15 +4879,13 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, if (meta && meta->raw_mode) return -EACCES; - buf_info = "rdonly"; max_access = &env->prog->aux->max_rdonly_access; } else { - buf_info = "rdwr"; max_access = &env->prog->aux->max_rdwr_access; } return check_buffer_access(env, reg, regno, reg->off, access_size, zero_size_allowed, - buf_info, max_access); + max_access); case PTR_TO_STACK: return check_stack_range_initialized( env, From d23a8720327d33616f584d76c80824bfa4699be6 Mon Sep 17 00:00:00 2001 From: Felix Maurer Date: Thu, 3 Mar 2022 12:15:26 +0100 Subject: [PATCH 023/137] selftests/bpf: Make test_lwt_ip_encap more stable and faster In test_lwt_ip_encap, the ingress IPv6 encap test failed from time to time. The failure occured when an IPv4 ping through the IPv6 GRE encapsulation did not receive a reply within the timeout. The IPv4 ping and the IPv6 ping in the test used different timeouts (1 sec for IPv4 and 6 sec for IPv6), probably taking into account that IPv6 might need longer to successfully complete. However, when IPv4 pings (with the short timeout) are encapsulated into the IPv6 tunnel, the delays of IPv6 apply. The actual reason for the long delays with IPv6 was that the IPv6 neighbor discovery sometimes did not complete in time. This was caused by the outgoing interface only having a tentative link local address, i.e., not having completed DAD for that lladdr. The ND was successfully retried after 1 sec but that was too late for the ping timeout. The IPv6 addresses for the test were already added with nodad. However, for the lladdrs, DAD was still performed. We now disable DAD in the test netns completely and just assume that the two lladdrs on each veth pair do not collide. This removes all the delays for IPv6 traffic in the test. Without the delays, we can now also reduce the delay of the IPv6 ping to 1 sec. This makes the whole test complete faster because we don't need to wait for the excessive timeout for each IPv6 ping that is supposed to fail. 
Fixes: 0fde56e4385b0 ("selftests: bpf: add test_lwt_ip_encap selftest") Signed-off-by: Felix Maurer Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/4987d549d48b4e316cd5b3936de69c8d4bc75a4f.1646305899.git.fmaurer@redhat.com --- tools/testing/selftests/bpf/test_lwt_ip_encap.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh index b497bb85b667..6c69c42b1d60 100755 --- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh +++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh @@ -120,6 +120,14 @@ setup() ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0 ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0 + # disable IPv6 DAD because it sometimes takes too long and fails tests + ip netns exec ${NS1} sysctl -wq net.ipv6.conf.all.accept_dad=0 + ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.accept_dad=0 + ip netns exec ${NS3} sysctl -wq net.ipv6.conf.all.accept_dad=0 + ip netns exec ${NS1} sysctl -wq net.ipv6.conf.default.accept_dad=0 + ip netns exec ${NS2} sysctl -wq net.ipv6.conf.default.accept_dad=0 + ip netns exec ${NS3} sysctl -wq net.ipv6.conf.default.accept_dad=0 + ip link add veth1 type veth peer name veth2 ip link add veth3 type veth peer name veth4 ip link add veth5 type veth peer name veth6 @@ -289,7 +297,7 @@ test_ping() ip netns exec ${NS1} ping -c 1 -W 1 -I veth1 ${IPv4_DST} 2>&1 > /dev/null RET=$? elif [ "${PROTO}" == "IPv6" ] ; then - ip netns exec ${NS1} ping6 -c 1 -W 6 -I veth1 ${IPv6_DST} 2>&1 > /dev/null + ip netns exec ${NS1} ping6 -c 1 -W 1 -I veth1 ${IPv6_DST} 2>&1 > /dev/null RET=$? else echo " test_ping: unknown PROTO: ${PROTO}" From 7fd9fd46a459272e641be78c1cc36baab1921fa1 Mon Sep 17 00:00:00 2001 From: Adrian Ratiu Date: Tue, 8 Mar 2022 14:14:28 +0200 Subject: [PATCH 024/137] tools: Fix unavoidable GCC call in Clang builds In ChromeOS and Gentoo we catch any unwanted mixed Clang/LLVM and GCC/binutils usage via toolchain wrappers which fail builds. This has revealed that GCC is called unconditionally in Clang configured builds to populate GCC_TOOLCHAIN_DIR. Allow the user to override CLANG_CROSS_FLAGS to avoid the GCC call - in our case we set the var directly in the ebuild recipe. In theory Clang could be able to autodetect these settings so this logic could be removed entirely, but in practice as the commit cebdb7374577 ("tools: Help cross-building with clang") mentions, this does not always work, so giving distributions more control to specify their flags & sysroot is beneficial. Suggested-by: Manoj Gupta Suggested-by: Nathan Chancellor Signed-off-by: Adrian Ratiu Signed-off-by: Daniel Borkmann Acked-by: Nathan Chancellor Cc: Nick Desaulniers Link: https://lore.kernel.org/lkml/87czjk4osi.fsf@ryzen9.i-did-not-set--mail-host-address--so-tickle-me Link: https://lore.kernel.org/bpf/20220308121428.81735-1-adrian.ratiu@collabora.com --- tools/scripts/Makefile.include | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include index 79d102304470..a2335e402145 100644 --- a/tools/scripts/Makefile.include +++ b/tools/scripts/Makefile.include @@ -89,6 +89,9 @@ ifeq ($(CC_NO_CLANG), 1) EXTRA_WARNINGS += -Wstrict-aliasing=3 else ifneq ($(CROSS_COMPILE),) +# Allow userspace to override CLANG_CROSS_FLAGS to specify their own +# sysroots and flags or to avoid the GCC call in pure Clang builds. 
+ifeq ($(CLANG_CROSS_FLAGS),) CLANG_CROSS_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)gcc 2>/dev/null)) ifneq ($(GCC_TOOLCHAIN_DIR),) @@ -96,6 +99,7 @@ CLANG_CROSS_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) CLANG_CROSS_FLAGS += --sysroot=$(shell $(CROSS_COMPILE)gcc -print-sysroot) CLANG_CROSS_FLAGS += --gcc-toolchain=$(realpath $(GCC_TOOLCHAIN_DIR)/..) endif # GCC_TOOLCHAIN_DIR +endif # CLANG_CROSS_FLAGS CFLAGS += $(CLANG_CROSS_FLAGS) AFLAGS += $(CLANG_CROSS_FLAGS) endif # CROSS_COMPILE From d4b540544499d90ac81695e21e354cd5c82fa67e Mon Sep 17 00:00:00 2001 From: Mykola Lysenko Date: Tue, 8 Mar 2022 12:04:47 -0800 Subject: [PATCH 025/137] Improve perf related BPF tests (sample_freq issue) Linux kernel may automatically reduce kernel.perf_event_max_sample_rate value when running tests in parallel on slow systems. Linux kernel checks against this limit when opening perf event with freq=1 parameter set. The lower bound is 1000. This patch reduces sample_freq value to 1000 in all BPF tests that use sample_freq to ensure they always can open perf event. Signed-off-by: Mykola Lysenko Signed-off-by: Andrii Nakryiko Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220308200449.1757478-2-mykolal@fb.com --- tools/testing/selftests/bpf/prog_tests/bpf_cookie.c | 2 +- tools/testing/selftests/bpf/prog_tests/find_vma.c | 2 +- tools/testing/selftests/bpf/prog_tests/perf_branches.c | 4 ++-- tools/testing/selftests/bpf/prog_tests/perf_link.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index cd10df6cd0fc..0612e79a9281 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -199,7 +199,7 @@ static void pe_subtest(struct test_bpf_cookie *skel) attr.type = PERF_TYPE_SOFTWARE; attr.config = PERF_COUNT_SW_CPU_CLOCK; attr.freq = 1; - attr.sample_freq = 4000; + attr.sample_freq = 1000; pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); if (!ASSERT_GE(pfd, 0, "perf_fd")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/find_vma.c b/tools/testing/selftests/bpf/prog_tests/find_vma.c index b74b3c0c555a..743a094c9510 100644 --- a/tools/testing/selftests/bpf/prog_tests/find_vma.c +++ b/tools/testing/selftests/bpf/prog_tests/find_vma.c @@ -30,7 +30,7 @@ static int open_pe(void) attr.type = PERF_TYPE_HARDWARE; attr.config = PERF_COUNT_HW_CPU_CYCLES; attr.freq = 1; - attr.sample_freq = 4000; + attr.sample_freq = 1000; pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC); return pfd >= 0 ? 
pfd : -errno; diff --git a/tools/testing/selftests/bpf/prog_tests/perf_branches.c b/tools/testing/selftests/bpf/prog_tests/perf_branches.c index 12c4f45cee1a..bc24f83339d6 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_branches.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_branches.c @@ -110,7 +110,7 @@ static void test_perf_branches_hw(void) attr.type = PERF_TYPE_HARDWARE; attr.config = PERF_COUNT_HW_CPU_CYCLES; attr.freq = 1; - attr.sample_freq = 4000; + attr.sample_freq = 1000; attr.sample_type = PERF_SAMPLE_BRANCH_STACK; attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY; pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); @@ -151,7 +151,7 @@ static void test_perf_branches_no_hw(void) attr.type = PERF_TYPE_SOFTWARE; attr.config = PERF_COUNT_SW_CPU_CLOCK; attr.freq = 1; - attr.sample_freq = 4000; + attr.sample_freq = 1000; pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); if (CHECK(pfd < 0, "perf_event_open", "err %d\n", pfd)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c index ede07344f264..224eba6fef2e 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_link.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c @@ -39,7 +39,7 @@ void serial_test_perf_link(void) attr.type = PERF_TYPE_SOFTWARE; attr.config = PERF_COUNT_SW_CPU_CLOCK; attr.freq = 1; - attr.sample_freq = 4000; + attr.sample_freq = 1000; pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); if (!ASSERT_GE(pfd, 0, "perf_fd")) goto cleanup; From 1fd49864127cd0d33aea8de4cf0858344c9c7265 Mon Sep 17 00:00:00 2001 From: Mykola Lysenko Date: Tue, 8 Mar 2022 12:04:48 -0800 Subject: [PATCH 026/137] Improve send_signal BPF test stability Substitute the sleep with a dummy CPU-intensive computation. Finish the aforementioned computation as soon as the signal is delivered to the test process.
Make the BPF code only execute when the PID global variable is set. Signed-off-by: Mykola Lysenko Signed-off-by: Andrii Nakryiko Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220308200449.1757478-3-mykolal@fb.com --- .../selftests/bpf/prog_tests/send_signal.c | 17 ++++++++++------- .../selftests/bpf/progs/test_send_signal_kern.c | 2 +- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c index 776916b61c40..def50f1c5c31 100644 --- a/tools/testing/selftests/bpf/prog_tests/send_signal.c +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -4,11 +4,11 @@ #include #include "test_send_signal_kern.skel.h" -int sigusr1_received = 0; +static int sigusr1_received; static void sigusr1_handler(int signum) { - sigusr1_received++; + sigusr1_received = 1; } static void test_send_signal_common(struct perf_event_attr *attr, @@ -40,9 +40,10 @@ static void test_send_signal_common(struct perf_event_attr *attr, if (pid == 0) { int old_prio; + volatile int j = 0; /* install signal handler and notify parent */ - signal(SIGUSR1, sigusr1_handler); + ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal"); close(pipe_c2p[0]); /* close read */ close(pipe_p2c[1]); /* close write */ @@ -63,9 +64,11 @@ static void test_send_signal_common(struct perf_event_attr *attr, ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read"); /* wait a little for signal handler */ - sleep(1); + for (int i = 0; i < 100000000 && !sigusr1_received; i++) + j /= i + 1; buf[0] = sigusr1_received ? '2' : '0'; + ASSERT_EQ(sigusr1_received, 1, "sigusr1_received"); ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write"); /* wait for parent notification and exit */ @@ -93,7 +96,7 @@ static void test_send_signal_common(struct perf_event_attr *attr, goto destroy_skel; } } else { - pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1, + pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */, -1 /* group id */, 0 /* flags */); if (!ASSERT_GE(pmu_fd, 0, "perf_event_open")) { err = -1; @@ -110,9 +113,9 @@ static void test_send_signal_common(struct perf_event_attr *attr, ASSERT_EQ(read(pipe_c2p[0], buf, 1), 1, "pipe_read"); /* trigger the bpf send_signal */ - skel->bss->pid = pid; - skel->bss->sig = SIGUSR1; skel->bss->signal_thread = signal_thread; + skel->bss->sig = SIGUSR1; + skel->bss->pid = pid; /* notify child that bpf program can send_signal now */ ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write"); diff --git a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c index b4233d3efac2..92354cd72044 100644 --- a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c +++ b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c @@ -10,7 +10,7 @@ static __always_inline int bpf_send_signal_test(void *ctx) { int ret; - if (status != 0 || sig == 0 || pid == 0) + if (status != 0 || pid == 0) return 0; if ((bpf_get_current_pid_tgid() >> 32) == pid) { From ba83af059153441d77bc2dbb4cd22421b8a34107 Mon Sep 17 00:00:00 2001 From: Mykola Lysenko Date: Tue, 8 Mar 2022 12:04:49 -0800 Subject: [PATCH 027/137] Improve stability of find_vma BPF test Remove an unneeded sleep and increase the length of the dummy CPU-intensive computation to guarantee test process execution.
Also, complete the aforementioned computation as soon as the test success criteria are met. Signed-off-by: Mykola Lysenko Signed-off-by: Andrii Nakryiko Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220308200449.1757478-4-mykolal@fb.com --- .../selftests/bpf/prog_tests/find_vma.c | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/find_vma.c b/tools/testing/selftests/bpf/prog_tests/find_vma.c index 743a094c9510..5165b38f0e59 100644 --- a/tools/testing/selftests/bpf/prog_tests/find_vma.c +++ b/tools/testing/selftests/bpf/prog_tests/find_vma.c @@ -7,12 +7,14 @@ #include "find_vma_fail1.skel.h" #include "find_vma_fail2.skel.h" -static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret) +static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret, bool need_test) { - ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec"); - ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret"); - ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret"); - ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs"); + if (need_test) { + ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec"); + ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret"); + ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret"); + ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs"); + } skel->bss->found_vm_exec = 0; skel->data->find_addr_ret = -1; @@ -36,11 +38,20 @@ static int open_pe(void) return pfd >= 0 ? pfd : -errno; } +static bool find_vma_pe_condition(struct find_vma *skel) +{ + return skel->bss->found_vm_exec == 0 || + skel->data->find_addr_ret != 0 || + skel->data->find_zero_ret == -1 || + strcmp(skel->bss->d_iname, "test_progs") != 0; +} + static void test_find_vma_pe(struct find_vma *skel) { struct bpf_link *link = NULL; volatile int j = 0; int pfd, i; + const int one_bn = 1000000000; pfd = open_pe(); if (pfd < 0) { @@ -57,10 +68,10 @@ static void test_find_vma_pe(struct find_vma *skel) if (!ASSERT_OK_PTR(link, "attach_perf_event")) goto cleanup; - for (i = 0; i < 1000000; ++i) + for (i = 0; i < one_bn && find_vma_pe_condition(skel); ++i) ++j; - test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */); + test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */, i == one_bn); cleanup: bpf_link__destroy(link); close(pfd); @@ -75,7 +86,7 @@ static void test_find_vma_kprobe(struct find_vma *skel) return; getpgid(skel->bss->target_pid); - test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */); + test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */, true); } static void test_illegal_write_vma(void) @@ -108,7 +119,6 @@ void serial_test_find_vma(void) skel->bss->addr = (__u64)(uintptr_t)test_find_vma_pe; test_find_vma_pe(skel); - usleep(100000); /* allow the irq_work to finish */ test_find_vma_kprobe(skel); find_vma__destroy(skel); From b530e9e1063ed2b817eae7eec6ed2daa8be11608 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Wed, 9 Mar 2022 11:53:42 +0100 Subject: [PATCH 028/137] bpf: Add "live packet" mode for XDP in BPF_PROG_RUN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for running XDP programs through BPF_PROG_RUN in a mode that enables live packet processing of the resulting frames.
Previous uses of BPF_PROG_RUN for XDP returned the XDP program return code and the modified packet data to userspace, which is useful for unit testing of XDP programs. The existing BPF_PROG_RUN for XDP allows userspace to set the ingress ifindex and RXQ number as part of the context object being passed to the kernel. This patch reuses that code, but adds a new mode with different semantics, which can be selected with the new BPF_F_TEST_XDP_LIVE_FRAMES flag. When running BPF_PROG_RUN in this mode, the XDP program return codes will be honoured: returning XDP_PASS will result in the frame being injected into the networking stack as if it came from the selected networking interface, while returning XDP_TX and XDP_REDIRECT will result in the frame being transmitted out that interface. XDP_TX is translated into an XDP_REDIRECT operation to the same interface, since the real XDP_TX action is only possible from within the network drivers themselves, not from the process context where BPF_PROG_RUN is executed. Internally, this new mode of operation creates a page pool instance while setting up the test run, and feeds pages from that into the XDP program. The setup cost of this is amortised over the number of repetitions specified by userspace. To support the performance testing use case, we further optimise the setup step so that all pages in the pool are pre-initialised with the packet data, and pre-computed context and xdp_frame objects stored at the start of each page. This makes it possible to entirely avoid touching the page content on each XDP program invocation, and enables sending up to 9 Mpps/core on my test box. Because the data pages are recycled by the page pool, and the test runner doesn't re-initialise them for each run, subsequent invocations of the XDP program will see the packet data in the state it was after the last time it ran on that particular page. This means that an XDP program that modifies the packet before redirecting it has to be careful about which assumptions it makes about the packet content, but that is only an issue for the most naively written programs. Enabling the new flag is only allowed when not setting ctx_out and data_out in the test specification, since using it means frames will be redirected somewhere else, so they can't be returned. 
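For illustration only (not part of the patch): a minimal userspace sketch of driving this mode through libbpf. The run_xdp_live() wrapper and its parameter values are hypothetical, and error handling is elided.

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Sketch: run 'repeat' copies of one packet through a loaded XDP program
 * in live frame mode. prog_fd must refer to a loaded BPF_PROG_TYPE_XDP
 * program; ifindex selects the interface the frames appear to arrive on.
 */
static int run_xdp_live(int prog_fd, void *pkt, __u32 pkt_len, int ifindex)
{
	struct xdp_md ctx_in = {
		.data_end = pkt_len,		/* must match data_size_in */
		.ingress_ifindex = ifindex,
	};
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.ctx_in = &ctx_in,
		.ctx_size_in = sizeof(ctx_in),
		.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
		.repeat = 1000000,	/* total number of frames to run */
		.batch_size = 64,	/* 0 would default to NAPI_POLL_WEIGHT */
	);

	/* data_out/ctx_out must stay unset: in live mode the frames are
	 * passed or redirected by the kernel instead of being returned.
	 */
	return bpf_prog_test_run_opts(prog_fd, &opts);
}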
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220309105346.100053-2-toke@redhat.com --- include/uapi/linux/bpf.h | 3 + kernel/bpf/Kconfig | 1 + kernel/bpf/syscall.c | 2 +- net/bpf/test_run.c | 334 +++++++++++++++++++++++++++++++-- tools/include/uapi/linux/bpf.h | 3 + 5 files changed, 328 insertions(+), 15 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4eebea830613..bc23020b638d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1232,6 +1232,8 @@ enum { /* If set, run the test on the cpu specified by bpf_attr.test.cpu */ #define BPF_F_TEST_RUN_ON_CPU (1U << 0) +/* If set, XDP frames will be transmitted after processing */ +#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1) /* type for BPF_ENABLE_STATS */ enum bpf_stats_type { @@ -1393,6 +1395,7 @@ union bpf_attr { __aligned_u64 ctx_out; __u32 flags; __u32 cpu; + __u32 batch_size; } test; struct { /* anonymous struct used by BPF_*_GET_*_ID */ diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig index c3cf0b86eeb2..d56ee177d5f8 100644 --- a/kernel/bpf/Kconfig +++ b/kernel/bpf/Kconfig @@ -30,6 +30,7 @@ config BPF_SYSCALL select TASKS_TRACE_RCU select BINARY_PRINTF select NET_SOCK_MSG if NET + select PAGE_POOL if NET default n help Enable the bpf() system call that allows to manipulate BPF programs diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index db402ebc5570..9beb585be5a6 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3336,7 +3336,7 @@ static int bpf_prog_query(const union bpf_attr *attr, } } -#define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu +#define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size static int bpf_prog_test_run(const union bpf_attr *attr, union bpf_attr __user *uattr) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index ba410b069824..25169908be4a 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -53,10 +54,11 @@ static void bpf_test_timer_leave(struct bpf_test_timer *t) rcu_read_unlock(); } -static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration) +static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations, + u32 repeat, int *err, u32 *duration) __must_hold(rcu) { - t->i++; + t->i += iterations; if (t->i >= repeat) { /* We're done. */ t->time_spent += ktime_get_ns() - t->time_start; @@ -88,6 +90,286 @@ reset: return false; } +/* We put this struct at the head of each page with a context and frame + * initialised when the page is allocated, so we don't have to do this on each + * repetition of the test run. 
+ */ +struct xdp_page_head { + struct xdp_buff orig_ctx; + struct xdp_buff ctx; + struct xdp_frame frm; + u8 data[]; +}; + +struct xdp_test_data { + struct xdp_buff *orig_ctx; + struct xdp_rxq_info rxq; + struct net_device *dev; + struct page_pool *pp; + struct xdp_frame **frames; + struct sk_buff **skbs; + u32 batch_size; + u32 frame_cnt; +}; + +#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head) \ + - sizeof(struct skb_shared_info)) +#define TEST_XDP_MAX_BATCH 256 + +static void xdp_test_run_init_page(struct page *page, void *arg) +{ + struct xdp_page_head *head = phys_to_virt(page_to_phys(page)); + struct xdp_buff *new_ctx, *orig_ctx; + u32 headroom = XDP_PACKET_HEADROOM; + struct xdp_test_data *xdp = arg; + size_t frm_len, meta_len; + struct xdp_frame *frm; + void *data; + + orig_ctx = xdp->orig_ctx; + frm_len = orig_ctx->data_end - orig_ctx->data_meta; + meta_len = orig_ctx->data - orig_ctx->data_meta; + headroom -= meta_len; + + new_ctx = &head->ctx; + frm = &head->frm; + data = &head->data; + memcpy(data + headroom, orig_ctx->data_meta, frm_len); + + xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq); + xdp_prepare_buff(new_ctx, data, headroom, frm_len, true); + new_ctx->data = new_ctx->data_meta + meta_len; + + xdp_update_frame_from_buff(new_ctx, frm); + frm->mem = new_ctx->rxq->mem; + + memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx)); +} + +static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx) +{ + struct xdp_mem_info mem = {}; + struct page_pool *pp; + int err = -ENOMEM; + struct page_pool_params pp_params = { + .order = 0, + .flags = 0, + .pool_size = xdp->batch_size, + .nid = NUMA_NO_NODE, + .max_len = TEST_XDP_FRAME_SIZE, + .init_callback = xdp_test_run_init_page, + .init_arg = xdp, + }; + + xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL); + if (!xdp->frames) + return -ENOMEM; + + xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL); + if (!xdp->skbs) + goto err_skbs; + + pp = page_pool_create(&pp_params); + if (IS_ERR(pp)) { + err = PTR_ERR(pp); + goto err_pp; + } + + /* will copy 'mem.id' into pp->xdp_mem_id */ + err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp); + if (err) + goto err_mmodel; + + xdp->pp = pp; + + /* We create a 'fake' RXQ referencing the original dev, but with an + * xdp_mem_info pointing to our page_pool + */ + xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0); + xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL; + xdp->rxq.mem.id = pp->xdp_mem_id; + xdp->dev = orig_ctx->rxq->dev; + xdp->orig_ctx = orig_ctx; + + return 0; + +err_mmodel: + page_pool_destroy(pp); +err_pp: + kfree(xdp->skbs); +err_skbs: + kfree(xdp->frames); + return err; +} + +static void xdp_test_run_teardown(struct xdp_test_data *xdp) +{ + page_pool_destroy(xdp->pp); + kfree(xdp->frames); + kfree(xdp->skbs); +} + +static bool ctx_was_changed(struct xdp_page_head *head) +{ + return head->orig_ctx.data != head->ctx.data || + head->orig_ctx.data_meta != head->ctx.data_meta || + head->orig_ctx.data_end != head->ctx.data_end; +} + +static void reset_ctx(struct xdp_page_head *head) +{ + if (likely(!ctx_was_changed(head))) + return; + + head->ctx.data = head->orig_ctx.data; + head->ctx.data_meta = head->orig_ctx.data_meta; + head->ctx.data_end = head->orig_ctx.data_end; + xdp_update_frame_from_buff(&head->ctx, &head->frm); +} + +static int xdp_recv_frames(struct xdp_frame **frames, int nframes, + struct sk_buff **skbs, + struct net_device *dev) +{ + gfp_t gfp = __GFP_ZERO | GFP_ATOMIC; + int i, 
n; + LIST_HEAD(list); + + n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs); + if (unlikely(n == 0)) { + for (i = 0; i < nframes; i++) + xdp_return_frame(frames[i]); + return -ENOMEM; + } + + for (i = 0; i < nframes; i++) { + struct xdp_frame *xdpf = frames[i]; + struct sk_buff *skb = skbs[i]; + + skb = __xdp_build_skb_from_frame(xdpf, skb, dev); + if (!skb) { + xdp_return_frame(xdpf); + continue; + } + + list_add_tail(&skb->list, &list); + } + netif_receive_skb_list(&list); + + return 0; +} + +static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog, + u32 repeat) +{ + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + int err = 0, act, ret, i, nframes = 0, batch_sz; + struct xdp_frame **frames = xdp->frames; + struct xdp_page_head *head; + struct xdp_frame *frm; + bool redirect = false; + struct xdp_buff *ctx; + struct page *page; + + batch_sz = min_t(u32, repeat, xdp->batch_size); + + local_bh_disable(); + xdp_set_return_frame_no_direct(); + + for (i = 0; i < batch_sz; i++) { + page = page_pool_dev_alloc_pages(xdp->pp); + if (!page) { + err = -ENOMEM; + goto out; + } + + head = phys_to_virt(page_to_phys(page)); + reset_ctx(head); + ctx = &head->ctx; + frm = &head->frm; + xdp->frame_cnt++; + + act = bpf_prog_run_xdp(prog, ctx); + + /* if program changed pkt bounds we need to update the xdp_frame */ + if (unlikely(ctx_was_changed(head))) { + ret = xdp_update_frame_from_buff(ctx, frm); + if (ret) { + xdp_return_buff(ctx); + continue; + } + } + + switch (act) { + case XDP_TX: + /* we can't do a real XDP_TX since we're not in the + * driver, so turn it into a REDIRECT back to the same + * index + */ + ri->tgt_index = xdp->dev->ifindex; + ri->map_id = INT_MAX; + ri->map_type = BPF_MAP_TYPE_UNSPEC; + fallthrough; + case XDP_REDIRECT: + redirect = true; + ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog); + if (ret) + xdp_return_buff(ctx); + break; + case XDP_PASS: + frames[nframes++] = frm; + break; + default: + bpf_warn_invalid_xdp_action(NULL, prog, act); + fallthrough; + case XDP_DROP: + xdp_return_buff(ctx); + break; + } + } + +out: + if (redirect) + xdp_do_flush(); + if (nframes) { + ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev); + if (ret) + err = ret; + } + + xdp_clear_return_frame_no_direct(); + local_bh_enable(); + return err; +} + +static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx, + u32 repeat, u32 batch_size, u32 *time) + +{ + struct xdp_test_data xdp = { .batch_size = batch_size }; + struct bpf_test_timer t = { .mode = NO_MIGRATE }; + int ret; + + if (!repeat) + repeat = 1; + + ret = xdp_test_run_setup(&xdp, ctx); + if (ret) + return ret; + + bpf_test_timer_enter(&t); + do { + xdp.frame_cnt = 0; + ret = xdp_test_run_batch(&xdp, prog, repeat - t.i); + if (unlikely(ret < 0)) + break; + } while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time)); + bpf_test_timer_leave(&t); + + xdp_test_run_teardown(&xdp); + return ret; +} + static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *retval, u32 *time, bool xdp) { @@ -119,7 +401,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, *retval = bpf_prog_run_xdp(prog, ctx); else *retval = bpf_prog_run(prog, ctx); - } while (bpf_test_timer_continue(&t, repeat, &ret, time)); + } while (bpf_test_timer_continue(&t, 1, repeat, &ret, time)); bpf_reset_run_ctx(old_ctx); bpf_test_timer_leave(&t); @@ -446,7 +728,7 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog, int b = 2, err = 
-EFAULT; u32 retval = 0; - if (kattr->test.flags || kattr->test.cpu) + if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size) return -EINVAL; switch (prog->expected_attach_type) { @@ -510,7 +792,7 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, /* doesn't support data_in/out, ctx_out, duration, or repeat */ if (kattr->test.data_in || kattr->test.data_out || kattr->test.ctx_out || kattr->test.duration || - kattr->test.repeat) + kattr->test.repeat || kattr->test.batch_size) return -EINVAL; if (ctx_size_in < prog->aux->max_ctx_offset || @@ -741,7 +1023,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, void *data; int ret; - if (kattr->test.flags || kattr->test.cpu) + if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size) return -EINVAL; data = bpf_test_init(kattr, kattr->test.data_size_in, @@ -922,7 +1204,9 @@ static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md) int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) { + bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES); u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + u32 batch_size = kattr->test.batch_size; u32 size = kattr->test.data_size_in; u32 headroom = XDP_PACKET_HEADROOM; u32 retval, duration, max_data_sz; @@ -938,6 +1222,18 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, prog->expected_attach_type == BPF_XDP_CPUMAP) return -EINVAL; + if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES) + return -EINVAL; + + if (do_live) { + if (!batch_size) + batch_size = NAPI_POLL_WEIGHT; + else if (batch_size > TEST_XDP_MAX_BATCH) + return -E2BIG; + } else if (batch_size) { + return -EINVAL; + } + ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md)); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -946,14 +1242,20 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, /* There can't be user provided data before the meta data */ if (ctx->data_meta || ctx->data_end != size || ctx->data > ctx->data_end || - unlikely(xdp_metalen_invalid(ctx->data))) + unlikely(xdp_metalen_invalid(ctx->data)) || + (do_live && (kattr->test.data_out || kattr->test.ctx_out))) goto free_ctx; /* Meta data is allocated from the headroom */ headroom -= ctx->data; } max_data_sz = 4096 - headroom - tailroom; - size = min_t(u32, size, max_data_sz); + if (size > max_data_sz) { + /* disallow live data mode for jumbo frames */ + if (do_live) + goto free_ctx; + size = max_data_sz; + } data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom); if (IS_ERR(data)) { @@ -1011,7 +1313,10 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, if (repeat > 1) bpf_prog_change_xdp(NULL, prog); - ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true); + if (do_live) + ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration); + else + ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true); /* We convert the xdp_buff back to an xdp_md before checking the return * code so the reference count of any held netdevice will be decremented * even if the test run failed. 
@@ -1073,7 +1378,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR) return -EINVAL; - if (kattr->test.flags || kattr->test.cpu) + if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size) return -EINVAL; if (size < ETH_HLEN) @@ -1108,7 +1413,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, do { retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN, size, flags); - } while (bpf_test_timer_continue(&t, repeat, &ret, &duration)); + } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration)); bpf_test_timer_leave(&t); if (ret < 0) @@ -1140,7 +1445,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat if (prog->type != BPF_PROG_TYPE_SK_LOOKUP) return -EINVAL; - if (kattr->test.flags || kattr->test.cpu) + if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size) return -EINVAL; if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out || @@ -1203,7 +1508,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat do { ctx.selected_sk = NULL; retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run); - } while (bpf_test_timer_continue(&t, repeat, &ret, &duration)); + } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration)); bpf_test_timer_leave(&t); if (ret < 0) @@ -1242,7 +1547,8 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog, /* doesn't support data_in/out, ctx_out, duration, or repeat or flags */ if (kattr->test.data_in || kattr->test.data_out || kattr->test.ctx_out || kattr->test.duration || - kattr->test.repeat || kattr->test.flags) + kattr->test.repeat || kattr->test.flags || + kattr->test.batch_size) return -EINVAL; if (ctx_size_in < prog->aux->max_ctx_offset || diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 4eebea830613..bc23020b638d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1232,6 +1232,8 @@ enum { /* If set, run the test on the cpu specified by bpf_attr.test.cpu */ #define BPF_F_TEST_RUN_ON_CPU (1U << 0) +/* If set, XDP frames will be transmitted after processing */ +#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1) /* type for BPF_ENABLE_STATS */ enum bpf_stats_type { @@ -1393,6 +1395,7 @@ union bpf_attr { __aligned_u64 ctx_out; __u32 flags; __u32 cpu; + __u32 batch_size; } test; struct { /* anonymous struct used by BPF_*_GET_*_ID */ From 1a7551f15097dff30cf0853117b2f8077bb84f0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Wed, 9 Mar 2022 11:53:43 +0100 Subject: [PATCH 029/137] Documentation/bpf: Add documentation for BPF_PROG_RUN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds documentation for the BPF_PROG_RUN command; a short overview of the command itself, and a more verbose description of the "live packet" mode for XDP introduced in the previous commit. 
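For contrast with the live mode, the regular side-effect-free test mode that this document also covers can be driven from userspace roughly as in the sketch below (the run_xdp_unit_test() helper and its names are hypothetical; error handling is elided):

#include <bpf/bpf.h>

/* Sketch: single unit-test style run of an XDP program; the verdict and
 * the (possibly modified) packet are returned to userspace.
 */
static int run_xdp_unit_test(int prog_fd, void *pkt_in, __u32 len_in,
			     void *pkt_out, __u32 out_cap, __u32 *verdict)
{
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt_in,
		.data_size_in = len_in,
		.data_out = pkt_out,
		.data_size_out = out_cap,	/* in: capacity, out: length */
		.repeat = 1,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &opts);

	if (err)
		return err;
	*verdict = opts.retval;	/* e.g. XDP_PASS; nothing was actually sent */
	return 0;
}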
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220309105346.100053-3-toke@redhat.com --- Documentation/bpf/bpf_prog_run.rst | 117 +++++++++++++++++++++++++++++ Documentation/bpf/index.rst | 1 + 2 files changed, 118 insertions(+) create mode 100644 Documentation/bpf/bpf_prog_run.rst diff --git a/Documentation/bpf/bpf_prog_run.rst b/Documentation/bpf/bpf_prog_run.rst new file mode 100644 index 000000000000..4868c909df5c --- /dev/null +++ b/Documentation/bpf/bpf_prog_run.rst @@ -0,0 +1,117 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=================================== +Running BPF programs from userspace +=================================== + +This document describes the ``BPF_PROG_RUN`` facility for running BPF programs +from userspace. + +.. contents:: + :local: + :depth: 2 + + +Overview +-------- + +The ``BPF_PROG_RUN`` command can be used through the ``bpf()`` syscall to +execute a BPF program in the kernel and return the results to userspace. This +can be used to unit test BPF programs against user-supplied context objects, and +as a way to explicitly execute programs in the kernel for their side effects. The +command was previously named ``BPF_PROG_TEST_RUN``, and both constants continue +to be defined in the UAPI header, aliased to the same value. + +The ``BPF_PROG_RUN`` command can be used to execute BPF programs of the +following types: + +- ``BPF_PROG_TYPE_SOCKET_FILTER`` +- ``BPF_PROG_TYPE_SCHED_CLS`` +- ``BPF_PROG_TYPE_SCHED_ACT`` +- ``BPF_PROG_TYPE_XDP`` +- ``BPF_PROG_TYPE_SK_LOOKUP`` +- ``BPF_PROG_TYPE_CGROUP_SKB`` +- ``BPF_PROG_TYPE_LWT_IN`` +- ``BPF_PROG_TYPE_LWT_OUT`` +- ``BPF_PROG_TYPE_LWT_XMIT`` +- ``BPF_PROG_TYPE_LWT_SEG6LOCAL`` +- ``BPF_PROG_TYPE_FLOW_DISSECTOR`` +- ``BPF_PROG_TYPE_STRUCT_OPS`` +- ``BPF_PROG_TYPE_RAW_TRACEPOINT`` +- ``BPF_PROG_TYPE_SYSCALL`` + +When using the ``BPF_PROG_RUN`` command, userspace supplies an input context +object and (for program types operating on network packets) a buffer containing +the packet data that the BPF program will operate on. The kernel will then +execute the program and return the results to userspace. Note that programs will +not have any side effects while being run in this mode; in particular, packets +will not actually be redirected or dropped, the program return code will just be +returned to userspace. A separate mode for live execution of XDP programs is +provided, documented separately below. + +Running XDP programs in "live frame mode" +----------------------------------------- + +The ``BPF_PROG_RUN`` command has a separate mode for running live XDP programs, +which can be used to execute XDP programs in a way where packets will actually +be processed by the kernel after the execution of the XDP program as if they +arrived on a physical interface. This mode is activated by setting the +``BPF_F_TEST_XDP_LIVE_FRAMES`` flag when supplying an XDP program to +``BPF_PROG_RUN``. + +The live packet mode is optimised for high performance execution of the supplied +XDP program many times (suitable for, e.g., running as a traffic generator), +which means the semantics are not quite as straightforward as the regular test +run mode. Specifically: + +- When executing an XDP program in live frame mode, the result of the execution + will not be returned to userspace; instead, the kernel will perform the + operation indicated by the program's return code (drop the packet, redirect + it, etc).
For this reason, setting the ``data_out`` or ``ctx_out`` attributes + in the syscall parameters when running in this mode will be rejected. In + addition, not all failures will be reported back to userspace directly; + specifically, only fatal errors in setup or during execution (like memory + allocation errors) will halt execution and return an error. If an error occurs + in packet processing, like a failure to redirect to a given interface, + execution will continue with the next repetition; these errors can be detected + via the same trace points as for regular XDP programs. + +- Userspace can supply an ifindex as part of the context object, just like in + the regular (non-live) mode. The XDP program will be executed as though the + packet arrived on this interface; i.e., the ``ingress_ifindex`` of the context + object will point to that interface. Furthermore, if the XDP program returns + ``XDP_PASS``, the packet will be injected into the kernel networking stack as + though it arrived on that ifindex, and if it returns ``XDP_TX``, the packet + will be transmitted *out* of that same interface. Do note, though, that + because the program execution is not happening in driver context, an + ``XDP_TX`` is actually turned into the same action as an ``XDP_REDIRECT`` to + that same interface (i.e., it will only work if the driver has support for the + ``ndo_xdp_xmit`` driver op). + +- When running the program with multiple repetitions, the execution will happen + in batches. The batch size defaults to 64 packets (which is the same as the + maximum NAPI receive batch size), but can be specified by userspace through + the ``batch_size`` parameter, up to a maximum of 256 packets. For each batch, + the kernel executes the XDP program repeatedly, each invocation getting a + separate copy of the packet data. For each repetition, if the program drops + the packet, the data page is immediately recycled (see below). Otherwise, the + packet is buffered until the end of the batch, at which point all packets + buffered this way during the batch are transmitted at once. + +- When setting up the test run, the kernel will initialise a pool of memory + pages of the same size as the batch size. Each memory page will be initialised + with the initial packet data supplied by userspace at ``BPF_PROG_RUN`` + invocation. When possible, the pages will be recycled on future program + invocations, to improve performance. Pages will generally be recycled a full + batch at a time, except when a packet is dropped (by return code or because + of, say, a redirection error), in which case that page will be recycled + immediately. If a packet ends up being passed to the regular networking stack + (because the XDP program returns ``XDP_PASS``, or because it ends up being + redirected to an interface that injects it into the stack), the page will be + released and a new one will be allocated when the pool is empty. + + When recycling, the page content is not rewritten; only the packet boundary + pointers (``data``, ``data_end`` and ``data_meta``) in the context object will + be reset to the original values. This means that if a program rewrites the + packet contents, it has to be prepared to see either the original content or + the modified version on subsequent invocations. diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst index ef5c996547ec..96056a7447c7 100644 --- a/Documentation/bpf/index.rst +++ b/Documentation/bpf/index.rst @@ -21,6 +21,7 @@ that goes into great technical depth about the BPF Architecture.
helpers programs maps + bpf_prog_run classic_vs_extended.rst bpf_licensing test_debug From 24592ad1ab18416a850f03d46d07ae483f808895 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Wed, 9 Mar 2022 11:53:44 +0100 Subject: [PATCH 030/137] libbpf: Support batch_size option to bpf_prog_test_run MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for setting the new batch_size parameter to BPF_PROG_TEST_RUN to libbpf; just add it as an option and pass it through to the kernel. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220309105346.100053-4-toke@redhat.com --- tools/lib/bpf/bpf.c | 1 + tools/lib/bpf/bpf.h | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 3c7c180294fa..f69ce3a01385 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -995,6 +995,7 @@ int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts) memset(&attr, 0, sizeof(attr)); attr.test.prog_fd = prog_fd; + attr.test.batch_size = OPTS_GET(opts, batch_size, 0); attr.test.cpu = OPTS_GET(opts, cpu, 0); attr.test.flags = OPTS_GET(opts, flags, 0); attr.test.repeat = OPTS_GET(opts, repeat, 0); diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 16b21757b8bf..5253cb4a4c0a 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -512,8 +512,9 @@ struct bpf_test_run_opts { __u32 duration; /* out: average per repetition in ns */ __u32 flags; __u32 cpu; + __u32 batch_size; }; -#define bpf_test_run_opts__last_field cpu +#define bpf_test_run_opts__last_field batch_size LIBBPF_API int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts); From a30338840fa5c6e400673b7d8a31323280bfd521 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Wed, 9 Mar 2022 11:53:45 +0100 Subject: [PATCH 031/137] selftests/bpf: Move open_netns() and close_netns() into network_helpers.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These will also be used by the xdp_do_redirect test being added in the next commit. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220309105346.100053-5-toke@redhat.com --- tools/testing/selftests/bpf/network_helpers.c | 86 ++++++++++++++++++ tools/testing/selftests/bpf/network_helpers.h | 9 ++ .../selftests/bpf/prog_tests/tc_redirect.c | 89 ------------------- 3 files changed, 95 insertions(+), 89 deletions(-) diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index 6db1af8fdee7..2bb1f9b3841d 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -1,18 +1,25 @@ // SPDX-License-Identifier: GPL-2.0-only +#define _GNU_SOURCE + #include #include #include #include #include +#include #include +#include +#include #include #include #include +#include #include "bpf_util.h" #include "network_helpers.h" +#include "test_progs.h" #define clean_errno() (errno == 0 ? "None" : strerror(errno)) #define log_err(MSG, ...) 
({ \ @@ -356,3 +363,82 @@ char *ping_command(int family) } return "ping"; } + +struct nstoken { + int orig_netns_fd; +}; + +static int setns_by_fd(int nsfd) +{ + int err; + + err = setns(nsfd, CLONE_NEWNET); + close(nsfd); + + if (!ASSERT_OK(err, "setns")) + return err; + + /* Switch /sys to the new namespace so that e.g. /sys/class/net + * reflects the devices in the new namespace. + */ + err = unshare(CLONE_NEWNS); + if (!ASSERT_OK(err, "unshare")) + return err; + + /* Make our /sys mount private, so the following umount won't + * trigger the global umount in case it's shared. + */ + err = mount("none", "/sys", NULL, MS_PRIVATE, NULL); + if (!ASSERT_OK(err, "remount private /sys")) + return err; + + err = umount2("/sys", MNT_DETACH); + if (!ASSERT_OK(err, "umount2 /sys")) + return err; + + err = mount("sysfs", "/sys", "sysfs", 0, NULL); + if (!ASSERT_OK(err, "mount /sys")) + return err; + + err = mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL); + if (!ASSERT_OK(err, "mount /sys/fs/bpf")) + return err; + + return 0; +} + +struct nstoken *open_netns(const char *name) +{ + int nsfd; + char nspath[PATH_MAX]; + int err; + struct nstoken *token; + + token = malloc(sizeof(struct nstoken)); + if (!ASSERT_OK_PTR(token, "malloc token")) + return NULL; + + token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY); + if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net")) + goto fail; + + snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name); + nsfd = open(nspath, O_RDONLY | O_CLOEXEC); + if (!ASSERT_GE(nsfd, 0, "open netns fd")) + goto fail; + + err = setns_by_fd(nsfd); + if (!ASSERT_OK(err, "setns_by_fd")) + goto fail; + + return token; +fail: + free(token); + return NULL; +} + +void close_netns(struct nstoken *token) +{ + ASSERT_OK(setns_by_fd(token->orig_netns_fd), "setns_by_fd"); + free(token); +} diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index d198181a5648..a4b3b2f9877b 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -55,4 +55,13 @@ int make_sockaddr(int family, const char *addr_str, __u16 port, struct sockaddr_storage *addr, socklen_t *len); char *ping_command(int family); +struct nstoken; +/** + * open_netns() - Switch to specified network namespace by name. + * + * Returns token with which to restore the original namespace + * using close_netns(). + */ +struct nstoken *open_netns(const char *name); +void close_netns(struct nstoken *token); #endif diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index 2b255e28ed26..7ad66a247c02 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -10,8 +10,6 @@ * to drop unexpected traffic. */ -#define _GNU_SOURCE - #include #include #include @@ -19,10 +17,8 @@ #include #include #include -#include #include #include -#include #include #include @@ -92,91 +88,6 @@ static int write_file(const char *path, const char *newval) return 0; } -struct nstoken { - int orig_netns_fd; -}; - -static int setns_by_fd(int nsfd) -{ - int err; - - err = setns(nsfd, CLONE_NEWNET); - close(nsfd); - - if (!ASSERT_OK(err, "setns")) - return err; - - /* Switch /sys to the new namespace so that e.g. /sys/class/net - * reflects the devices in the new namespace. 
- */ - err = unshare(CLONE_NEWNS); - if (!ASSERT_OK(err, "unshare")) - return err; - - /* Make our /sys mount private, so the following umount won't - * trigger the global umount in case it's shared. - */ - err = mount("none", "/sys", NULL, MS_PRIVATE, NULL); - if (!ASSERT_OK(err, "remount private /sys")) - return err; - - err = umount2("/sys", MNT_DETACH); - if (!ASSERT_OK(err, "umount2 /sys")) - return err; - - err = mount("sysfs", "/sys", "sysfs", 0, NULL); - if (!ASSERT_OK(err, "mount /sys")) - return err; - - err = mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL); - if (!ASSERT_OK(err, "mount /sys/fs/bpf")) - return err; - - return 0; -} - -/** - * open_netns() - Switch to specified network namespace by name. - * - * Returns token with which to restore the original namespace - * using close_netns(). - */ -static struct nstoken *open_netns(const char *name) -{ - int nsfd; - char nspath[PATH_MAX]; - int err; - struct nstoken *token; - - token = calloc(1, sizeof(struct nstoken)); - if (!ASSERT_OK_PTR(token, "malloc token")) - return NULL; - - token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY); - if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net")) - goto fail; - - snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name); - nsfd = open(nspath, O_RDONLY | O_CLOEXEC); - if (!ASSERT_GE(nsfd, 0, "open netns fd")) - goto fail; - - err = setns_by_fd(nsfd); - if (!ASSERT_OK(err, "setns_by_fd")) - goto fail; - - return token; -fail: - free(token); - return NULL; -} - -static void close_netns(struct nstoken *token) -{ - ASSERT_OK(setns_by_fd(token->orig_netns_fd), "setns_by_fd"); - free(token); -} - static int netns_setup_namespaces(const char *verb) { const char * const *ns = namespaces; From 55fcacca36468801e62e1d9cc81e5870eea620ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Wed, 9 Mar 2022 11:53:46 +0100 Subject: [PATCH 032/137] selftests/bpf: Add selftest for XDP_REDIRECT in BPF_PROG_RUN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a selftest for the XDP_REDIRECT facility in BPF_PROG_RUN, that redirects packets into a veth and counts them using an XDP program on the other side of the veth pair and a TC program on the local side of the veth. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220309105346.100053-6-toke@redhat.com --- .../bpf/prog_tests/xdp_do_redirect.c | 177 ++++++++++++++++++ .../bpf/progs/test_xdp_do_redirect.c | 100 ++++++++++ 2 files changed, 277 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c create mode 100644 tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c new file mode 100644 index 000000000000..9926b07e38c8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "test_xdp_do_redirect.skel.h" + +#define SYS(fmt, ...) 
\ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + if (!ASSERT_OK(system(cmd), cmd)) \ + goto out; \ + }) + +struct udp_packet { + struct ethhdr eth; + struct ipv6hdr iph; + struct udphdr udp; + __u8 payload[64 - sizeof(struct udphdr) + - sizeof(struct ethhdr) - sizeof(struct ipv6hdr)]; +} __packed; + +static struct udp_packet pkt_udp = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .eth.h_dest = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, + .eth.h_source = {0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb}, + .iph.version = 6, + .iph.nexthdr = IPPROTO_UDP, + .iph.payload_len = bpf_htons(sizeof(struct udp_packet) + - offsetof(struct udp_packet, udp)), + .iph.hop_limit = 2, + .iph.saddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(1)}, + .iph.daddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(2)}, + .udp.source = bpf_htons(1), + .udp.dest = bpf_htons(1), + .udp.len = bpf_htons(sizeof(struct udp_packet) + - offsetof(struct udp_packet, udp)), + .payload = {0x42}, /* receiver XDP program matches on this */ +}; + +static int attach_tc_prog(struct bpf_tc_hook *hook, int fd) +{ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd); + int ret; + + ret = bpf_tc_hook_create(hook); + if (!ASSERT_OK(ret, "create tc hook")) + return ret; + + ret = bpf_tc_attach(hook, &opts); + if (!ASSERT_OK(ret, "bpf_tc_attach")) { + bpf_tc_hook_destroy(hook); + return ret; + } + + return 0; +} + +#define NUM_PKTS 10000 +void test_xdp_do_redirect(void) +{ + int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst; + char data[sizeof(pkt_udp) + sizeof(__u32)]; + struct test_xdp_do_redirect *skel = NULL; + struct nstoken *nstoken = NULL; + struct bpf_link *link; + + struct xdp_md ctx_in = { .data = sizeof(__u32), + .data_end = sizeof(data) }; + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &data, + .data_size_in = sizeof(data), + .ctx_in = &ctx_in, + .ctx_size_in = sizeof(ctx_in), + .flags = BPF_F_TEST_XDP_LIVE_FRAMES, + .repeat = NUM_PKTS, + .batch_size = 64, + ); + DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, + .attach_point = BPF_TC_INGRESS); + + memcpy(&data[sizeof(__u32)], &pkt_udp, sizeof(pkt_udp)); + *((__u32 *)data) = 0x42; /* metadata test value */ + + skel = test_xdp_do_redirect__open(); + if (!ASSERT_OK_PTR(skel, "skel")) + return; + + /* The XDP program we run with bpf_prog_run() will cycle through all + * three xmit (PASS/TX/REDIRECT) return codes starting from above, and + * ending up with PASS, so we should end up with two packets on the dst + * iface and NUM_PKTS-2 in the TC hook. We match the packets on the UDP + * payload. + */ + SYS("ip netns add testns"); + nstoken = open_netns("testns"); + if (!ASSERT_OK_PTR(nstoken, "setns")) + goto out; + + SYS("ip link add veth_src type veth peer name veth_dst"); + SYS("ip link set dev veth_src address 00:11:22:33:44:55"); + SYS("ip link set dev veth_dst address 66:77:88:99:aa:bb"); + SYS("ip link set dev veth_src up"); + SYS("ip link set dev veth_dst up"); + SYS("ip addr add dev veth_src fc00::1/64"); + SYS("ip addr add dev veth_dst fc00::2/64"); + SYS("ip neigh add fc00::2 dev veth_src lladdr 66:77:88:99:aa:bb"); + + /* We enable forwarding in the test namespace because that will cause + * the packets that go through the kernel stack (with XDP_PASS) to be + * forwarded back out the same interface (because of the packet dst + * combined with the interface addresses). 
When this happens, the + * regular forwarding path will end up going through the same + * veth_xdp_xmit() call as the XDP_REDIRECT code, which can cause a + * deadlock if it happens on the same CPU. There's a local_bh_disable() + * in the test_run code to prevent this, but an earlier version of the + * code didn't have this, so we keep the test behaviour to make sure the + * bug doesn't resurface. + */ + SYS("sysctl -qw net.ipv6.conf.all.forwarding=1"); + + ifindex_src = if_nametoindex("veth_src"); + ifindex_dst = if_nametoindex("veth_dst"); + if (!ASSERT_NEQ(ifindex_src, 0, "ifindex_src") || + !ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst")) + goto out; + + memcpy(skel->rodata->expect_dst, &pkt_udp.eth.h_dest, ETH_ALEN); + skel->rodata->ifindex_out = ifindex_src; /* redirect back to the same iface */ + skel->rodata->ifindex_in = ifindex_src; + ctx_in.ingress_ifindex = ifindex_src; + tc_hook.ifindex = ifindex_src; + + if (!ASSERT_OK(test_xdp_do_redirect__load(skel), "load")) + goto out; + + link = bpf_program__attach_xdp(skel->progs.xdp_count_pkts, ifindex_dst); + if (!ASSERT_OK_PTR(link, "prog_attach")) + goto out; + skel->links.xdp_count_pkts = link; + + tc_prog_fd = bpf_program__fd(skel->progs.tc_count_pkts); + if (attach_tc_prog(&tc_hook, tc_prog_fd)) + goto out; + + xdp_prog_fd = bpf_program__fd(skel->progs.xdp_redirect); + err = bpf_prog_test_run_opts(xdp_prog_fd, &opts); + if (!ASSERT_OK(err, "prog_run")) + goto out_tc; + + /* wait for the packets to be flushed */ + kern_sync_rcu(); + + /* There will be one packet sent through XDP_REDIRECT and one through + * XDP_TX; these will show up on the XDP counting program, while the + * rest will be counted at the TC ingress hook (and the counting program + * resets the packet payload so they don't get counted twice even though + * they are re-xmited out the veth device + */ + ASSERT_EQ(skel->bss->pkts_seen_xdp, 2, "pkt_count_xdp"); + ASSERT_EQ(skel->bss->pkts_seen_zero, 2, "pkt_count_zero"); + ASSERT_EQ(skel->bss->pkts_seen_tc, NUM_PKTS - 2, "pkt_count_tc"); + +out_tc: + bpf_tc_hook_destroy(&tc_hook); +out: + if (nstoken) + close_netns(nstoken); + system("ip netns del testns"); + test_xdp_do_redirect__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c new file mode 100644 index 000000000000..77a123071940 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#define ETH_ALEN 6 +#define HDR_SZ (sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + sizeof(struct udphdr)) +const volatile int ifindex_out; +const volatile int ifindex_in; +const volatile __u8 expect_dst[ETH_ALEN]; +volatile int pkts_seen_xdp = 0; +volatile int pkts_seen_zero = 0; +volatile int pkts_seen_tc = 0; +volatile int retcode = XDP_REDIRECT; + +SEC("xdp") +int xdp_redirect(struct xdp_md *xdp) +{ + __u32 *metadata = (void *)(long)xdp->data_meta; + void *data_end = (void *)(long)xdp->data_end; + void *data = (void *)(long)xdp->data; + + __u8 *payload = data + HDR_SZ; + int ret = retcode; + + if (payload + 1 > data_end) + return XDP_ABORTED; + + if (xdp->ingress_ifindex != ifindex_in) + return XDP_ABORTED; + + if (metadata + 1 > data) + return XDP_ABORTED; + + if (*metadata != 0x42) + return XDP_ABORTED; + + if (*payload == 0) { + *payload = 0x42; + pkts_seen_zero++; + } + + if (bpf_xdp_adjust_meta(xdp, 4)) + return XDP_ABORTED; + + if (retcode > XDP_PASS) + retcode--; + + if (ret == 
XDP_REDIRECT) + return bpf_redirect(ifindex_out, 0); + + return ret; +} + +static bool check_pkt(void *data, void *data_end) +{ + struct ipv6hdr *iph = data + sizeof(struct ethhdr); + __u8 *payload = data + HDR_SZ; + + if (payload + 1 > data_end) + return false; + + if (iph->nexthdr != IPPROTO_UDP || *payload != 0x42) + return false; + + /* reset the payload so the same packet doesn't get counted twice when + * it cycles back through the kernel path and out the dst veth + */ + *payload = 0; + return true; +} + +SEC("xdp") +int xdp_count_pkts(struct xdp_md *xdp) +{ + void *data = (void *)(long)xdp->data; + void *data_end = (void *)(long)xdp->data_end; + + if (check_pkt(data, data_end)) + pkts_seen_xdp++; + + /* Return XDP_DROP to make sure the data page is recycled, like when it + * exits a physical NIC. Recycled pages will be counted in the + * pkts_seen_zero counter above. + */ + return XDP_DROP; +} + +SEC("tc") +int tc_count_pkts(struct __sk_buff *skb) +{ + void *data = (void *)(long)skb->data; + void *data_end = (void *)(long)skb->data_end; + + if (check_pkt(data, data_end)) + pkts_seen_tc++; + + return 0; +} + +char _license[] SEC("license") = "GPL"; From f655c088e74f4681134f650b5743174586a61016 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Thu, 10 Mar 2022 13:18:46 +0100 Subject: [PATCH 033/137] bpftool: Restore support for BPF offload-enabled feature probing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 1a56c18e6c2e4e74 ("bpftool: Stop supporting BPF offload-enabled feature probing") removed support for probing BPF offload features. This is still useful for the NFP NIC, which can support offloading of BPF programs. The reason for the dropped support was that libbpf starting with v1.0 would drop support for passing the ifindex to the BPF prog/map/helper feature probing APIs. In order to keep this useful feature for the NFP, restore the functionality by moving it directly into bpftool. The code restored is a simplified version of the code that existed in libbpf, which supported passing the ifindex. The simplification is that it only targets the cases where an ifindex is given and calls into libbpf for the cases where it's not. Before restoring support for probing offload features: # bpftool feature probe dev ens4np0 Scanning system call availability... bpf() syscall is available Scanning eBPF program types... Scanning eBPF map types... Scanning eBPF helper functions... eBPF helpers supported for program type sched_cls: eBPF helpers supported for program type xdp: Scanning miscellaneous eBPF features... Large program size limit is NOT available Bounded loop support is NOT available ISA extension v2 is NOT available ISA extension v3 is NOT available With support for probing offload features restored: # bpftool feature probe dev ens4np0 Scanning system call availability... bpf() syscall is available Scanning eBPF program types... eBPF program_type sched_cls is available eBPF program_type xdp is available Scanning eBPF map types... eBPF map_type hash is available eBPF map_type array is available Scanning eBPF helper functions... eBPF helpers supported for program type sched_cls: - bpf_map_lookup_elem - bpf_get_prandom_u32 - bpf_perf_event_output eBPF helpers supported for program type xdp: - bpf_map_lookup_elem - bpf_get_prandom_u32 - bpf_perf_event_output - bpf_xdp_adjust_head - bpf_xdp_adjust_tail Scanning miscellaneous eBPF features...
Large program size limit is NOT available Bounded loop support is NOT available ISA extension v2 is NOT available ISA extension v3 is NOT available Signed-off-by: Niklas Söderlund Signed-off-by: Simon Horman Signed-off-by: Daniel Borkmann Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20220310121846.921256-1-niklas.soderlund@corigine.com --- tools/bpf/bpftool/feature.c | 152 +++++++++++++++++++++++++++++++++--- 1 file changed, 139 insertions(+), 13 deletions(-) diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index 9c894b1447de..c2f43a5d38e0 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -3,6 +3,7 @@ #include #include +#include #include #include #include @@ -45,6 +46,11 @@ static bool run_as_unprivileged; /* Miscellaneous utility functions */ +static bool grep(const char *buffer, const char *pattern) +{ + return !!strstr(buffer, pattern); +} + static bool check_procfs(void) { struct statfs st_fs; @@ -135,6 +141,32 @@ static void print_end_section(void) /* Probing functions */ +static int get_vendor_id(int ifindex) +{ + char ifname[IF_NAMESIZE], path[64], buf[8]; + ssize_t len; + int fd; + + if (!if_indextoname(ifindex, ifname)) + return -1; + + snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname); + + fd = open(path, O_RDONLY | O_CLOEXEC); + if (fd < 0) + return -1; + + len = read(fd, buf, sizeof(buf)); + close(fd); + if (len < 0) + return -1; + if (len >= (ssize_t)sizeof(buf)) + return -1; + buf[len] = '\0'; + + return strtol(buf, NULL, 0); +} + static int read_procfs(const char *path) { char *endptr, *line = NULL; @@ -478,6 +510,40 @@ static bool probe_bpf_syscall(const char *define_prefix) return res; } +static bool +probe_prog_load_ifindex(enum bpf_prog_type prog_type, + const struct bpf_insn *insns, size_t insns_cnt, + char *log_buf, size_t log_buf_sz, + __u32 ifindex) +{ + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .log_buf = log_buf, + .log_size = log_buf_sz, + .log_level = log_buf ? 
1 : 0, + .prog_ifindex = ifindex, + ); + int fd; + + errno = 0; + fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts); + if (fd >= 0) + close(fd); + + return fd >= 0 && errno != EINVAL && errno != EOPNOTSUPP; +} + +static bool probe_prog_type_ifindex(enum bpf_prog_type prog_type, __u32 ifindex) +{ + /* nfp returns -EINVAL on exit(0) with TC offload */ + struct bpf_insn insns[2] = { + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN() + }; + + return probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns), + NULL, 0, ifindex); +} + static void probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types, const char *define_prefix, __u32 ifindex) @@ -488,11 +554,19 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types, bool res; if (ifindex) { - p_info("BPF offload feature probing is not supported"); - return; + switch (prog_type) { + case BPF_PROG_TYPE_SCHED_CLS: + case BPF_PROG_TYPE_XDP: + break; + default: + return; + } + + res = probe_prog_type_ifindex(prog_type, ifindex); + } else { + res = libbpf_probe_bpf_prog_type(prog_type, NULL); } - res = libbpf_probe_bpf_prog_type(prog_type, NULL); #ifdef USE_LIBCAP /* Probe may succeed even if program load fails, for unprivileged users * check that we did not fail because of insufficient permissions @@ -521,6 +595,26 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types, define_prefix); } +static bool probe_map_type_ifindex(enum bpf_map_type map_type, __u32 ifindex) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts); + int key_size, value_size, max_entries; + int fd; + + opts.map_ifindex = ifindex; + + key_size = sizeof(__u32); + value_size = sizeof(__u32); + max_entries = 1; + + fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, + &opts); + if (fd >= 0) + close(fd); + + return fd >= 0; +} + static void probe_map_type(enum bpf_map_type map_type, const char *define_prefix, __u32 ifindex) @@ -531,11 +625,18 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix, bool res; if (ifindex) { - p_info("BPF offload feature probing is not supported"); - return; - } + switch (map_type) { + case BPF_MAP_TYPE_HASH: + case BPF_MAP_TYPE_ARRAY: + break; + default: + return; + } - res = libbpf_probe_bpf_map_type(map_type, NULL); + res = probe_map_type_ifindex(map_type, ifindex); + } else { + res = libbpf_probe_bpf_map_type(map_type, NULL); + } /* Probe result depends on the success of map creation, no additional * check required for unprivileged users @@ -559,6 +660,33 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix, define_prefix); } +static bool +probe_helper_ifindex(enum bpf_func_id id, enum bpf_prog_type prog_type, + __u32 ifindex) +{ + struct bpf_insn insns[2] = { + BPF_EMIT_CALL(id), + BPF_EXIT_INSN() + }; + char buf[4096] = {}; + bool res; + + probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns), buf, + sizeof(buf), ifindex); + res = !grep(buf, "invalid func ") && !grep(buf, "unknown func "); + + switch (get_vendor_id(ifindex)) { + case 0x19ee: /* Netronome specific */ + res = res && !grep(buf, "not supported by FW") && + !grep(buf, "unsupported function id"); + break; + default: + break; + } + + return res; +} + static void probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type, const char *define_prefix, unsigned int id, @@ -567,12 +695,10 @@ probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type, bool res = false; if (supported_type) { - if (ifindex) { - p_info("BPF offload feature probing is 
not supported"); - return; - } - - res = libbpf_probe_bpf_helper(prog_type, id, NULL); + if (ifindex) + res = probe_helper_ifindex(id, prog_type, ifindex); + else + res = libbpf_probe_bpf_helper(prog_type, id, NULL); #ifdef USE_LIBCAP /* Probe may succeed even if program load fails, for * unprivileged users check that we did not fail because of From eecbfd976e8617125c69e4b3efc89fe34eac7bb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Thu, 10 Mar 2022 12:02:28 +0100 Subject: [PATCH 034/137] bpf: Initialise retval in bpf_prog_test_run_xdp() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The kernel test robot pointed out that the newly added bpf_test_run_xdp_live() runner doesn't set the retval in the caller (by design), which means that the variable can be passed unitialised to bpf_test_finish(). Fix this by initialising the variable properly. Fixes: b530e9e1063e ("bpf: Add "live packet" mode for XDP in BPF_PROG_RUN") Reported-by: kernel test robot Reported-by: Dan Carpenter Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220310110228.161869-1-toke@redhat.com --- net/bpf/test_run.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 25169908be4a..0acdc37c8415 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -1207,9 +1207,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES); u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); u32 batch_size = kattr->test.batch_size; + u32 retval = 0, duration, max_data_sz; u32 size = kattr->test.data_size_in; u32 headroom = XDP_PACKET_HEADROOM; - u32 retval, duration, max_data_sz; u32 repeat = kattr->test.repeat; struct netdev_rx_queue *rxqueue; struct skb_shared_info *sinfo; From 743bec1b78af174f588956ffbdb9bca4348e3eaf Mon Sep 17 00:00:00 2001 From: Yihao Han Date: Thu, 10 Mar 2022 01:28:27 -0800 Subject: [PATCH 035/137] bpf, test_run: Use kvfree() for memory allocated with kvmalloc() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is allocated with kvmalloc(), the corresponding release function should not be kfree(), use kvfree() instead. Generated by: scripts/coccinelle/api/kfree_mismatch.cocci Fixes: b530e9e1063e ("bpf: Add "live packet" mode for XDP in BPF_PROG_RUN") Signed-off-by: Yihao Han Signed-off-by: Daniel Borkmann Cc: Toke Høiland-Jørgensen Link: https://lore.kernel.org/bpf/20220310092828.13405-1-hanyihao@vivo.com --- net/bpf/test_run.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 0acdc37c8415..24405a280a9b 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -196,9 +196,9 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_c err_mmodel: page_pool_destroy(pp); err_pp: - kfree(xdp->skbs); + kvfree(xdp->skbs); err_skbs: - kfree(xdp->frames); + kvfree(xdp->frames); return err; } From 3b5d4ddf8fe1f60082513f94bae586ac80188a03 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 9 Mar 2022 01:04:50 -0800 Subject: [PATCH 036/137] bpf: net: Remove TC_AT_INGRESS_OFFSET and SKB_MONO_DELIVERY_TIME_OFFSET macro This patch removes the TC_AT_INGRESS_OFFSET and SKB_MONO_DELIVERY_TIME_OFFSET macros. Instead, PKT_VLAN_PRESENT_OFFSET is used because all of them are at the same offset. 
A comment is added to make it clear that changing the position of tc_at_ingress or mono_delivery_time will require adjusting the defined macros. The earlier discussion can be found here: https://lore.kernel.org/bpf/419d994e-ff61-7c11-0ec7-11fefcb0186e@iogearbox.net/ Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220309090450.3710955-1-kafai@fb.com --- include/linux/skbuff.h | 10 +++++----- net/core/filter.c | 14 +++++++------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2be263184d1e..7b0cb10e70cb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -960,10 +960,10 @@ struct sk_buff { __u8 csum_complete_sw:1; __u8 csum_level:2; __u8 dst_pending_confirm:1; - __u8 mono_delivery_time:1; + __u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */ #ifdef CONFIG_NET_CLS_ACT __u8 tc_skip_classify:1; - __u8 tc_at_ingress:1; + __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */ #endif #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; @@ -1062,7 +1062,9 @@ struct sk_buff { #endif #define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset) -/* if you move pkt_vlan_present around you also must adapt these constants */ +/* if you move pkt_vlan_present, tc_at_ingress, or mono_delivery_time + * around, you also must adapt these constants. + */ #ifdef __BIG_ENDIAN_BITFIELD #define PKT_VLAN_PRESENT_BIT 7 #define TC_AT_INGRESS_MASK (1 << 0) @@ -1073,8 +1075,6 @@ struct sk_buff { #define SKB_MONO_DELIVERY_TIME_MASK (1 << 5) #endif #define PKT_VLAN_PRESENT_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset) -#define TC_AT_INGRESS_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset) -#define SKB_MONO_DELIVERY_TIME_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset) #ifdef __KERNEL__ /* diff --git a/net/core/filter.c b/net/core/filter.c index 88767f7da150..738a294a3c82 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8896,7 +8896,7 @@ static struct bpf_insn *bpf_convert_dtime_type_read(const struct bpf_insn *si, __u8 tmp_reg = BPF_REG_AX; *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); + PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, SKB_MONO_DELIVERY_TIME_MASK); *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); @@ -8912,7 +8912,7 @@ static struct bpf_insn *bpf_convert_dtime_type_read(const struct bpf_insn *si, *insn++ = BPF_JMP_A(IS_ENABLED(CONFIG_NET_CLS_ACT) ? 6 : 1); #ifdef CONFIG_NET_CLS_ACT - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); /* At ingress, value_reg = 0 */ @@ -8959,14 +8959,14 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, if (!prog->delivery_time_access) { __u8 tmp_reg = BPF_REG_AX; - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 5); /* @ingress, read __sk_buff->tstamp as the (rcv) timestamp, * so check the skb->mono_delivery_time.
*/ *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); + PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, SKB_MONO_DELIVERY_TIME_MASK); *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); @@ -8992,18 +8992,18 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, if (!prog->delivery_time_access) { __u8 tmp_reg = BPF_REG_AX; - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 3); /* Writing __sk_buff->tstamp at ingress as the (rcv) timestamp. * Clear the skb->mono_delivery_time. */ *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); + PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK); *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); + PKT_VLAN_PRESENT_OFFSET); } #endif From 539de9328e3ab444efe51ddc7416ce2a3f0f23b2 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 9 Mar 2022 01:04:56 -0800 Subject: [PATCH 037/137] bpf: Simplify insn rewrite on BPF_READ __sk_buff->tstamp The skb->tc_at_ingress and skb->mono_delivery_time are at the same byte offset. Thus, only one BPF_LDX_MEM(BPF_B) is needed and both bits can be tested together. /* BPF_READ: a = __sk_buff->tstamp */ if (skb->tc_at_ingress && skb->mono_delivery_time) a = 0; else a = skb->tstamp; Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220309090456.3711530-1-kafai@fb.com --- net/core/filter.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 738a294a3c82..2c83d1f38704 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8956,21 +8956,22 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, __u8 skb_reg = si->src_reg; #ifdef CONFIG_NET_CLS_ACT + /* If the delivery_time_type is read, + * the bpf prog is aware the tstamp could have delivery time. + * Thus, read skb->tstamp as is if delivery_time_access is true. + */ if (!prog->delivery_time_access) { + /* AX is needed because src_reg and dst_reg could be the same */ __u8 tmp_reg = BPF_REG_AX; *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 5); - /* @ingress, read __sk_buff->tstamp as the (rcv) timestamp, - * so check the skb->mono_delivery_time. - */ - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - PKT_VLAN_PRESENT_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, - SKB_MONO_DELIVERY_TIME_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); - /* skb->mono_delivery_time is set, read 0 as the (rcv) timestamp. */ + TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JNE, tmp_reg, + TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK, 2); + /* skb->tc_at_ingress && skb->mono_delivery_time, + * read 0 as the (rcv) timestamp. + */ *insn++ = BPF_MOV64_IMM(value_reg, 0); *insn++ = BPF_JMP_A(1); } From 9d90db97e4d4537c82a5ee476b1c13553e487f28 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 9 Mar 2022 01:05:02 -0800 Subject: [PATCH 038/137] bpf: Simplify insn rewrite on BPF_WRITE __sk_buff->tstamp BPF_JMP32_IMM(BPF_JSET) is used to save a BPF_ALU32_IMM(BPF_AND). 
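To sketch the saving (illustrative; the exact sequences are in the diff below): BPF_JSET branches when (reg & imm) != 0, so the mask-then-compare pair collapses into a single test-and-branch:

```
/* before: two insns to test the bit (jump offsets illustrative) */
*insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK);
*insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 3);

/* after: one insn, and tmp_reg keeps its loaded value for reuse */
*insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1);
```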
The skb->tc_at_ingress and skb->mono_delivery_time are at the same offset, so only one BPF_LDX_MEM(BPF_B) is needed. Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220309090502.3711982-1-kafai@fb.com --- net/core/filter.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 2c83d1f38704..f914e4b13b18 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8990,25 +8990,27 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, __u8 skb_reg = si->dst_reg; #ifdef CONFIG_NET_CLS_ACT + /* If the delivery_time_type is read, + * the bpf prog is aware the tstamp could have delivery time. + * Thus, write skb->tstamp as is if delivery_time_access is true. + * Otherwise, writing at ingress will have to clear the + * mono_delivery_time bit also. + */ if (!prog->delivery_time_access) { __u8 tmp_reg = BPF_REG_AX; *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 3); - /* Writing __sk_buff->tstamp at ingress as the (rcv) timestamp. - * Clear the skb->mono_delivery_time. - */ - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - PKT_VLAN_PRESENT_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, - ~SKB_MONO_DELIVERY_TIME_MASK); - *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, - PKT_VLAN_PRESENT_OFFSET); + /* Writing __sk_buff->tstamp as ingress, goto <clear> */ + *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1); + /* goto <store> */ + *insn++ = BPF_JMP_A(2); + /* <clear>: mono_delivery_time */ + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK); + *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, PKT_VLAN_PRESENT_OFFSET); } #endif - /* skb->tstamp = tstamp */ + /* <store>: skb->tstamp = tstamp */ *insn++ = BPF_STX_MEM(BPF_DW, skb_reg, value_reg, offsetof(struct sk_buff, tstamp)); return insn; From 9bb984f28d5bcb917d35d930fcfb89f90f9449fd Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 9 Mar 2022 01:05:09 -0800 Subject: [PATCH 039/137] bpf: Remove BPF_SKB_DELIVERY_TIME_NONE and rename s/delivery_time_/tstamp_/ This patch is to simplify the uapi bpf.h regarding the tstamp type and use a similar way as the kernel to describe the value stored in __sk_buff->tstamp. My earlier thought was to avoid describing the semantic and clock base for the rcv timestamp until there is more clarity on the use case, hence the __sk_buff->delivery_time_type naming instead of __sk_buff->tstamp_type. With some thought, it can reuse the UNSPEC naming. This patch first removes BPF_SKB_DELIVERY_TIME_NONE and also renames BPF_SKB_DELIVERY_TIME_UNSPEC to BPF_SKB_TSTAMP_UNSPEC and BPF_SKB_DELIVERY_TIME_MONO to BPF_SKB_TSTAMP_DELIVERY_MONO. The semantics of BPF_SKB_TSTAMP_DELIVERY_MONO are the same: __sk_buff->tstamp has the delivery time in the mono clock base. BPF_SKB_TSTAMP_UNSPEC means __sk_buff->tstamp has the (rcv) tstamp at ingress and the delivery time at egress. At egress, the clock base could be found from skb->sk->sk_clockid. __sk_buff->tstamp == 0 naturally means NONE, so NONE is not needed.
With BPF_SKB_TSTAMP_UNSPEC for the rcv tstamp at ingress, the __sk_buff->delivery_time_type is also renamed to __sk_buff->tstamp_type, which was also suggested in the earlier discussion: https://lore.kernel.org/bpf/b181acbe-caf8-502d-4b7b-7d96b9fc5d55@iogearbox.net/ The above will then make __sk_buff->tstamp and __sk_buff->tstamp_type the same as their kernel counterparts skb->tstamp and skb->mono_delivery_time. The internal kernel function bpf_skb_convert_dtime_type_read() is then renamed to bpf_skb_convert_tstamp_type_read() and it can be simplified with the BPF_SKB_DELIVERY_TIME_NONE gone. A BPF_ALU32_IMM(BPF_AND) insn is also saved by using BPF_JMP32_IMM(BPF_JSET). The bpf helper bpf_skb_set_delivery_time() is also renamed to bpf_skb_set_tstamp(). The arg name is changed from dtime to tstamp as well. It only allows setting tstamp 0 for BPF_SKB_TSTAMP_UNSPEC, and this could be relaxed later if there is a use case to change a mono delivery time to non-mono. prog->delivery_time_access is also renamed to prog->tstamp_type_access. Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220309090509.3712315-1-kafai@fb.com --- include/linux/filter.h | 2 +- include/uapi/linux/bpf.h | 40 ++++++++-------- net/core/filter.c | 88 +++++++++++++--------------------- tools/include/uapi/linux/bpf.h | 40 ++++++++-------- 4 files changed, 77 insertions(+), 93 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 9bf26307247f..05ed9bd31b45 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -573,7 +573,7 @@ struct bpf_prog { enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ call_get_func_ip:1, /* Do we call get_func_ip() */ - delivery_time_access:1; /* Accessed __sk_buff->delivery_time_type */ + tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index bc23020b638d..d288a0a9f797 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5090,23 +5090,22 @@ union bpf_attr { * 0 on success, or a negative error in case of failure. On error * *dst* buffer is zeroed out. * - * long bpf_skb_set_delivery_time(struct sk_buff *skb, u64 dtime, u32 dtime_type) + * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type) * Description - * Set a *dtime* (delivery time) to the __sk_buff->tstamp and also - * change the __sk_buff->delivery_time_type to *dtime_type*. + * Change the __sk_buff->tstamp_type to *tstamp_type* + * and set *tstamp* to the __sk_buff->tstamp together. * - * When setting a delivery time (non zero *dtime*) to - * __sk_buff->tstamp, only BPF_SKB_DELIVERY_TIME_MONO *dtime_type* - * is supported. It is the only delivery_time_type that will be - * kept after bpf_redirect_*(). - * - * If there is no need to change the __sk_buff->delivery_time_type, - * the delivery time can be directly written to __sk_buff->tstamp + * If there is no need to change the __sk_buff->tstamp_type, + * the tstamp value can be directly written to __sk_buff->tstamp * instead. * - * *dtime* 0 and *dtime_type* BPF_SKB_DELIVERY_TIME_NONE - * can be used to clear any delivery time stored in - * __sk_buff->tstamp.
+ * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that + * will be kept during bpf_redirect_*(). A non zero + * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO + * *tstamp_type*. + * + * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used + * with a zero *tstamp*. * * Only IPv4 and IPv6 skb->protocol are supported. * @@ -5119,7 +5118,7 @@ union bpf_attr { * Return * 0 on success. * **-EINVAL** for invalid input - * **-EOPNOTSUPP** for unsupported delivery_time_type and protocol + * **-EOPNOTSUPP** for unsupported protocol */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5314,7 +5313,7 @@ union bpf_attr { FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ FN(copy_from_user_task), \ - FN(skb_set_delivery_time), \ + FN(skb_set_tstamp), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5505,9 +5504,12 @@ union { \ } __attribute__((aligned(8))) enum { - BPF_SKB_DELIVERY_TIME_NONE, - BPF_SKB_DELIVERY_TIME_UNSPEC, - BPF_SKB_DELIVERY_TIME_MONO, + BPF_SKB_TSTAMP_UNSPEC, + BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */ + /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle, + * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC + * and try to deduce it by ingress, egress or skb->sk->sk_clockid. + */ }; /* user accessible mirror of in-kernel sk_buff. @@ -5550,7 +5552,7 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; - __u8 delivery_time_type; + __u8 tstamp_type; __u32 :24; /* Padding, future use. */ __u64 hwtstamp; }; diff --git a/net/core/filter.c b/net/core/filter.c index f914e4b13b18..03655f2074ae 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7388,36 +7388,36 @@ static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = { .arg3_type = ARG_ANYTHING, }; -BPF_CALL_3(bpf_skb_set_delivery_time, struct sk_buff *, skb, - u64, dtime, u32, dtime_type) +BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb, + u64, tstamp, u32, tstamp_type) { /* skb_clear_delivery_time() is done for inet protocol */ if (skb->protocol != htons(ETH_P_IP) && skb->protocol != htons(ETH_P_IPV6)) return -EOPNOTSUPP; - switch (dtime_type) { - case BPF_SKB_DELIVERY_TIME_MONO: - if (!dtime) + switch (tstamp_type) { + case BPF_SKB_TSTAMP_DELIVERY_MONO: + if (!tstamp) return -EINVAL; - skb->tstamp = dtime; + skb->tstamp = tstamp; skb->mono_delivery_time = 1; break; - case BPF_SKB_DELIVERY_TIME_NONE: - if (dtime) + case BPF_SKB_TSTAMP_UNSPEC: + if (tstamp) return -EINVAL; skb->tstamp = 0; skb->mono_delivery_time = 0; break; default: - return -EOPNOTSUPP; + return -EINVAL; } return 0; } -static const struct bpf_func_proto bpf_skb_set_delivery_time_proto = { - .func = bpf_skb_set_delivery_time, +static const struct bpf_func_proto bpf_skb_set_tstamp_proto = { + .func = bpf_skb_set_tstamp, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, @@ -7786,8 +7786,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_tcp_gen_syncookie_proto; case BPF_FUNC_sk_assign: return &bpf_sk_assign_proto; - case BPF_FUNC_skb_set_delivery_time: - return &bpf_skb_set_delivery_time_proto; + case BPF_FUNC_skb_set_tstamp: + return &bpf_skb_set_tstamp_proto; #endif default: return bpf_sk_base_func_proto(func_id); @@ -8127,9 +8127,9 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type return false; info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; break; - case offsetof(struct __sk_buff, delivery_time_type): + case 
offsetof(struct __sk_buff, tstamp_type): return false; - case offsetofend(struct __sk_buff, delivery_time_type) ... offsetof(struct __sk_buff, hwtstamp) - 1: + case offsetofend(struct __sk_buff, tstamp_type) ... offsetof(struct __sk_buff, hwtstamp) - 1: /* Explicitly prohibit access to padding in __sk_buff. */ return false; default: @@ -8484,14 +8484,14 @@ static bool tc_cls_act_is_valid_access(int off, int size, break; case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; - case offsetof(struct __sk_buff, delivery_time_type): + case offsetof(struct __sk_buff, tstamp_type): /* The convert_ctx_access() on reading and writing * __sk_buff->tstamp depends on whether the bpf prog - * has used __sk_buff->delivery_time_type or not. - * Thus, we need to set prog->delivery_time_access + * has used __sk_buff->tstamp_type or not. + * Thus, we need to set prog->tstamp_type_access * earlier during is_valid_access() here. */ - ((struct bpf_prog *)prog)->delivery_time_access = 1; + ((struct bpf_prog *)prog)->tstamp_type_access = 1; return size == sizeof(__u8); } @@ -8888,42 +8888,22 @@ static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type, return insn - insn_buf; } -static struct bpf_insn *bpf_convert_dtime_type_read(const struct bpf_insn *si, - struct bpf_insn *insn) +static struct bpf_insn *bpf_convert_tstamp_type_read(const struct bpf_insn *si, + struct bpf_insn *insn) { __u8 value_reg = si->dst_reg; __u8 skb_reg = si->src_reg; + /* AX is needed because src_reg and dst_reg could be the same */ __u8 tmp_reg = BPF_REG_AX; *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, - SKB_MONO_DELIVERY_TIME_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); - /* value_reg = BPF_SKB_DELIVERY_TIME_MONO */ - *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_MONO); - *insn++ = BPF_JMP_A(IS_ENABLED(CONFIG_NET_CLS_ACT) ? 10 : 5); - - *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, skb_reg, - offsetof(struct sk_buff, tstamp)); - *insn++ = BPF_JMP_IMM(BPF_JNE, tmp_reg, 0, 2); - /* value_reg = BPF_SKB_DELIVERY_TIME_NONE */ - *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_NONE); - *insn++ = BPF_JMP_A(IS_ENABLED(CONFIG_NET_CLS_ACT) ? 6 : 1); - -#ifdef CONFIG_NET_CLS_ACT - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); - /* At ingress, value_reg = 0 */ - *insn++ = BPF_MOV32_IMM(value_reg, 0); + *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, + SKB_MONO_DELIVERY_TIME_MASK, 2); + *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_UNSPEC); *insn++ = BPF_JMP_A(1); -#endif + *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_DELIVERY_MONO); - /* value_reg = BPF_SKB_DELIVERYT_TIME_UNSPEC */ - *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_UNSPEC); - - /* 15 insns with CONFIG_NET_CLS_ACT */ return insn; } @@ -8956,11 +8936,11 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, __u8 skb_reg = si->src_reg; #ifdef CONFIG_NET_CLS_ACT - /* If the delivery_time_type is read, + /* If the tstamp_type is read, * the bpf prog is aware the tstamp could have delivery time. - * Thus, read skb->tstamp as is if delivery_time_access is true. + * Thus, read skb->tstamp as is if tstamp_type_access is true. 
*/ - if (!prog->delivery_time_access) { + if (!prog->tstamp_type_access) { /* AX is needed because src_reg and dst_reg could be the same */ __u8 tmp_reg = BPF_REG_AX; @@ -8990,13 +8970,13 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, __u8 skb_reg = si->dst_reg; #ifdef CONFIG_NET_CLS_ACT - /* If the delivery_time_type is read, + /* If the tstamp_type is read, * the bpf prog is aware the tstamp could have delivery time. - * Thus, write skb->tstamp as is if delivery_time_access is true. + * Thus, write skb->tstamp as is if tstamp_type_access is true. * Otherwise, writing at ingress will have to clear the * mono_delivery_time bit also. */ - if (!prog->delivery_time_access) { + if (!prog->tstamp_type_access) { __u8 tmp_reg = BPF_REG_AX; *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); @@ -9329,8 +9309,8 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, insn = bpf_convert_tstamp_read(prog, si, insn); break; - case offsetof(struct __sk_buff, delivery_time_type): - insn = bpf_convert_dtime_type_read(si, insn); + case offsetof(struct __sk_buff, tstamp_type): + insn = bpf_convert_tstamp_type_read(si, insn); break; case offsetof(struct __sk_buff, gso_segs): diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index bc23020b638d..d288a0a9f797 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5090,23 +5090,22 @@ union bpf_attr { * 0 on success, or a negative error in case of failure. On error * *dst* buffer is zeroed out. * - * long bpf_skb_set_delivery_time(struct sk_buff *skb, u64 dtime, u32 dtime_type) + * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type) * Description - * Set a *dtime* (delivery time) to the __sk_buff->tstamp and also - * change the __sk_buff->delivery_time_type to *dtime_type*. + * Change the __sk_buff->tstamp_type to *tstamp_type* + * and set *tstamp* to the __sk_buff->tstamp together. * - * When setting a delivery time (non zero *dtime*) to - * __sk_buff->tstamp, only BPF_SKB_DELIVERY_TIME_MONO *dtime_type* - * is supported. It is the only delivery_time_type that will be - * kept after bpf_redirect_*(). - * - * If there is no need to change the __sk_buff->delivery_time_type, - * the delivery time can be directly written to __sk_buff->tstamp + * If there is no need to change the __sk_buff->tstamp_type, + * the tstamp value can be directly written to __sk_buff->tstamp * instead. * - * *dtime* 0 and *dtime_type* BPF_SKB_DELIVERY_TIME_NONE - * can be used to clear any delivery time stored in - * __sk_buff->tstamp. + * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that + * will be kept during bpf_redirect_*(). A non zero + * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO + * *tstamp_type*. + * + * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used + * with a zero *tstamp*. * * Only IPv4 and IPv6 skb->protocol are supported. * @@ -5119,7 +5118,7 @@ union bpf_attr { * Return * 0 on success. 
* **-EINVAL** for invalid input - * **-EOPNOTSUPP** for unsupported delivery_time_type and protocol + * **-EOPNOTSUPP** for unsupported protocol */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5314,7 +5313,7 @@ union bpf_attr { FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ FN(copy_from_user_task), \ - FN(skb_set_delivery_time), \ + FN(skb_set_tstamp), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5505,9 +5504,12 @@ union { \ } __attribute__((aligned(8))) enum { - BPF_SKB_DELIVERY_TIME_NONE, - BPF_SKB_DELIVERY_TIME_UNSPEC, - BPF_SKB_DELIVERY_TIME_MONO, + BPF_SKB_TSTAMP_UNSPEC, + BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */ + /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle, + * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC + * and try to deduce it by ingress, egress or skb->sk->sk_clockid. + */ }; /* user accessible mirror of in-kernel sk_buff. @@ -5550,7 +5552,7 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; - __u8 delivery_time_type; + __u8 tstamp_type; __u32 :24; /* Padding, future use. */ __u64 hwtstamp; }; From 3daf0896f3f958b48d7747e96dd57a6b10745b76 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 9 Mar 2022 01:05:15 -0800 Subject: [PATCH 040/137] bpf: selftests: Update tests after s/delivery_time/tstamp/ change in bpf.h The previous patch made the follow changes: - s/delivery_time_type/tstamp_type/ - s/bpf_skb_set_delivery_time/bpf_skb_set_tstamp/ - BPF_SKB_DELIVERY_TIME_* to BPF_SKB_TSTAMP_* This patch is to change the test_tc_dtime.c to reflect the above. Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220309090515.3712742-1-kafai@fb.com --- .../selftests/bpf/progs/test_tc_dtime.c | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/test_tc_dtime.c b/tools/testing/selftests/bpf/progs/test_tc_dtime.c index 9d9e8e17b8a0..06f300d06dbd 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_dtime.c +++ b/tools/testing/selftests/bpf/progs/test_tc_dtime.c @@ -174,13 +174,13 @@ int egress_host(struct __sk_buff *skb) return TC_ACT_OK; if (skb_proto(skb_type) == IPPROTO_TCP) { - if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_MONO && + if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO && skb->tstamp) inc_dtimes(EGRESS_ENDHOST); else inc_errs(EGRESS_ENDHOST); } else { - if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_UNSPEC && + if (skb->tstamp_type == BPF_SKB_TSTAMP_UNSPEC && skb->tstamp) inc_dtimes(EGRESS_ENDHOST); else @@ -204,7 +204,7 @@ int ingress_host(struct __sk_buff *skb) if (!skb_type) return TC_ACT_OK; - if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_MONO && + if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO && skb->tstamp == EGRESS_FWDNS_MAGIC) inc_dtimes(INGRESS_ENDHOST); else @@ -226,7 +226,7 @@ int ingress_fwdns_prio100(struct __sk_buff *skb) return TC_ACT_OK; /* delivery_time is only available to the ingress - * if the tc-bpf checks the skb->delivery_time_type. + * if the tc-bpf checks the skb->tstamp_type. */ if (skb->tstamp == EGRESS_ENDHOST_MAGIC) inc_errs(INGRESS_FWDNS_P100); @@ -250,7 +250,7 @@ int egress_fwdns_prio100(struct __sk_buff *skb) return TC_ACT_OK; /* delivery_time is always available to egress even - * the tc-bpf did not use the delivery_time_type. + * the tc-bpf did not use the tstamp_type. 
*/ if (skb->tstamp == INGRESS_FWDNS_MAGIC) inc_dtimes(EGRESS_FWDNS_P100); @@ -278,9 +278,9 @@ int ingress_fwdns_prio101(struct __sk_buff *skb) if (skb_proto(skb_type) == IPPROTO_UDP) expected_dtime = 0; - if (skb->delivery_time_type) { + if (skb->tstamp_type) { if (fwdns_clear_dtime() || - skb->delivery_time_type != BPF_SKB_DELIVERY_TIME_MONO || + skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO || skb->tstamp != expected_dtime) inc_errs(INGRESS_FWDNS_P101); else @@ -290,14 +290,14 @@ int ingress_fwdns_prio101(struct __sk_buff *skb) inc_errs(INGRESS_FWDNS_P101); } - if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_MONO) { + if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) { skb->tstamp = INGRESS_FWDNS_MAGIC; } else { - if (bpf_skb_set_delivery_time(skb, INGRESS_FWDNS_MAGIC, - BPF_SKB_DELIVERY_TIME_MONO)) + if (bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC, + BPF_SKB_TSTAMP_DELIVERY_MONO)) inc_errs(SET_DTIME); - if (!bpf_skb_set_delivery_time(skb, INGRESS_FWDNS_MAGIC, - BPF_SKB_DELIVERY_TIME_UNSPEC)) + if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC, + BPF_SKB_TSTAMP_UNSPEC)) inc_errs(SET_DTIME); } @@ -320,9 +320,9 @@ int egress_fwdns_prio101(struct __sk_buff *skb) /* Should have handled in prio100 */ return TC_ACT_SHOT; - if (skb->delivery_time_type) { + if (skb->tstamp_type) { if (fwdns_clear_dtime() || - skb->delivery_time_type != BPF_SKB_DELIVERY_TIME_MONO || + skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO || skb->tstamp != INGRESS_FWDNS_MAGIC) inc_errs(EGRESS_FWDNS_P101); else @@ -332,14 +332,14 @@ int egress_fwdns_prio101(struct __sk_buff *skb) inc_errs(EGRESS_FWDNS_P101); } - if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_MONO) { + if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) { skb->tstamp = EGRESS_FWDNS_MAGIC; } else { - if (bpf_skb_set_delivery_time(skb, EGRESS_FWDNS_MAGIC, - BPF_SKB_DELIVERY_TIME_MONO)) + if (bpf_skb_set_tstamp(skb, EGRESS_FWDNS_MAGIC, + BPF_SKB_TSTAMP_DELIVERY_MONO)) inc_errs(SET_DTIME); - if (!bpf_skb_set_delivery_time(skb, EGRESS_FWDNS_MAGIC, - BPF_SKB_DELIVERY_TIME_UNSPEC)) + if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC, + BPF_SKB_TSTAMP_UNSPEC)) inc_errs(SET_DTIME); } From 58617014405ad5c9f94f464444f4972dabb71ca7 Mon Sep 17 00:00:00 2001 From: Hengqi Chen Date: Thu, 10 Mar 2022 23:53:35 +0800 Subject: [PATCH 041/137] bpf: Fix comment for helper bpf_current_task_under_cgroup() Fix the descriptions of the return values of helper bpf_current_task_under_cgroup(). Fixes: c6b5fb8690fa ("bpf: add documentation for eBPF helpers (42-50)") Signed-off-by: Hengqi Chen Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220310155335.1278783-1-hengqi.chen@gmail.com --- include/uapi/linux/bpf.h | 4 ++-- tools/include/uapi/linux/bpf.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d288a0a9f797..e9978a916c3e 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2302,8 +2302,8 @@ union bpf_attr { * Return * The return value depends on the result of the test, and can be: * - * * 0, if current task belongs to the cgroup2. - * * 1, if current task does not belong to the cgroup2. + * * 1, if current task belongs to the cgroup2. + * * 0, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. 
* * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index d288a0a9f797..e9978a916c3e 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2302,8 +2302,8 @@ union bpf_attr { * Return * The return value depends on the result of the test, and can be: * - * * 0, if current task belongs to the cgroup2. - * * 1, if current task does not belong to the cgroup2. + * * 1, if current task belongs to the cgroup2. + * * 0, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. * * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) From 1b773d0003aa90e60953d263c2ee31e6fb4a8a69 Mon Sep 17 00:00:00 2001 From: Yuntao Wang Date: Fri, 11 Mar 2022 00:15:18 +0800 Subject: [PATCH 042/137] bpf: Use offsetofend() to simplify macro definition Use offsetofend() instead of offsetof() + sizeof() to simplify the MIN_BPF_LINEINFO_SIZE macro definition. Signed-off-by: Yuntao Wang Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Acked-by: Joanne Koong Link: https://lore.kernel.org/bpf/20220310161518.534544-1-ytcoode@gmail.com --- kernel/bpf/verifier.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e34264200e09..0db6cd8dcb35 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -10400,8 +10400,7 @@ static void adjust_btf_func(struct bpf_verifier_env *env) aux->func_info[i].insn_off = env->subprog_info[i].start; } -#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \ - sizeof(((struct bpf_line_info *)(0))->line_col)) +#define MIN_BPF_LINEINFO_SIZE offsetofend(struct bpf_line_info, line_col) #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE static int check_btf_line(struct bpf_verifier_env *env, From 357b3cc3c0467b2f7cd6c4a87f7a18bfd779ce5b Mon Sep 17 00:00:00 2001 From: Chris J Arges Date: Wed, 9 Mar 2022 15:41:58 -0600 Subject: [PATCH 043/137] bpftool: Ensure bytes_memlock json output is correct If a BPF map's memlock value exceeds 2^32 bytes, the value displayed in JSON format will be incorrect. Use atoll() instead of atoi() so that the correct number is displayed.
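The underlying failure is plain C integer truncation; a self-contained sketch (an editor's illustration, not from the patch) that reproduces the wrong value shown in the demo below:

```
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *memlock = "4328521728";	/* > 2^32, as in the demo below */

	/* atoi() returns int; on a 64-bit glibc system the value is
	 * truncated to 32 bits: 4328521728 - 2^32 = 33554432.
	 */
	printf("atoi:  %d\n", atoi(memlock));
	printf("atoll: %lld\n", atoll(memlock));	/* 4328521728 */
	return 0;
}
```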
``` $ bpftool map create /sys/fs/bpf/test_bpfmap type hash key 4 \ value 1024 entries 4194304 name test_bpfmap $ bpftool map list 1: hash name test_bpfmap flags 0x0 key 4B value 1024B max_entries 4194304 memlock 4328521728B $ sudo bpftool map list -j | jq .[].bytes_memlock 33554432 ``` Signed-off-by: Chris J Arges Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/b6601087-0b11-33cc-904a-1133d1500a10@cloudflare.com --- tools/bpf/bpftool/map.c | 2 +- tools/bpf/bpftool/prog.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index e746642de292..f91d9bf9054e 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -504,7 +504,7 @@ static int show_map_close_json(int fd, struct bpf_map_info *info) jsonw_uint_field(json_wtr, "max_entries", info->max_entries); if (memlock) - jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock)); + jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock)); free(memlock); if (info->type == BPF_MAP_TYPE_PROG_ARRAY) { diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 8a52eed19fa2..bc4e05542c2b 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -485,7 +485,7 @@ static void print_prog_json(struct bpf_prog_info *info, int fd) memlock = get_fdinfo(fd, "memlock"); if (memlock) - jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock)); + jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock)); free(memlock); if (info->nr_map_ids) From bae60eefb95ca8f2abebaf157d4815ce8fbb0e75 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 2 Mar 2022 12:13:56 +0100 Subject: [PATCH 044/137] ima: Fix documentation-related warnings in ima_main.c Fix the following warnings in ima_main.c, displayed with W=n make argument: security/integrity/ima/ima_main.c:432: warning: Function parameter or member 'vma' not described in 'ima_file_mprotect' security/integrity/ima/ima_main.c:636: warning: Function parameter or member 'inode' not described in 'ima_post_create_tmpfile' security/integrity/ima/ima_main.c:636: warning: Excess function parameter 'file' description in 'ima_post_create_tmpfile' security/integrity/ima/ima_main.c:843: warning: Function parameter or member 'load_id' not described in 'ima_post_load_data' security/integrity/ima/ima_main.c:843: warning: Excess function parameter 'id' description in 'ima_post_load_data' Also, fix some style issues in the description of ima_post_create_tmpfile() and ima_post_path_mknod(). Signed-off-by: Roberto Sassu Signed-off-by: Alexei Starovoitov Reviewed-by: Shuah Khan Reviewed-by: Mimi Zohar Link: https://lore.kernel.org/bpf/20220302111404.193900-2-roberto.sassu@huawei.com --- security/integrity/ima/ima_main.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 8c6e4514d494..946ba8a12eab 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -418,6 +418,7 @@ int ima_file_mmap(struct file *file, unsigned long prot) /** * ima_file_mprotect - based on policy, limit mprotect change + * @vma: vm_area_struct protection is set to * @prot: contains the protection that will be applied by the kernel. 
* * Files can be mmap'ed read/write and later changed to execute to circumvent @@ -610,8 +611,8 @@ EXPORT_SYMBOL_GPL(ima_inode_hash); /** * ima_post_create_tmpfile - mark newly created tmpfile as new - * @mnt_userns: user namespace of the mount the inode was found from - * @file : newly created tmpfile + * @mnt_userns: user namespace of the mount the inode was found from + * @inode: inode of the newly created tmpfile * * No measuring, appraising or auditing of newly created tmpfiles is needed. * Skip calling process_measurement(), but indicate which newly, created @@ -643,7 +644,7 @@ void ima_post_create_tmpfile(struct user_namespace *mnt_userns, /** * ima_post_path_mknod - mark as a new inode - * @mnt_userns: user namespace of the mount the inode was found from + * @mnt_userns: user namespace of the mount the inode was found from * @dentry: newly created dentry * * Mark files created via the mknodat syscall as new, so that the @@ -814,8 +815,8 @@ int ima_load_data(enum kernel_load_data_id id, bool contents) * ima_post_load_data - appraise decision based on policy * @buf: pointer to in memory file contents * @size: size of in memory file contents - * @id: kernel load data caller identifier - * @description: @id-specific description of contents + * @load_id: kernel load data caller identifier + * @description: @load_id-specific description of contents * * Measure/appraise/audit in memory buffer based on policy. Policy rules * are written in terms of a policy identifier. From 280fe8367b0dc45b6ac5e04fad03e16e99540c0c Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 2 Mar 2022 12:13:57 +0100 Subject: [PATCH 045/137] ima: Always return a file measurement in ima_file_hash() __ima_inode_hash() checks if a digest has already been calculated by looking for the integrity_iint_cache structure associated with the passed inode. Users of ima_file_hash() (e.g. eBPF) might be interested in obtaining the information without having to set up an IMA policy so that the digest is always available at the time they call this function. In addition, they likely expect the digest to be fresh, e.g. recalculated by IMA after a file write. Getting the digest from the bprm_committed_creds hook (as in the eBPF test) ensures that the digest is fresh, since the IMA hook is executed before it, but this is not always the case (e.g. for the mmap_file hook). Call ima_collect_measurement() in __ima_inode_hash(), if the file descriptor is available (passed by ima_file_hash()) and the digest is not available/not fresh, and store the file measurement in a temporary integrity_iint_cache structure. This change does not increase memory usage, due to using the temporary integrity_iint_cache structure, and due to freeing the ima_digest_data structure inside integrity_iint_cache before exiting from __ima_inode_hash(). For compatibility reasons, the behavior of ima_inode_hash() remains unchanged.
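A sketch of the resulting caller contract (an editor's illustration using the names from the diff below, not part of the patch):

```
char digest[64];	/* large enough for the configured IMA hash */
int algo;

/* The digest may now be computed on demand; no IMA policy is required. */
algo = ima_file_hash(file, digest, sizeof(digest));
if (algo < 0)
	return algo;	/* -EOPNOTSUPP: cannot be measured; -EINVAL: bad args */
/* On success, algo is the hash_algo and digest[] holds the measurement. */
```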
Signed-off-by: Roberto Sassu Signed-off-by: Alexei Starovoitov Reviewed-by: Mimi Zohar Link: https://lore.kernel.org/bpf/20220302111404.193900-3-roberto.sassu@huawei.com --- security/integrity/ima/ima_main.c | 46 ++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 13 deletions(-) diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 946ba8a12eab..ed1a82f1def3 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -520,20 +520,38 @@ int ima_file_check(struct file *file, int mask) } EXPORT_SYMBOL_GPL(ima_file_check); -static int __ima_inode_hash(struct inode *inode, char *buf, size_t buf_size) +static int __ima_inode_hash(struct inode *inode, struct file *file, char *buf, + size_t buf_size) { - struct integrity_iint_cache *iint; - int hash_algo; + struct integrity_iint_cache *iint = NULL, tmp_iint; + int rc, hash_algo; - if (!ima_policy_flag) - return -EOPNOTSUPP; + if (ima_policy_flag) { + iint = integrity_iint_find(inode); + if (iint) + mutex_lock(&iint->mutex); + } + + if ((!iint || !(iint->flags & IMA_COLLECTED)) && file) { + if (iint) + mutex_unlock(&iint->mutex); + + memset(&tmp_iint, 0, sizeof(tmp_iint)); + tmp_iint.inode = inode; + mutex_init(&tmp_iint.mutex); + + rc = ima_collect_measurement(&tmp_iint, file, NULL, 0, + ima_hash_algo, NULL); + if (rc < 0) + return -EOPNOTSUPP; + + iint = &tmp_iint; + mutex_lock(&iint->mutex); + } - iint = integrity_iint_find(inode); if (!iint) return -EOPNOTSUPP; - mutex_lock(&iint->mutex); - /* * ima_file_hash can be called when ima_collect_measurement has still * not been called, we might not always have a hash. @@ -552,12 +570,14 @@ static int __ima_inode_hash(struct inode *inode, char *buf, size_t buf_size) hash_algo = iint->ima_hash->algo; mutex_unlock(&iint->mutex); + if (iint == &tmp_iint) + kfree(iint->ima_hash); + return hash_algo; } /** - * ima_file_hash - return the stored measurement if a file has been hashed and - * is in the iint cache. + * ima_file_hash - return a measurement of the file * @file: pointer to the file * @buf: buffer in which to store the hash * @buf_size: length of the buffer @@ -570,7 +590,7 @@ static int __ima_inode_hash(struct inode *inode, char *buf, size_t buf_size) * The file hash returned is based on the entire file, including the appended * signature. * - * If IMA is disabled or if no measurement is available, return -EOPNOTSUPP. + * If the measurement cannot be performed, return -EOPNOTSUPP. * If the parameters are incorrect, return -EINVAL. */ int ima_file_hash(struct file *file, char *buf, size_t buf_size) @@ -578,7 +598,7 @@ int ima_file_hash(struct file *file, char *buf, size_t buf_size) if (!file) return -EINVAL; - return __ima_inode_hash(file_inode(file), buf, buf_size); + return __ima_inode_hash(file_inode(file), file, buf, buf_size); } EXPORT_SYMBOL_GPL(ima_file_hash); @@ -605,7 +625,7 @@ int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size) if (!inode) return -EINVAL; - return __ima_inode_hash(inode, buf, buf_size); + return __ima_inode_hash(inode, NULL, buf, buf_size); } EXPORT_SYMBOL_GPL(ima_inode_hash); From 174b16946e39ebd369097e0f773536c91a8c1a4c Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 2 Mar 2022 12:13:58 +0100 Subject: [PATCH 046/137] bpf-lsm: Introduce new helper bpf_ima_file_hash() ima_file_hash() has been modified to calculate the measurement of a file on demand, if it has not been already performed by IMA or the measurement is not fresh. 
For compatibility reasons, ima_inode_hash() remains unchanged. Keep the same approach in eBPF and introduce the new helper bpf_ima_file_hash() to take advantage of the modified behavior of ima_file_hash(). Signed-off-by: Roberto Sassu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220302111404.193900-4-roberto.sassu@huawei.com --- include/uapi/linux/bpf.h | 11 +++++++++++ kernel/bpf/bpf_lsm.c | 20 ++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 11 +++++++++++ 3 files changed, 42 insertions(+) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e9978a916c3e..99fab54ae9c0 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5119,6 +5119,16 @@ union bpf_attr { * 0 on success. * **-EINVAL** for invalid input * **-EOPNOTSUPP** for unsupported protocol + * + * long bpf_ima_file_hash(struct file *file, void *dst, u32 size) + * Description + * Returns a calculated IMA hash of the *file*. + * If the hash is larger than *size*, then only *size* + * bytes will be copied to *dst* + * Return + * The **hash_algo** is returned on success, + * **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if + * invalid arguments are passed. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5314,6 +5324,7 @@ union bpf_attr { FN(xdp_store_bytes), \ FN(copy_from_user_task), \ FN(skb_set_tstamp), \ + FN(ima_file_hash), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index 9e4ecc990647..e8d27af5bbcc 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -99,6 +99,24 @@ static const struct bpf_func_proto bpf_ima_inode_hash_proto = { .allowed = bpf_ima_inode_hash_allowed, }; +BPF_CALL_3(bpf_ima_file_hash, struct file *, file, void *, dst, u32, size) +{ + return ima_file_hash(file, dst, size); +} + +BTF_ID_LIST_SINGLE(bpf_ima_file_hash_btf_ids, struct, file) + +static const struct bpf_func_proto bpf_ima_file_hash_proto = { + .func = bpf_ima_file_hash, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg1_btf_id = &bpf_ima_file_hash_btf_ids[0], + .arg2_type = ARG_PTR_TO_UNINIT_MEM, + .arg3_type = ARG_CONST_SIZE, + .allowed = bpf_ima_inode_hash_allowed, +}; + static const struct bpf_func_proto * bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -121,6 +139,8 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_bprm_opts_set_proto; case BPF_FUNC_ima_inode_hash: return prog->aux->sleepable ? &bpf_ima_inode_hash_proto : NULL; + case BPF_FUNC_ima_file_hash: + return prog->aux->sleepable ? &bpf_ima_file_hash_proto : NULL; default: return tracing_prog_func_proto(func_id, prog); } diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e9978a916c3e..99fab54ae9c0 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5119,6 +5119,16 @@ union bpf_attr { * 0 on success. * **-EINVAL** for invalid input * **-EOPNOTSUPP** for unsupported protocol + * + * long bpf_ima_file_hash(struct file *file, void *dst, u32 size) + * Description + * Returns a calculated IMA hash of the *file*. + * If the hash is larger than *size*, then only *size* + * bytes will be copied to *dst* + * Return + * The **hash_algo** is returned on success, + * **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if + * invalid arguments are passed. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5314,6 +5324,7 @@ union bpf_attr { FN(xdp_store_bytes), \ FN(copy_from_user_task), \ FN(skb_set_tstamp), \ + FN(ima_file_hash), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper From 2746de3c53d64436a5a565e87d74b65d82ab6ac7 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 2 Mar 2022 12:13:59 +0100 Subject: [PATCH 047/137] selftests/bpf: Move sample generation code to ima_test_common() Move sample generator code to ima_test_common() so that the new function can be called by multiple LSM hooks. Signed-off-by: Roberto Sassu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220302111404.193900-5-roberto.sassu@huawei.com --- tools/testing/selftests/bpf/progs/ima.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/ima.c b/tools/testing/selftests/bpf/progs/ima.c index 96060ff4ffc6..b5a0de50d1b4 100644 --- a/tools/testing/selftests/bpf/progs/ima.c +++ b/tools/testing/selftests/bpf/progs/ima.c @@ -18,8 +18,7 @@ struct { char _license[] SEC("license") = "GPL"; -SEC("lsm.s/bprm_committed_creds") -void BPF_PROG(ima, struct linux_binprm *bprm) +static void ima_test_common(struct file *file) { u64 ima_hash = 0; u64 *sample; @@ -28,7 +27,7 @@ void BPF_PROG(ima, struct linux_binprm *bprm) pid = bpf_get_current_pid_tgid() >> 32; if (pid == monitored_pid) { - ret = bpf_ima_inode_hash(bprm->file->f_inode, &ima_hash, + ret = bpf_ima_inode_hash(file->f_inode, &ima_hash, sizeof(ima_hash)); if (ret < 0 || ima_hash == 0) return; @@ -43,3 +42,9 @@ void BPF_PROG(ima, struct linux_binprm *bprm) return; } + +SEC("lsm.s/bprm_committed_creds") +void BPF_PROG(bprm_committed_creds, struct linux_binprm *bprm) +{ + ima_test_common(bprm->file); +} From 27a77d0d460cdeec57fda2bb6c4f8820ab6e8b38 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 2 Mar 2022 12:14:00 +0100 Subject: [PATCH 048/137] selftests/bpf: Add test for bpf_ima_file_hash() Add new test to ensure that bpf_ima_file_hash() returns the digest of the executed files. 
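For reference, a condensed sketch of the helper call the test exercises (mirroring the progs/ima.c change below; illustrative only):

```
SEC("lsm.s/bprm_committed_creds")	/* sleepable LSM program */
void BPF_PROG(measure, struct linux_binprm *bprm)
{
	u64 ima_hash = 0;
	long ret;

	ret = bpf_ima_file_hash(bprm->file, &ima_hash, sizeof(ima_hash));
	if (ret < 0 || ima_hash == 0)
		return;
	/* ... submit the sample, e.g. via a BPF ring buffer ... */
}
```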
Signed-off-by: Roberto Sassu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220302111404.193900-6-roberto.sassu@huawei.com --- .../selftests/bpf/prog_tests/test_ima.c | 43 +++++++++++++++++-- tools/testing/selftests/bpf/progs/ima.c | 10 ++++- 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c index 97d8a6f84f4a..acc911ba63fd 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_ima.c +++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c @@ -13,6 +13,8 @@ #include "ima.skel.h" +#define MAX_SAMPLES 2 + static int run_measured_process(const char *measured_dir, u32 *monitored_pid) { int child_pid, child_status; @@ -32,14 +34,25 @@ static int run_measured_process(const char *measured_dir, u32 *monitored_pid) return -EINVAL; } -static u64 ima_hash_from_bpf; +static u64 ima_hash_from_bpf[MAX_SAMPLES]; +static int ima_hash_from_bpf_idx; static int process_sample(void *ctx, void *data, size_t len) { - ima_hash_from_bpf = *((u64 *)data); + if (ima_hash_from_bpf_idx >= MAX_SAMPLES) + return -ENOSPC; + + ima_hash_from_bpf[ima_hash_from_bpf_idx++] = *((u64 *)data); return 0; } +static void test_init(struct ima__bss *bss) +{ + ima_hash_from_bpf_idx = 0; + + bss->use_ima_file_hash = false; +} + void test_test_ima(void) { char measured_dir_template[] = "/tmp/ima_measuredXXXXXX"; @@ -72,13 +85,35 @@ void test_test_ima(void) if (CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno)) goto close_clean; + /* + * Test #1 + * - Goal: obtain a sample with the bpf_ima_inode_hash() helper + * - Expected result: 1 sample (/bin/true) + */ + test_init(skel->bss); err = run_measured_process(measured_dir, &skel->bss->monitored_pid); - if (CHECK(err, "run_measured_process", "err = %d\n", err)) + if (CHECK(err, "run_measured_process #1", "err = %d\n", err)) goto close_clean; err = ring_buffer__consume(ringbuf); ASSERT_EQ(err, 1, "num_samples_or_err"); - ASSERT_NEQ(ima_hash_from_bpf, 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + + /* + * Test #2 + * - Goal: obtain samples with the bpf_ima_file_hash() helper + * - Expected result: 2 samples (./ima_setup.sh, /bin/true) + */ + test_init(skel->bss); + skel->bss->use_ima_file_hash = true; + err = run_measured_process(measured_dir, &skel->bss->monitored_pid); + if (CHECK(err, "run_measured_process #2", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 2, "num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); close_clean: snprintf(cmd, sizeof(cmd), "./ima_setup.sh cleanup %s", measured_dir); diff --git a/tools/testing/selftests/bpf/progs/ima.c b/tools/testing/selftests/bpf/progs/ima.c index b5a0de50d1b4..e0b073dcfb5d 100644 --- a/tools/testing/selftests/bpf/progs/ima.c +++ b/tools/testing/selftests/bpf/progs/ima.c @@ -18,6 +18,8 @@ struct { char _license[] SEC("license") = "GPL"; +bool use_ima_file_hash; + static void ima_test_common(struct file *file) { u64 ima_hash = 0; @@ -27,8 +29,12 @@ static void ima_test_common(struct file *file) pid = bpf_get_current_pid_tgid() >> 32; if (pid == monitored_pid) { - ret = bpf_ima_inode_hash(file->f_inode, &ima_hash, - sizeof(ima_hash)); + if (!use_ima_file_hash) + ret = bpf_ima_inode_hash(file->f_inode, &ima_hash, + sizeof(ima_hash)); + else + ret = bpf_ima_file_hash(file, &ima_hash, + sizeof(ima_hash)); if (ret < 0 || ima_hash == 0) return; From 
91e8fa254dbd0890c34286acdc12e96412305840 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 2 Mar 2022 12:14:01 +0100 Subject: [PATCH 049/137] selftests/bpf: Check if the digest is refreshed after a file write Verify that bpf_ima_inode_hash() returns a non-fresh digest after a file write, and that bpf_ima_file_hash() returns a fresh digest. Verification is done by requesting the digest from the bprm_creds_for_exec hook, called before ima_bprm_check(). Signed-off-by: Roberto Sassu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220302111404.193900-7-roberto.sassu@huawei.com --- tools/testing/selftests/bpf/ima_setup.sh | 24 ++++++- .../selftests/bpf/prog_tests/test_ima.c | 72 ++++++++++++++++++- tools/testing/selftests/bpf/progs/ima.c | 11 +++ 3 files changed, 103 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/ima_setup.sh b/tools/testing/selftests/bpf/ima_setup.sh index 8e62581113a3..a3de1cd43ba0 100755 --- a/tools/testing/selftests/bpf/ima_setup.sh +++ b/tools/testing/selftests/bpf/ima_setup.sh @@ -12,7 +12,7 @@ LOG_FILE="$(mktemp /tmp/ima_setup.XXXX.log)" usage() { - echo "Usage: $0 " + echo "Usage: $0 " exit 1 } @@ -77,6 +77,24 @@ run() exec "${copied_bin_path}" } +modify_bin() +{ + local tmp_dir="$1" + local mount_dir="${tmp_dir}/mnt" + local copied_bin_path="${mount_dir}/$(basename ${TEST_BINARY})" + + echo "mod" >> "${copied_bin_path}" +} + +restore_bin() +{ + local tmp_dir="$1" + local mount_dir="${tmp_dir}/mnt" + local copied_bin_path="${mount_dir}/$(basename ${TEST_BINARY})" + + truncate -s -4 "${copied_bin_path}" +} + catch() { local exit_code="$1" @@ -105,6 +123,10 @@ main() cleanup "${tmp_dir}" elif [[ "${action}" == "run" ]]; then run "${tmp_dir}" + elif [[ "${action}" == "modify-bin" ]]; then + modify_bin "${tmp_dir}" + elif [[ "${action}" == "restore-bin" ]]; then + restore_bin "${tmp_dir}" else echo "Unknown action: ${action}" exit 1 diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c index acc911ba63fd..a0cfba0b4273 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_ima.c +++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c @@ -13,16 +13,17 @@ #include "ima.skel.h" -#define MAX_SAMPLES 2 +#define MAX_SAMPLES 4 -static int run_measured_process(const char *measured_dir, u32 *monitored_pid) +static int _run_measured_process(const char *measured_dir, u32 *monitored_pid, + const char *cmd) { int child_pid, child_status; child_pid = fork(); if (child_pid == 0) { *monitored_pid = getpid(); - execlp("./ima_setup.sh", "./ima_setup.sh", "run", measured_dir, + execlp("./ima_setup.sh", "./ima_setup.sh", cmd, measured_dir, NULL); exit(errno); @@ -34,6 +35,11 @@ static int run_measured_process(const char *measured_dir, u32 *monitored_pid) return -EINVAL; } +static int run_measured_process(const char *measured_dir, u32 *monitored_pid) +{ + return _run_measured_process(measured_dir, monitored_pid, "run"); +} + static u64 ima_hash_from_bpf[MAX_SAMPLES]; static int ima_hash_from_bpf_idx; @@ -51,6 +57,7 @@ static void test_init(struct ima__bss *bss) ima_hash_from_bpf_idx = 0; bss->use_ima_file_hash = false; + bss->enable_bprm_creds_for_exec = false; } void test_test_ima(void) @@ -58,6 +65,7 @@ void test_test_ima(void) char measured_dir_template[] = "/tmp/ima_measuredXXXXXX"; struct ring_buffer *ringbuf = NULL; const char *measured_dir; + u64 bin_true_sample; char cmd[256]; int err, duration = 0; @@ -114,6 +122,64 @@ void test_test_ima(void) ASSERT_EQ(err, 2, 
"num_samples_or_err"); ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + bin_true_sample = ima_hash_from_bpf[1]; + + /* + * Test #3 + * - Goal: confirm that bpf_ima_inode_hash() returns a non-fresh digest + * - Expected result: 2 samples (/bin/true: non-fresh, fresh) + */ + test_init(skel->bss); + + err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, + "modify-bin"); + if (CHECK(err, "modify-bin #3", "err = %d\n", err)) + goto close_clean; + + skel->bss->enable_bprm_creds_for_exec = true; + err = run_measured_process(measured_dir, &skel->bss->monitored_pid); + if (CHECK(err, "run_measured_process #3", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 2, "num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample, "sample_equal_or_err"); + /* IMA refreshed the digest. */ + ASSERT_NEQ(ima_hash_from_bpf[1], bin_true_sample, + "sample_different_or_err"); + + /* + * Test #4 + * - Goal: verify that bpf_ima_file_hash() returns a fresh digest + * - Expected result: 4 samples (./ima_setup.sh: fresh, fresh; + * /bin/true: fresh, fresh) + */ + test_init(skel->bss); + skel->bss->use_ima_file_hash = true; + skel->bss->enable_bprm_creds_for_exec = true; + err = run_measured_process(measured_dir, &skel->bss->monitored_pid); + if (CHECK(err, "run_measured_process #4", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 4, "num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[2], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[3], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[2], bin_true_sample, + "sample_different_or_err"); + ASSERT_EQ(ima_hash_from_bpf[3], ima_hash_from_bpf[2], + "sample_equal_or_err"); + + skel->bss->use_ima_file_hash = false; + skel->bss->enable_bprm_creds_for_exec = false; + err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, + "restore-bin"); + if (CHECK(err, "restore-bin #3", "err = %d\n", err)) + goto close_clean; close_clean: snprintf(cmd, sizeof(cmd), "./ima_setup.sh cleanup %s", measured_dir); diff --git a/tools/testing/selftests/bpf/progs/ima.c b/tools/testing/selftests/bpf/progs/ima.c index e0b073dcfb5d..9633e5f2453d 100644 --- a/tools/testing/selftests/bpf/progs/ima.c +++ b/tools/testing/selftests/bpf/progs/ima.c @@ -19,6 +19,7 @@ struct { char _license[] SEC("license") = "GPL"; bool use_ima_file_hash; +bool enable_bprm_creds_for_exec; static void ima_test_common(struct file *file) { @@ -54,3 +55,13 @@ void BPF_PROG(bprm_committed_creds, struct linux_binprm *bprm) { ima_test_common(bprm->file); } + +SEC("lsm.s/bprm_creds_for_exec") +int BPF_PROG(bprm_creds_for_exec, struct linux_binprm *bprm) +{ + if (!enable_bprm_creds_for_exec) + return 0; + + ima_test_common(bprm->file); + return 0; +} From df6b3039fa112e17555776213cab7f07c0a8d98d Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 2 Mar 2022 12:14:02 +0100 Subject: [PATCH 050/137] bpf-lsm: Make bpf_lsm_kernel_read_file() as sleepable Make bpf_lsm_kernel_read_file() as sleepable, so that bpf_ima_inode_hash() or bpf_ima_file_hash() can be called inside the implementation of this hook. 
Signed-off-by: Roberto Sassu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220302111404.193900-8-roberto.sassu@huawei.com --- kernel/bpf/bpf_lsm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index e8d27af5bbcc..064eccba641d 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -187,6 +187,7 @@ BTF_ID(func, bpf_lsm_inode_setxattr) BTF_ID(func, bpf_lsm_inode_symlink) BTF_ID(func, bpf_lsm_inode_unlink) BTF_ID(func, bpf_lsm_kernel_module_request) +BTF_ID(func, bpf_lsm_kernel_read_file) BTF_ID(func, bpf_lsm_kernfs_init_security) #ifdef CONFIG_KEYS From e6dcf7bbf37c9ae72b0bc3a09d5f91dd1f5c19e1 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 2 Mar 2022 12:14:03 +0100 Subject: [PATCH 051/137] selftests/bpf: Add test for bpf_lsm_kernel_read_file() Test the ability of bpf_lsm_kernel_read_file() to call the sleepable functions bpf_ima_inode_hash() or bpf_ima_file_hash() to obtain a measurement of a loaded IMA policy. Signed-off-by: Roberto Sassu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220302111404.193900-9-roberto.sassu@huawei.com --- tools/testing/selftests/bpf/ima_setup.sh | 13 ++++++++++++- .../selftests/bpf/prog_tests/test_ima.c | 19 +++++++++++++++++++ tools/testing/selftests/bpf/progs/ima.c | 18 ++++++++++++++++++ 3 files changed, 49 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/ima_setup.sh b/tools/testing/selftests/bpf/ima_setup.sh index a3de1cd43ba0..8ecead4ccad0 100755 --- a/tools/testing/selftests/bpf/ima_setup.sh +++ b/tools/testing/selftests/bpf/ima_setup.sh @@ -12,7 +12,7 @@ LOG_FILE="$(mktemp /tmp/ima_setup.XXXX.log)" usage() { - echo "Usage: $0 " + echo "Usage: $0 " exit 1 } @@ -51,6 +51,7 @@ setup() ensure_mount_securityfs echo "measure func=BPRM_CHECK fsuuid=${mount_uuid}" > ${IMA_POLICY_FILE} + echo "measure func=BPRM_CHECK fsuuid=${mount_uuid}" > ${mount_dir}/policy_test } cleanup() { @@ -95,6 +96,14 @@ restore_bin() truncate -s -4 "${copied_bin_path}" } +load_policy() +{ + local tmp_dir="$1" + local mount_dir="${tmp_dir}/mnt" + + echo ${mount_dir}/policy_test > ${IMA_POLICY_FILE} 2> /dev/null +} + catch() { local exit_code="$1" @@ -127,6 +136,8 @@ main() modify_bin "${tmp_dir}" elif [[ "${action}" == "restore-bin" ]]; then restore_bin "${tmp_dir}" + elif [[ "${action}" == "load-policy" ]]; then + load_policy "${tmp_dir}" else echo "Unknown action: ${action}" exit 1 diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c index a0cfba0b4273..b13a141c4220 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_ima.c +++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c @@ -58,6 +58,7 @@ static void test_init(struct ima__bss *bss) bss->use_ima_file_hash = false; bss->enable_bprm_creds_for_exec = false; + bss->enable_kernel_read_file = false; } void test_test_ima(void) @@ -181,6 +182,24 @@ void test_test_ima(void) if (CHECK(err, "restore-bin #3", "err = %d\n", err)) goto close_clean; + /* + * Test #5 + * - Goal: obtain a sample from the kernel_read_file hook + * - Expected result: 2 samples (./ima_setup.sh, policy_test) + */ + test_init(skel->bss); + skel->bss->use_ima_file_hash = true; + skel->bss->enable_kernel_read_file = true; + err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, + "load-policy"); + if (CHECK(err, "run_measured_process #5", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 2, 
"num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + close_clean: snprintf(cmd, sizeof(cmd), "./ima_setup.sh cleanup %s", measured_dir); err = system(cmd); diff --git a/tools/testing/selftests/bpf/progs/ima.c b/tools/testing/selftests/bpf/progs/ima.c index 9633e5f2453d..e3ce943c5c3d 100644 --- a/tools/testing/selftests/bpf/progs/ima.c +++ b/tools/testing/selftests/bpf/progs/ima.c @@ -20,6 +20,7 @@ char _license[] SEC("license") = "GPL"; bool use_ima_file_hash; bool enable_bprm_creds_for_exec; +bool enable_kernel_read_file; static void ima_test_common(struct file *file) { @@ -65,3 +66,20 @@ int BPF_PROG(bprm_creds_for_exec, struct linux_binprm *bprm) ima_test_common(bprm->file); return 0; } + +SEC("lsm.s/kernel_read_file") +int BPF_PROG(kernel_read_file, struct file *file, enum kernel_read_file_id id, + bool contents) +{ + if (!enable_kernel_read_file) + return 0; + + if (!contents) + return 0; + + if (id != READING_POLICY) + return 0; + + ima_test_common(file); + return 0; +} From 7bae42b68d7f070a346fde4c7c1ce182f2284933 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 2 Mar 2022 12:14:04 +0100 Subject: [PATCH 052/137] selftests/bpf: Check that bpf_kernel_read_file() denies reading IMA policy Check that bpf_kernel_read_file() denies the reading of an IMA policy, by ensuring that ima_setup.sh exits with an error. Signed-off-by: Roberto Sassu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220302111404.193900-10-roberto.sassu@huawei.com --- .../selftests/bpf/prog_tests/test_ima.c | 17 +++++++++++++++++ tools/testing/selftests/bpf/progs/ima.c | 18 ++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c index b13a141c4220..b13feceb38f1 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_ima.c +++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c @@ -59,6 +59,7 @@ static void test_init(struct ima__bss *bss) bss->use_ima_file_hash = false; bss->enable_bprm_creds_for_exec = false; bss->enable_kernel_read_file = false; + bss->test_deny = false; } void test_test_ima(void) @@ -200,6 +201,22 @@ void test_test_ima(void) ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + /* + * Test #6 + * - Goal: ensure that the kernel_read_file hook denies an operation + * - Expected result: 0 samples + */ + test_init(skel->bss); + skel->bss->enable_kernel_read_file = true; + skel->bss->test_deny = true; + err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, + "load-policy"); + if (CHECK(!err, "run_measured_process #6", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 0, "num_samples_or_err"); + close_clean: snprintf(cmd, sizeof(cmd), "./ima_setup.sh cleanup %s", measured_dir); err = system(cmd); diff --git a/tools/testing/selftests/bpf/progs/ima.c b/tools/testing/selftests/bpf/progs/ima.c index e3ce943c5c3d..e16a2c208481 100644 --- a/tools/testing/selftests/bpf/progs/ima.c +++ b/tools/testing/selftests/bpf/progs/ima.c @@ -21,6 +21,7 @@ char _license[] SEC("license") = "GPL"; bool use_ima_file_hash; bool enable_bprm_creds_for_exec; bool enable_kernel_read_file; +bool test_deny; static void ima_test_common(struct file *file) { @@ -51,6 +52,17 @@ static void ima_test_common(struct file *file) return; } +static int ima_test_deny(void) +{ + u32 pid; + + pid = bpf_get_current_pid_tgid() >> 
32; + if (pid == monitored_pid && test_deny) + return -EPERM; + + return 0; +} + SEC("lsm.s/bprm_committed_creds") void BPF_PROG(bprm_committed_creds, struct linux_binprm *bprm) { @@ -71,6 +83,8 @@ SEC("lsm.s/kernel_read_file") int BPF_PROG(kernel_read_file, struct file *file, enum kernel_read_file_id id, bool contents) { + int ret; + if (!enable_kernel_read_file) return 0; @@ -80,6 +94,10 @@ int BPF_PROG(kernel_read_file, struct file *file, enum kernel_read_file_id id, if (id != READING_POLICY) return 0; + ret = ima_test_deny(); + if (ret < 0) + return ret; + ima_test_common(file); return 0; } From 6789ab9668d920d7be636cb994d85be9a816f9d5 Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Thu, 10 Mar 2022 13:16:55 -0800 Subject: [PATCH 053/137] compiler_types: Refactor the use of btf_type_tag attribute. Previous patches have introduced the compiler attribute btf_type_tag for __user and __percpu. The availability of this attribute depends on some CONFIGs and compiler support. This patch refactors the use of btf_type_tag by introducing BTF_TYPE_TAG, which hides all the dependencies. No functional change. Suggested-by: Andrii Nakryiko Signed-off-by: Hao Luo Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220310211655.3173786-1-haoluo@google.com --- include/linux/compiler_types.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index b9a8ae9440c7..1bc760ba400c 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -4,6 +4,13 @@ #ifndef __ASSEMBLY__ +#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \ + __has_attribute(btf_type_tag) +# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value))) +#else +# define BTF_TYPE_TAG(value) /* nothing */ +#endif + #ifdef __CHECKER__ /* address spaces */ # define __kernel __attribute__((address_space(0))) @@ -31,19 +38,11 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } # define __kernel # ifdef STRUCTLEAK_PLUGIN # define __user __attribute__((user)) -# elif defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \ - __has_attribute(btf_type_tag) -# define __user __attribute__((btf_type_tag("user"))) # else -# define __user +# define __user BTF_TYPE_TAG(user) # endif # define __iomem -# if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \ - __has_attribute(btf_type_tag) -# define __percpu __attribute__((btf_type_tag("percpu"))) -# else -# define __percpu -# endif +# define __percpu BTF_TYPE_TAG(percpu) # define __rcu # define __chk_user_ptr(x) (void)0 # define __chk_io_ptr(x) (void)0 From b6f1f780b3932ae497ed85e79bc8a1e513883624 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Thu, 10 Mar 2022 23:56:20 +0100 Subject: [PATCH 054/137] bpf, test_run: Fix packet size check for live packet mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The live packet mode uses some extra space at the start of each page to cache data structures so they don't have to be rebuilt at every repetition. This space wasn't correctly accounted for in the size checking of the arguments supplied to userspace. In addition, the definition of the frame size should include the size of the skb_shared_info (as there is other logic that subtracts the size of this). 
Together, these mistakes resulted in userspace being able to trip the XDP_WARN() in xdp_update_frame_from_buff(), which syzbot discovered in short order. Fix this by changing the frame size define and adding the extra headroom to the bpf_prog_test_run_xdp() function. Also drop the max_len parameter to the page_pool init, since this is related to DMA which is not used for the page pool instance in PROG_TEST_RUN. Fixes: b530e9e1063e ("bpf: Add "live packet" mode for XDP in BPF_PROG_RUN") Reported-by: syzbot+0e91362d99386dc5de99@syzkaller.appspotmail.com Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220310225621.53374-1-toke@redhat.com --- net/bpf/test_run.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 24405a280a9b..e7b9c2636d10 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -112,8 +112,7 @@ struct xdp_test_data { u32 frame_cnt; }; -#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head) \ - - sizeof(struct skb_shared_info)) +#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head)) #define TEST_XDP_MAX_BATCH 256 static void xdp_test_run_init_page(struct page *page, void *arg) @@ -156,7 +155,6 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_c .flags = 0, .pool_size = xdp->batch_size, .nid = NUMA_NO_NODE, - .max_len = TEST_XDP_FRAME_SIZE, .init_callback = xdp_test_run_init_page, .init_arg = xdp, }; @@ -1230,6 +1228,8 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, batch_size = NAPI_POLL_WEIGHT; else if (batch_size > TEST_XDP_MAX_BATCH) return -E2BIG; + + headroom += sizeof(struct xdp_page_head); } else if (batch_size) { return -EINVAL; } From c09df4bd3a915079eec5e77160366225a28699a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Thu, 10 Mar 2022 23:56:21 +0100 Subject: [PATCH 055/137] selftests/bpf: Add a test for maximum packet size in xdp_do_redirect MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds an extra test to the xdp_do_redirect selftest for XDP live packet mode, which verifies that the maximum permissible packet size is accepted without any errors, and that a too big packet is correctly rejected. 
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220310225621.53374-2-toke@redhat.com --- .../bpf/prog_tests/xdp_do_redirect.c | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c index 9926b07e38c8..a50971c6cf4a 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -62,6 +62,28 @@ static int attach_tc_prog(struct bpf_tc_hook *hook, int fd) return 0; } +/* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) - + * sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes + */ +#define MAX_PKT_SIZE 3368 +static void test_max_pkt_size(int fd) +{ + char data[MAX_PKT_SIZE + 1] = {}; + int err; + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &data, + .data_size_in = MAX_PKT_SIZE, + .flags = BPF_F_TEST_XDP_LIVE_FRAMES, + .repeat = 1, + ); + err = bpf_prog_test_run_opts(fd, &opts); + ASSERT_OK(err, "prog_run_max_size"); + + opts.data_size_in += 1; + err = bpf_prog_test_run_opts(fd, &opts); + ASSERT_EQ(err, -EINVAL, "prog_run_too_big"); +} + #define NUM_PKTS 10000 void test_xdp_do_redirect(void) { @@ -167,6 +189,8 @@ void test_xdp_do_redirect(void) ASSERT_EQ(skel->bss->pkts_seen_zero, 2, "pkt_count_zero"); ASSERT_EQ(skel->bss->pkts_seen_tc, NUM_PKTS - 2, "pkt_count_tc"); + test_max_pkt_size(bpf_program__fd(skel->progs.xdp_count_pkts)); + out_tc: bpf_tc_hook_destroy(&tc_hook); out: From d3b351f65bf42ccda1f686de3ccb21ea1a0c4f5a Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 10 Mar 2022 16:37:21 -0800 Subject: [PATCH 056/137] selftests/bpf: Fix a clang compilation error for send_signal.c Building selftests/bpf with latest clang compiler (clang15 built from source), I hit the following compilation error: /.../prog_tests/send_signal.c:43:16: error: variable 'j' set but not used [-Werror,-Wunused-but-set-variable] volatile int j = 0; ^ 1 error generated. The problem also exists with clang13 and clang14. clang12 is okay. In send_signal.c, we have the following code ... volatile int j = 0; [...] for (int i = 0; i < 100000000 && !sigusr1_received; i++) j /= i + 1; ... to burn CPU cycles so bpf_send_signal() helper can be tested in NMI mode. Slightly changing 'j /= i + 1' to 'j /= i + j + 1' or 'j++' can fix the problem. Further investigation indicated this should be a clang bug ([1]). The upstream fix will be proposed later. But it is a good idea to workaround the issue to unblock people who build kernel/selftests with clang. 
[1] https://discourse.llvm.org/t/strange-clang-unused-but-set-variable-error-with-volatile-variables/60841 Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220311003721.2177170-1-yhs@fb.com --- tools/testing/selftests/bpf/prog_tests/send_signal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c index def50f1c5c31..d71226e34c34 100644 --- a/tools/testing/selftests/bpf/prog_tests/send_signal.c +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -65,7 +65,7 @@ static void test_send_signal_common(struct perf_event_attr *attr, /* wait a little for signal handler */ for (int i = 0; i < 100000000 && !sigusr1_received; i++) - j /= i + 1; + j /= i + j + 1; buf[0] = sigusr1_received ? '2' : '0'; ASSERT_EQ(sigusr1_received, 1, "sigusr1_received"); From 938d3480b92fa5e454b7734294f12a7b75126f09 Mon Sep 17 00:00:00 2001 From: Wang Yufen Date: Fri, 4 Mar 2022 16:11:42 +0800 Subject: [PATCH 057/137] bpf, sockmap: Fix memleak in sk_psock_queue_msg If tcp_bpf_sendmsg is running during a tear down operation we may enqueue data on the ingress msg queue while tear down is trying to free it. sk1 (redirect sk2) sk2 ------------------- --------------- tcp_bpf_sendmsg() tcp_bpf_send_verdict() tcp_bpf_sendmsg_redir() bpf_tcp_ingress() sock_map_close() lock_sock() lock_sock() ... blocking sk_psock_stop sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); release_sock(sk); lock_sock() sk_mem_charge() get_page() sk_psock_queue_msg() sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED); drop_sk_msg() release_sock() While drop_sk_msg(), the msg has charged memory form sk by sk_mem_charge and has sg pages need to put. To fix we use sk_msg_free() and then kfee() msg. This issue can cause the following info: WARNING: CPU: 0 PID: 9202 at net/core/stream.c:205 sk_stream_kill_queues+0xc8/0xe0 Call Trace: inet_csk_destroy_sock+0x55/0x110 tcp_rcv_state_process+0xe5f/0xe90 ? sk_filter_trim_cap+0x10d/0x230 ? tcp_v4_do_rcv+0x161/0x250 tcp_v4_do_rcv+0x161/0x250 tcp_v4_rcv+0xc3a/0xce0 ip_protocol_deliver_rcu+0x3d/0x230 ip_local_deliver_finish+0x54/0x60 ip_local_deliver+0xfd/0x110 ? ip_protocol_deliver_rcu+0x230/0x230 ip_rcv+0xd6/0x100 ? ip_local_deliver+0x110/0x110 __netif_receive_skb_one_core+0x85/0xa0 process_backlog+0xa4/0x160 __napi_poll+0x29/0x1b0 net_rx_action+0x287/0x300 __do_softirq+0xff/0x2fc do_softirq+0x79/0x90 WARNING: CPU: 0 PID: 531 at net/ipv4/af_inet.c:154 inet_sock_destruct+0x175/0x1b0 Call Trace: __sk_destruct+0x24/0x1f0 sk_psock_destroy+0x19b/0x1c0 process_one_work+0x1b3/0x3c0 ? process_one_work+0x3c0/0x3c0 worker_thread+0x30/0x350 ? process_one_work+0x3c0/0x3c0 kthread+0xe6/0x110 ? 
kthread_complete_and_exit+0x20/0x20 ret_from_fork+0x22/0x30 Fixes: 9635720b7c88 ("bpf, sockmap: Fix memleak on ingress msg enqueue") Signed-off-by: Wang Yufen Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220304081145.2037182-2-wangyufen@huawei.com --- include/linux/skmsg.h | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index fdb5375f0562..c5a2d6f50f25 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -304,21 +304,16 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb) kfree_skb(skb); } -static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg) -{ - if (msg->skb) - sock_drop(psock->sk, msg->skb); - kfree(msg); -} - static inline void sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { spin_lock_bh(&psock->ingress_lock); if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) list_add_tail(&msg->list, &psock->ingress_msg); - else - drop_sk_msg(psock, msg); + else { + sk_msg_free(psock->sk, msg); + kfree(msg); + } spin_unlock_bh(&psock->ingress_lock); } From 9c34e38c4a870eb30b13f42f5b44f42e9d19ccb8 Mon Sep 17 00:00:00 2001 From: Wang Yufen Date: Fri, 4 Mar 2022 16:11:43 +0800 Subject: [PATCH 058/137] bpf, sockmap: Fix memleak in tcp_bpf_sendmsg while sk msg is full If tcp_bpf_sendmsg() is running while sk msg is full. When sk_msg_alloc() returns -ENOMEM error, tcp_bpf_sendmsg() goes to wait_for_memory. If partial memory has been alloced by sk_msg_alloc(), that is, msg_tx->sg.size is greater than osize after sk_msg_alloc(), memleak occurs. To fix we use sk_msg_trim() to release the allocated memory, then goto wait for memory. Other call paths of sk_msg_alloc() have the similar issue, such as tls_sw_sendmsg(), so handle sk_msg_trim logic inside sk_msg_alloc(), as Cong Wang suggested. 
This issue can cause the following info: WARNING: CPU: 3 PID: 7950 at net/core/stream.c:208 sk_stream_kill_queues+0xd4/0x1a0 Call Trace: inet_csk_destroy_sock+0x55/0x110 __tcp_close+0x279/0x470 tcp_close+0x1f/0x60 inet_release+0x3f/0x80 __sock_release+0x3d/0xb0 sock_close+0x11/0x20 __fput+0x92/0x250 task_work_run+0x6a/0xa0 do_exit+0x33b/0xb60 do_group_exit+0x2f/0xa0 get_signal+0xb6/0x950 arch_do_signal_or_restart+0xac/0x2a0 exit_to_user_mode_prepare+0xa9/0x200 syscall_exit_to_user_mode+0x12/0x30 do_syscall_64+0x46/0x80 entry_SYSCALL_64_after_hwframe+0x44/0xae WARNING: CPU: 3 PID: 2094 at net/ipv4/af_inet.c:155 inet_sock_destruct+0x13c/0x260 Call Trace: __sk_destruct+0x24/0x1f0 sk_psock_destroy+0x19b/0x1c0 process_one_work+0x1b3/0x3c0 kthread+0xe6/0x110 ret_from_fork+0x22/0x30 Fixes: 604326b41a6f ("bpf, sockmap: convert to generic sk_msg interface") Signed-off-by: Wang Yufen Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220304081145.2037182-3-wangyufen@huawei.com --- net/core/skmsg.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 929a2b096b04..cc381165ea08 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -27,6 +27,7 @@ int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, int elem_first_coalesce) { struct page_frag *pfrag = sk_page_frag(sk); + u32 osize = msg->sg.size; int ret = 0; len -= msg->sg.size; @@ -35,13 +36,17 @@ int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, u32 orig_offset; int use, i; - if (!sk_page_frag_refill(sk, pfrag)) - return -ENOMEM; + if (!sk_page_frag_refill(sk, pfrag)) { + ret = -ENOMEM; + goto msg_trim; + } orig_offset = pfrag->offset; use = min_t(int, len, pfrag->size - orig_offset); - if (!sk_wmem_schedule(sk, use)) - return -ENOMEM; + if (!sk_wmem_schedule(sk, use)) { + ret = -ENOMEM; + goto msg_trim; + } i = msg->sg.end; sk_msg_iter_var_prev(i); @@ -71,6 +76,10 @@ int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, } return ret; + +msg_trim: + sk_msg_trim(sk, msg, osize); + return ret; } EXPORT_SYMBOL_GPL(sk_msg_alloc); From 84472b436e760ba439e1969a9e3c5ae7c86de39d Mon Sep 17 00:00:00 2001 From: Wang Yufen Date: Fri, 4 Mar 2022 16:11:44 +0800 Subject: [PATCH 059/137] bpf, sockmap: Fix more uncharged while msg has more_data In tcp_bpf_send_verdict(), if msg has more data after tcp_bpf_sendmsg_redir(): tcp_bpf_send_verdict() tosend = msg->sg.size //msg->sg.size = 22220 case __SK_REDIRECT: sk_msg_return() //uncharged msg->sg.size(22220) sk->sk_forward_alloc tcp_bpf_sendmsg_redir() //after tcp_bpf_sendmsg_redir, msg->sg.size=11000 goto more_data; tosend = msg->sg.size //msg->sg.size = 11000 case __SK_REDIRECT: sk_msg_return() //uncharged msg->sg.size(11000) to sk->sk_forward_alloc The msg->sg.size(11000) has been uncharged twice, to fix we can charge the remaining msg->sg.size before goto more data. This issue can cause the following info: WARNING: CPU: 0 PID: 9860 at net/core/stream.c:208 sk_stream_kill_queues+0xd4/0x1a0 Call Trace: inet_csk_destroy_sock+0x55/0x110 __tcp_close+0x279/0x470 tcp_close+0x1f/0x60 inet_release+0x3f/0x80 __sock_release+0x3d/0xb0 sock_close+0x11/0x20 __fput+0x92/0x250 task_work_run+0x6a/0xa0 do_exit+0x33b/0xb60 do_group_exit+0x2f/0xa0 get_signal+0xb6/0x950 arch_do_signal_or_restart+0xac/0x2a0 ? 
vfs_write+0x237/0x290 exit_to_user_mode_prepare+0xa9/0x200 syscall_exit_to_user_mode+0x12/0x30 do_syscall_64+0x46/0x80 entry_SYSCALL_64_after_hwframe+0x44/0xae WARNING: CPU: 0 PID: 2136 at net/ipv4/af_inet.c:155 inet_sock_destruct+0x13c/0x260 Call Trace: __sk_destruct+0x24/0x1f0 sk_psock_destroy+0x19b/0x1c0 process_one_work+0x1b3/0x3c0 worker_thread+0x30/0x350 ? process_one_work+0x3c0/0x3c0 kthread+0xe6/0x110 ? kthread_complete_and_exit+0x20/0x20 ret_from_fork+0x22/0x30 Fixes: 604326b41a6f ("bpf, sockmap: convert to generic sk_msg interface") Signed-off-by: Wang Yufen Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220304081145.2037182-4-wangyufen@huawei.com --- net/ipv4/tcp_bpf.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 9b9b02052fd3..304800c60427 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -335,7 +335,7 @@ more_data: cork = true; psock->cork = NULL; } - sk_msg_return(sk, msg, tosend); + sk_msg_return(sk, msg, msg->sg.size); release_sock(sk); ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags); @@ -375,8 +375,11 @@ more_data: } if (msg && msg->sg.data[msg->sg.start].page_link && - msg->sg.data[msg->sg.start].length) + msg->sg.data[msg->sg.start].length) { + if (eval == __SK_REDIRECT) + sk_mem_charge(sk, msg->sg.size); goto more_data; + } } return ret; } From 2486ab434b2c2a14e9237296db00b1e1b7ae3273 Mon Sep 17 00:00:00 2001 From: Wang Yufen Date: Fri, 4 Mar 2022 16:11:45 +0800 Subject: [PATCH 060/137] bpf, sockmap: Fix double uncharge the mem of sk_msg If tcp_bpf_sendmsg is running during a tear down operation, psock may be freed. tcp_bpf_sendmsg() tcp_bpf_send_verdict() sk_msg_return() tcp_bpf_sendmsg_redir() unlikely(!psock)) sk_msg_free() The mem of msg has been uncharged in tcp_bpf_send_verdict() by sk_msg_return(), and would be uncharged by sk_msg_free() again. When psock is null, we can simply returning an error code, this would then trigger the sk_msg_free_nocharge in the error path of __SK_REDIRECT and would have the side effect of throwing an error up to user space. This would be a slight change in behavior from user side but would look the same as an error if the redirect on the socket threw an error. This issue can cause the following info: WARNING: CPU: 0 PID: 2136 at net/ipv4/af_inet.c:155 inet_sock_destruct+0x13c/0x260 Call Trace: __sk_destruct+0x24/0x1f0 sk_psock_destroy+0x19b/0x1c0 process_one_work+0x1b3/0x3c0 worker_thread+0x30/0x350 ? process_one_work+0x3c0/0x3c0 kthread+0xe6/0x110 ? kthread_complete_and_exit+0x20/0x20 ret_from_fork+0x22/0x30 Fixes: 604326b41a6f ("bpf, sockmap: convert to generic sk_msg interface") Signed-off-by: Wang Yufen Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220304081145.2037182-5-wangyufen@huawei.com --- net/ipv4/tcp_bpf.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 304800c60427..1cdcb4df0eb7 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -138,10 +138,9 @@ int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, struct sk_psock *psock = sk_psock_get(sk); int ret; - if (unlikely(!psock)) { - sk_msg_free(sk, msg); - return 0; - } + if (unlikely(!psock)) + return -EPIPE; + ret = ingress ? 
bpf_tcp_ingress(sk, psock, msg, bytes, flags) : tcp_bpf_push_locked(sk, msg, bytes, flags, false); sk_psock_put(sk, psock); From 8fa42d78f6354bb96ad3a079dcbef528ca9fa9e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Tue, 15 Mar 2022 11:29:48 +0100 Subject: [PATCH 061/137] samples/bpf, xdpsock: Fix race when running for fix duration of time MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When running xdpsock for a fix duration of time before terminating using --duration=, there is a race condition that may cause xdpsock to terminate immediately. When running for a fixed duration of time the check to determine when to terminate execution is in is_benchmark_done() and is being executed in the context of the poller thread, if (opt_duration > 0) { unsigned long dt = (get_nsecs() - start_time); if (dt >= opt_duration) benchmark_done = true; } However start_time is only set after the poller thread have been created. This leaves a small window when the poller thread is starting and calls is_benchmark_done() for the first time that start_time is not yet set. In that case start_time have its initial value of 0 and the duration check fails as it do not correlate correctly for the applications start time and immediately sets benchmark_done which in turn terminates the xdpsock application. Fix this by setting start_time before creating the poller thread. Fixes: d3f11b018f6c ("samples/bpf: xdpsock: Add duration option to specify how long to run") Signed-off-by: Niklas Söderlund Signed-off-by: Simon Horman Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220315102948.466436-1-niklas.soderlund@corigine.com --- samples/bpf/xdpsock_user.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index 19288a2bbc75..6f3fe30ad283 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -1984,15 +1984,15 @@ int main(int argc, char **argv) setlocale(LC_ALL, ""); + prev_time = get_nsecs(); + start_time = prev_time; + if (!opt_quiet) { ret = pthread_create(&pt, NULL, poller, NULL); if (ret) exit_with_error(ret); } - prev_time = get_nsecs(); - start_time = prev_time; - /* Configure sched priority for better wake-up accuracy */ memset(&schparam, 0, sizeof(schparam)); schparam.sched_priority = opt_schprio; From f98d6dd1e79d4b04c2e13e91a9348473cfa805a6 Mon Sep 17 00:00:00 2001 From: Guo Zhengkui Date: Tue, 15 Mar 2022 21:01:26 +0800 Subject: [PATCH 062/137] selftests/bpf: Clean up array_size.cocci warnings Clean up the array_size.cocci warnings under tools/testing/selftests/bpf/: Use `ARRAY_SIZE(arr)` instead of forms like `sizeof(arr)/sizeof(arr[0])`. tools/testing/selftests/bpf/test_cgroup_storage.c uses ARRAY_SIZE() defined in tools/include/linux/kernel.h (sys/sysinfo.h -> linux/kernel.h), while others use ARRAY_SIZE() in bpf_util.h. 
Signed-off-by: Guo Zhengkui Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220315130143.2403-1-guozhengkui@vivo.com --- .../selftests/bpf/prog_tests/cgroup_attach_autodetach.c | 2 +- .../testing/selftests/bpf/prog_tests/cgroup_attach_multi.c | 2 +- .../selftests/bpf/prog_tests/cgroup_attach_override.c | 2 +- tools/testing/selftests/bpf/prog_tests/global_data.c | 6 +++--- tools/testing/selftests/bpf/prog_tests/obj_name.c | 2 +- tools/testing/selftests/bpf/test_cgroup_storage.c | 2 +- tools/testing/selftests/bpf/test_lru_map.c | 4 ++-- tools/testing/selftests/bpf/test_sock_addr.c | 6 +++--- tools/testing/selftests/bpf/test_sockmap.c | 4 ++-- 9 files changed, 15 insertions(+), 15 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c index 858916d11e2e..9367bd2f0ae1 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c @@ -14,7 +14,7 @@ static int prog_load(void) BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = 1 */ BPF_EXIT_INSN(), }; - size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + size_t insns_cnt = ARRAY_SIZE(prog); return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, prog, insns_cnt, "GPL", 0, diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c index 38b3c47293da..db0b7bac78d1 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c @@ -63,7 +63,7 @@ static int prog_load_cnt(int verdict, int val) BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ BPF_EXIT_INSN(), }; - size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + size_t insns_cnt = ARRAY_SIZE(prog); int ret; ret = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c index 356547e849e2..9421a5b7f4e1 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c @@ -16,7 +16,7 @@ static int prog_load(int verdict) BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ BPF_EXIT_INSN(), }; - size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + size_t insns_cnt = ARRAY_SIZE(prog); return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, prog, insns_cnt, "GPL", 0, diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c index 6fb3d3155c35..027685858925 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data.c @@ -29,7 +29,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration) { "relocate .rodata reference", 10, ~0 }, }; - for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + for (i = 0; i < ARRAY_SIZE(tests); i++) { err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num); CHECK(err || num != tests[i].num, tests[i].name, "err %d result %llx expected %llx\n", @@ -58,7 +58,7 @@ static void test_global_data_string(struct bpf_object *obj, __u32 duration) { "relocate .bss reference", 4, "\0\0hello" }, }; - for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + for (i = 0; i < ARRAY_SIZE(tests); i++) { err = bpf_map_lookup_elem(map_fd, &tests[i].key, str); 
CHECK(err || memcmp(str, tests[i].str, sizeof(str)), tests[i].name, "err %d result \'%s\' expected \'%s\'\n", @@ -92,7 +92,7 @@ static void test_global_data_struct(struct bpf_object *obj, __u32 duration) { "relocate .data reference", 3, { 41, 0xeeeeefef, 0x2111111111111111ULL, } }, }; - for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + for (i = 0; i < ARRAY_SIZE(tests); i++) { err = bpf_map_lookup_elem(map_fd, &tests[i].key, &val); CHECK(err || memcmp(&val, &tests[i].val, sizeof(val)), tests[i].name, "err %d result { %u, %u, %llu } expected { %u, %u, %llu }\n", diff --git a/tools/testing/selftests/bpf/prog_tests/obj_name.c b/tools/testing/selftests/bpf/prog_tests/obj_name.c index 6194b776a28b..7093edca6e08 100644 --- a/tools/testing/selftests/bpf/prog_tests/obj_name.c +++ b/tools/testing/selftests/bpf/prog_tests/obj_name.c @@ -20,7 +20,7 @@ void test_obj_name(void) __u32 duration = 0; int i; - for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + for (i = 0; i < ARRAY_SIZE(tests); i++) { size_t name_len = strlen(tests[i].name) + 1; union bpf_attr attr; size_t ncopy; diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c index 5b8314cd77fd..d6a1be4d8020 100644 --- a/tools/testing/selftests/bpf/test_cgroup_storage.c +++ b/tools/testing/selftests/bpf/test_cgroup_storage.c @@ -36,7 +36,7 @@ int main(int argc, char **argv) BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), BPF_EXIT_INSN(), }; - size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + size_t insns_cnt = ARRAY_SIZE(prog); int error = EXIT_FAILURE; int map_fd, percpu_map_fd, prog_fd, cgroup_fd; struct bpf_cgroup_storage_key key; diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 6e6235185a86..563bbe18c172 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -878,11 +878,11 @@ int main(int argc, char **argv) assert(nr_cpus != -1); printf("nr_cpus:%d\n\n", nr_cpus); - for (f = 0; f < sizeof(map_flags) / sizeof(*map_flags); f++) { + for (f = 0; f < ARRAY_SIZE(map_flags); f++) { unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ? 
PERCPU_FREE_TARGET : LOCAL_FREE_TARGET; - for (t = 0; t < sizeof(map_types) / sizeof(*map_types); t++) { + for (t = 0; t < ARRAY_SIZE(map_types); t++) { test_lru_sanity0(map_types[t], map_flags[f]); test_lru_sanity1(map_types[t], map_flags[f], tgt_free); test_lru_sanity2(map_types[t], map_flags[f], tgt_free); diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index f0c8d05ba6d1..f3d5d7ac6505 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -723,7 +723,7 @@ static int xmsg_ret_only_prog_load(const struct sock_addr_test *test, BPF_MOV64_IMM(BPF_REG_0, rc), BPF_EXIT_INSN(), }; - return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); + return load_insns(test, insns, ARRAY_SIZE(insns)); } static int sendmsg_allow_prog_load(const struct sock_addr_test *test) @@ -795,7 +795,7 @@ static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test) BPF_EXIT_INSN(), }; - return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); + return load_insns(test, insns, ARRAY_SIZE(insns)); } static int recvmsg4_rw_c_prog_load(const struct sock_addr_test *test) @@ -858,7 +858,7 @@ static int sendmsg6_rw_dst_asm_prog_load(const struct sock_addr_test *test, BPF_EXIT_INSN(), }; - return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); + return load_insns(test, insns, ARRAY_SIZE(insns)); } static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test) diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 1ba7e7346afb..dfb4f5c0fcb9 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -1786,7 +1786,7 @@ static int populate_progs(char *bpf_file) i++; } - for (i = 0; i < sizeof(map_fd)/sizeof(int); i++) { + for (i = 0; i < ARRAY_SIZE(map_fd); i++) { maps[i] = bpf_object__find_map_by_name(obj, map_names[i]); map_fd[i] = bpf_map__fd(maps[i]); if (map_fd[i] < 0) { @@ -1867,7 +1867,7 @@ static int __test_selftests(int cg_fd, struct sockmap_options *opt) } /* Tests basic commands and APIs */ - for (i = 0; i < sizeof(test)/sizeof(struct _test); i++) { + for (i = 0; i < ARRAY_SIZE(test); i++) { struct _test t = test[i]; if (check_whitelist(&t, opt) != 0) From cbdaf71f7e65a45d6e96378ee7bfe3da39c30908 Mon Sep 17 00:00:00 2001 From: Dmitrii Dolgov <9erthalion6@gmail.com> Date: Wed, 9 Mar 2022 17:31:12 +0100 Subject: [PATCH 063/137] bpftool: Add bpf_cookie to link output Commit 82e6b1eee6a8 ("bpf: Allow to specify user-provided bpf_cookie for BPF perf links") introduced the concept of user specified bpf_cookie, which could be accessed by BPF programs using bpf_get_attach_cookie(). For troubleshooting purposes it is convenient to expose bpf_cookie via bpftool as well, so there is no need to meddle with the target BPF program itself. Implemented using the pid iterator BPF program to actually fetch bpf_cookies, which allows constraining code changes only to bpftool. 
$ bpftool link 1: type 7 prog 5 bpf_cookie 123 pids bootstrap(81) Signed-off-by: Dmitrii Dolgov <9erthalion6@gmail.com> Signed-off-by: Andrii Nakryiko Acked-by: Yonghong Song Acked-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20220309163112.24141-1-9erthalion6@gmail.com --- tools/bpf/bpftool/main.h | 2 ++ tools/bpf/bpftool/pids.c | 8 ++++++++ tools/bpf/bpftool/skeleton/pid_iter.bpf.c | 22 ++++++++++++++++++++++ tools/bpf/bpftool/skeleton/pid_iter.h | 2 ++ 4 files changed, 34 insertions(+) diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 0468e5b24bd4..6e9277ffc68c 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -113,7 +113,9 @@ struct obj_ref { struct obj_refs { int ref_cnt; + bool has_bpf_cookie; struct obj_ref *refs; + __u64 bpf_cookie; }; struct btf; diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c index 7c384d10e95f..bb6c969a114a 100644 --- a/tools/bpf/bpftool/pids.c +++ b/tools/bpf/bpftool/pids.c @@ -78,6 +78,8 @@ static void add_ref(struct hashmap *map, struct pid_iter_entry *e) ref->pid = e->pid; memcpy(ref->comm, e->comm, sizeof(ref->comm)); refs->ref_cnt = 1; + refs->has_bpf_cookie = e->has_bpf_cookie; + refs->bpf_cookie = e->bpf_cookie; err = hashmap__append(map, u32_as_hash_field(e->id), refs); if (err) @@ -205,6 +207,9 @@ void emit_obj_refs_json(struct hashmap *map, __u32 id, if (refs->ref_cnt == 0) break; + if (refs->has_bpf_cookie) + jsonw_lluint_field(json_writer, "bpf_cookie", refs->bpf_cookie); + jsonw_name(json_writer, "pids"); jsonw_start_array(json_writer); for (i = 0; i < refs->ref_cnt; i++) { @@ -234,6 +239,9 @@ void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix) if (refs->ref_cnt == 0) break; + if (refs->has_bpf_cookie) + printf("\n\tbpf_cookie %llu", (unsigned long long) refs->bpf_cookie); + printf("%s", prefix); for (i = 0; i < refs->ref_cnt; i++) { struct obj_ref *ref = &refs->refs[i]; diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c index f70702fcb224..eb05ea53afb1 100644 --- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c +++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c @@ -38,6 +38,17 @@ static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type) } } +/* could be used only with BPF_LINK_TYPE_PERF_EVENT links */ +static __u64 get_bpf_cookie(struct bpf_link *link) +{ + struct bpf_perf_link *perf_link; + struct perf_event *event; + + perf_link = container_of(link, struct bpf_perf_link, link); + event = BPF_CORE_READ(perf_link, perf_file, private_data); + return BPF_CORE_READ(event, bpf_cookie); +} + SEC("iter/task_file") int iter(struct bpf_iter__task_file *ctx) { @@ -69,8 +80,19 @@ int iter(struct bpf_iter__task_file *ctx) if (file->f_op != fops) return 0; + __builtin_memset(&e, 0, sizeof(e)); e.pid = task->tgid; e.id = get_obj_id(file->private_data, obj_type); + + if (obj_type == BPF_OBJ_LINK) { + struct bpf_link *link = (struct bpf_link *) file->private_data; + + if (BPF_CORE_READ(link, type) == BPF_LINK_TYPE_PERF_EVENT) { + e.has_bpf_cookie = true; + e.bpf_cookie = get_bpf_cookie(link); + } + } + bpf_probe_read_kernel_str(&e.comm, sizeof(e.comm), task->group_leader->comm); bpf_seq_write(ctx->meta->seq, &e, sizeof(e)); diff --git a/tools/bpf/bpftool/skeleton/pid_iter.h b/tools/bpf/bpftool/skeleton/pid_iter.h index 5692cf257adb..bbb570d4cca6 100644 --- a/tools/bpf/bpftool/skeleton/pid_iter.h +++ b/tools/bpf/bpftool/skeleton/pid_iter.h @@ -6,6 +6,8 @@ struct pid_iter_entry { __u32 id; int pid; + __u64 bpf_cookie; 
+ bool has_bpf_cookie; char comm[16]; }; From 6585abea98ae5f750358a6427f2ddf7715393f69 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Sun, 13 Mar 2022 16:19:46 -0700 Subject: [PATCH 064/137] bpftool: man: Add missing top level docs The top-level (bpftool.8) man page was missing docs for a few subcommands and their respective sub-sub-commands. This commit brings the top level man page up to date. Note that I've kept the ordering of the subcommands the same as in `bpftool help`. Signed-off-by: Daniel Xu Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/3049ef5dc509c0d1832f0a8b2dba2ccaad0af688.1647213551.git.dxu@dxuuu.xyz --- tools/bpf/bpftool/Documentation/bpftool.rst | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst index 7084dd9fa2f8..6965c94dfdaf 100644 --- a/tools/bpf/bpftool/Documentation/bpftool.rst +++ b/tools/bpf/bpftool/Documentation/bpftool.rst @@ -20,7 +20,8 @@ SYNOPSIS **bpftool** **version** - *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** } + *OBJECT* := { **map** | **program** | **link** | **cgroup** | **perf** | **net** | **feature** | + **btf** | **gen** | **struct_ops** | **iter** } *OPTIONS* := { { **-V** | **--version** } | |COMMON_OPTIONS| } @@ -31,6 +32,8 @@ SYNOPSIS *PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin** | **load** | **attach** | **detach** | **help** } + *LINK-COMMANDS* := { **show** | **list** | **pin** | **detach** | **help** } + *CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** } *PERF-COMMANDS* := { **show** | **list** | **help** } @@ -39,6 +42,14 @@ SYNOPSIS *FEATURE-COMMANDS* := { **probe** | **help** } + *BTF-COMMANDS* := { **show** | **list** | **dump** | **help** } + + *GEN-COMMANDS* := { **object** | **skeleton** | **min_core_btf** | **help** } + + *STRUCT-OPS-COMMANDS* := { **show** | **list** | **dump** | **register** | **unregister** | **help** } + + *ITER-COMMANDS* := { **pin** | **help** } + DESCRIPTION =========== *bpftool* allows for inspection and simple modification of BPF objects From 663af70aabb7c9b6bd5e1c1cdeb44e7025a4f855 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 16 Mar 2022 10:38:23 -0700 Subject: [PATCH 065/137] bpf: selftests: Add helpers to directly use the capget and capset syscall After upgrading to the newer libcap (>= 2.60), the libcap commit aca076443591 ("Make cap_t operations thread safe.") added a "__u8 mutex;" to the "struct _cap_struct". It caused a few byte shift that breaks the assumption made in the "struct libcap" definition in test_verifier.c. The bpf selftest usage only needs to enable and disable the effective caps of the running task. It is easier to directly syscall the capget and capset instead. It can also remove the libcap library dependency. The cap_helpers.{c,h} is added. One __u64 is used for all CAP_* bits instead of two __u32. 
Signed-off-by: Martin KaFai Lau Signed-off-by: Alexei Starovoitov Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220316173823.2036955-1-kafai@fb.com --- tools/testing/selftests/bpf/cap_helpers.c | 67 +++++++++++++++++++++++ tools/testing/selftests/bpf/cap_helpers.h | 19 +++++++ 2 files changed, 86 insertions(+) create mode 100644 tools/testing/selftests/bpf/cap_helpers.c create mode 100644 tools/testing/selftests/bpf/cap_helpers.h diff --git a/tools/testing/selftests/bpf/cap_helpers.c b/tools/testing/selftests/bpf/cap_helpers.c new file mode 100644 index 000000000000..d5ac507401d7 --- /dev/null +++ b/tools/testing/selftests/bpf/cap_helpers.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "cap_helpers.h" + +/* Avoid including from the libcap-devel package, + * so directly declare them here and use them from glibc. + */ +int capget(cap_user_header_t header, cap_user_data_t data); +int capset(cap_user_header_t header, const cap_user_data_t data); + +int cap_enable_effective(__u64 caps, __u64 *old_caps) +{ + struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3]; + struct __user_cap_header_struct hdr = { + .version = _LINUX_CAPABILITY_VERSION_3, + }; + __u32 cap0 = caps; + __u32 cap1 = caps >> 32; + int err; + + err = capget(&hdr, data); + if (err) + return err; + + if (old_caps) + *old_caps = (__u64)(data[1].effective) << 32 | data[0].effective; + + if ((data[0].effective & cap0) == cap0 && + (data[1].effective & cap1) == cap1) + return 0; + + data[0].effective |= cap0; + data[1].effective |= cap1; + err = capset(&hdr, data); + if (err) + return err; + + return 0; +} + +int cap_disable_effective(__u64 caps, __u64 *old_caps) +{ + struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3]; + struct __user_cap_header_struct hdr = { + .version = _LINUX_CAPABILITY_VERSION_3, + }; + __u32 cap0 = caps; + __u32 cap1 = caps >> 32; + int err; + + err = capget(&hdr, data); + if (err) + return err; + + if (old_caps) + *old_caps = (__u64)(data[1].effective) << 32 | data[0].effective; + + if (!(data[0].effective & cap0) && !(data[1].effective & cap1)) + return 0; + + data[0].effective &= ~cap0; + data[1].effective &= ~cap1; + err = capset(&hdr, data); + if (err) + return err; + + return 0; +} diff --git a/tools/testing/selftests/bpf/cap_helpers.h b/tools/testing/selftests/bpf/cap_helpers.h new file mode 100644 index 000000000000..6d163530cb0f --- /dev/null +++ b/tools/testing/selftests/bpf/cap_helpers.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __CAP_HELPERS_H +#define __CAP_HELPERS_H + +#include +#include + +#ifndef CAP_PERFMON +#define CAP_PERFMON 38 +#endif + +#ifndef CAP_BPF +#define CAP_BPF 39 +#endif + +int cap_enable_effective(__u64 caps, __u64 *old_caps); +int cap_disable_effective(__u64 caps, __u64 *old_caps); + +#endif From b1c2768a82b9cd149f54e335de38ea1212f47b07 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 16 Mar 2022 10:38:29 -0700 Subject: [PATCH 066/137] bpf: selftests: Remove libcap usage from test_verifier This patch removes the libcap usage from test_verifier. The cap_*_effective() helpers added in the earlier patch are used instead. 
Signed-off-by: Martin KaFai Lau Signed-off-by: Alexei Starovoitov Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220316173829.2038682-1-kafai@fb.com --- tools/testing/selftests/bpf/Makefile | 3 +- tools/testing/selftests/bpf/test_verifier.c | 84 ++++++--------------- 2 files changed, 25 insertions(+), 62 deletions(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index fe12b4f5fe20..1c6e55740019 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -195,6 +195,7 @@ $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ) CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o TESTING_HELPERS := $(OUTPUT)/testing_helpers.o TRACE_HELPERS := $(OUTPUT)/trace_helpers.o +CAP_HELPERS := $(OUTPUT)/cap_helpers.o $(OUTPUT)/test_dev_cgroup: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_skb_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) @@ -211,7 +212,7 @@ $(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS) $(OUTPUT)/xdping: $(TESTING_HELPERS) $(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS) $(OUTPUT)/test_maps: $(TESTING_HELPERS) -$(OUTPUT)/test_verifier: $(TESTING_HELPERS) +$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS) BPFTOOL ?= $(DEFAULT_BPFTOOL) $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 92e3465fbae8..a2cd236c32eb 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -22,8 +22,6 @@ #include #include -#include - #include #include #include @@ -42,6 +40,7 @@ # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1 # endif #endif +#include "cap_helpers.h" #include "bpf_rand.h" #include "bpf_util.h" #include "test_btf.h" @@ -62,6 +61,10 @@ #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1) +/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */ +#define ADMIN_CAPS (1ULL << CAP_NET_ADMIN | \ + 1ULL << CAP_PERFMON | \ + 1ULL << CAP_BPF) #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled" static bool unpriv_disabled = false; static int skips; @@ -973,47 +976,19 @@ struct libcap { static int set_admin(bool admin) { - cap_t caps; - /* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */ - const cap_value_t cap_net_admin = CAP_NET_ADMIN; - const cap_value_t cap_sys_admin = CAP_SYS_ADMIN; - struct libcap *cap; - int ret = -1; + int err; - caps = cap_get_proc(); - if (!caps) { - perror("cap_get_proc"); - return -1; - } - cap = (struct libcap *)caps; - if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_sys_admin, CAP_CLEAR)) { - perror("cap_set_flag clear admin"); - goto out; - } - if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_admin, - admin ? 
CAP_SET : CAP_CLEAR)) { - perror("cap_set_flag set_or_clear net"); - goto out; - } - /* libcap is likely old and simply ignores CAP_BPF and CAP_PERFMON, - * so update effective bits manually - */ if (admin) { - cap->data[1].effective |= 1 << (38 /* CAP_PERFMON */ - 32); - cap->data[1].effective |= 1 << (39 /* CAP_BPF */ - 32); + err = cap_enable_effective(ADMIN_CAPS, NULL); + if (err) + perror("cap_enable_effective(ADMIN_CAPS)"); } else { - cap->data[1].effective &= ~(1 << (38 - 32)); - cap->data[1].effective &= ~(1 << (39 - 32)); + err = cap_disable_effective(ADMIN_CAPS, NULL); + if (err) + perror("cap_disable_effective(ADMIN_CAPS)"); } - if (cap_set_proc(caps)) { - perror("cap_set_proc"); - goto out; - } - ret = 0; -out: - if (cap_free(caps)) - perror("cap_free"); - return ret; + + return err; } static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val, @@ -1291,31 +1266,18 @@ fail_log: static bool is_admin(void) { - cap_flag_value_t net_priv = CAP_CLEAR; - bool perfmon_priv = false; - bool bpf_priv = false; - struct libcap *cap; - cap_t caps; + __u64 caps; -#ifdef CAP_IS_SUPPORTED - if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) { - perror("cap_get_flag"); + /* The test checks for finer cap as CAP_NET_ADMIN, + * CAP_PERFMON, and CAP_BPF instead of CAP_SYS_ADMIN. + * Thus, disable CAP_SYS_ADMIN at the beginning. + */ + if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps)) { + perror("cap_disable_effective(CAP_SYS_ADMIN)"); return false; } -#endif - caps = cap_get_proc(); - if (!caps) { - perror("cap_get_proc"); - return false; - } - cap = (struct libcap *)caps; - bpf_priv = cap->data[1].effective & (1 << (39/* CAP_BPF */ - 32)); - perfmon_priv = cap->data[1].effective & (1 << (38/* CAP_PERFMON */ - 32)); - if (cap_get_flag(caps, CAP_NET_ADMIN, CAP_EFFECTIVE, &net_priv)) - perror("cap_get_flag NET"); - if (cap_free(caps)) - perror("cap_free"); - return bpf_priv && perfmon_priv && net_priv == CAP_SET; + + return (caps & ADMIN_CAPS) == ADMIN_CAPS; } static void get_unpriv_disabled() From 82cb2b30773e3ccbbd2ed4427d52a91862d4db6d Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 16 Mar 2022 10:38:35 -0700 Subject: [PATCH 067/137] bpf: selftests: Remove libcap usage from test_progs This patch removes the libcap usage from test_progs. bind_perm.c is the only user. cap_*_effective() helpers added in the earlier patch are directly used instead. No other selftest binary is using libcap, so '-lcap' is also removed from the Makefile. 
Signed-off-by: Martin KaFai Lau Signed-off-by: Alexei Starovoitov Reviewed-by: Stanislav Fomichev Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220316173835.2039334-1-kafai@fb.com --- tools/testing/selftests/bpf/Makefile | 5 ++- .../selftests/bpf/prog_tests/bind_perm.c | 44 ++++--------------- 2 files changed, 11 insertions(+), 38 deletions(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 1c6e55740019..11f5883636c3 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -25,7 +25,7 @@ CFLAGS += -g -O0 -rdynamic -Wall -Werror $(GENFLAGS) $(SAN_CFLAGS) \ -I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ -I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT) LDFLAGS += $(SAN_CFLAGS) -LDLIBS += -lcap -lelf -lz -lrt -lpthread +LDLIBS += -lelf -lz -lrt -lpthread # Silence some warnings when compiled with clang ifneq ($(LLVM),) @@ -480,7 +480,8 @@ TRUNNER_TESTS_DIR := prog_tests TRUNNER_BPF_PROGS_DIR := progs TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \ network_helpers.c testing_helpers.c \ - btf_helpers.c flow_dissector_load.h + btf_helpers.c flow_dissector_load.h \ + cap_helpers.c TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ ima_setup.sh \ $(wildcard progs/btf_dump_test_case_*.c) diff --git a/tools/testing/selftests/bpf/prog_tests/bind_perm.c b/tools/testing/selftests/bpf/prog_tests/bind_perm.c index eac71fbb24ce..a1766a298bb7 100644 --- a/tools/testing/selftests/bpf/prog_tests/bind_perm.c +++ b/tools/testing/selftests/bpf/prog_tests/bind_perm.c @@ -4,9 +4,9 @@ #include #include #include -#include #include "test_progs.h" +#include "cap_helpers.h" #include "bind_perm.skel.h" static int duration; @@ -49,41 +49,11 @@ close_socket: close(fd); } -bool cap_net_bind_service(cap_flag_value_t flag) -{ - const cap_value_t cap_net_bind_service = CAP_NET_BIND_SERVICE; - cap_flag_value_t original_value; - bool was_effective = false; - cap_t caps; - - caps = cap_get_proc(); - if (CHECK(!caps, "cap_get_proc", "errno %d", errno)) - goto free_caps; - - if (CHECK(cap_get_flag(caps, CAP_NET_BIND_SERVICE, CAP_EFFECTIVE, - &original_value), - "cap_get_flag", "errno %d", errno)) - goto free_caps; - - was_effective = (original_value == CAP_SET); - - if (CHECK(cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_bind_service, - flag), - "cap_set_flag", "errno %d", errno)) - goto free_caps; - - if (CHECK(cap_set_proc(caps), "cap_set_proc", "errno %d", errno)) - goto free_caps; - -free_caps: - CHECK(cap_free(caps), "cap_free", "errno %d", errno); - return was_effective; -} - void test_bind_perm(void) { - bool cap_was_effective; + const __u64 net_bind_svc_cap = 1ULL << CAP_NET_BIND_SERVICE; struct bind_perm *skel; + __u64 old_caps = 0; int cgroup_fd; if (create_netns()) @@ -105,7 +75,8 @@ void test_bind_perm(void) if (!ASSERT_OK_PTR(skel, "bind_v6_prog")) goto close_skeleton; - cap_was_effective = cap_net_bind_service(CAP_CLEAR); + ASSERT_OK(cap_disable_effective(net_bind_svc_cap, &old_caps), + "cap_disable_effective"); try_bind(AF_INET, 110, EACCES); try_bind(AF_INET6, 110, EACCES); @@ -113,8 +84,9 @@ void test_bind_perm(void) try_bind(AF_INET, 111, 0); try_bind(AF_INET6, 111, 0); - if (cap_was_effective) - cap_net_bind_service(CAP_SET); + if (old_caps & net_bind_svc_cap) + ASSERT_OK(cap_enable_effective(net_bind_svc_cap, NULL), + "cap_enable_effective"); close_skeleton: bind_perm__destroy(skel); From 73e14451f39e54f83ea3badb6d6b8a423f901845 Mon Sep 17 00:00:00 2001 From: Hou 
Tao Date: Wed, 9 Mar 2022 20:33:18 +0800 Subject: [PATCH 068/137] bpf, x86: Fall back to interpreter mode when extra pass fails Extra pass for subprog jit may fail (e.g. due to bpf_jit_harden race), but bpf_func is not cleared for the subprog and jit_subprogs will succeed. The running of the bpf program may lead to oops because the memory for the jited subprog image has already been freed. So fall back to interpreter mode by clearing bpf_func/jited/jited_len when extra pass fails. Signed-off-by: Hou Tao Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220309123321.2400262-2-houtao1@huawei.com --- arch/x86/net/bpf_jit_comp.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index e6ff8f4f9ea4..ec3f00be2ac5 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -2335,7 +2335,13 @@ out_image: sizeof(rw_header->size)); bpf_jit_binary_pack_free(header, rw_header); } + /* Fall back to interpreter mode */ prog = orig_prog; + if (extra_pass) { + prog->bpf_func = NULL; + prog->jited = 0; + prog->jited_len = 0; + } goto out_addrs; } if (image) { @@ -2384,8 +2390,9 @@ out_image: * Both cases are serious bugs and justify WARN_ON. */ if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) { - prog = orig_prog; - goto out_addrs; + /* header has been freed */ + header = NULL; + goto out_image; } bpf_tail_call_direct_fixup(prog); From d2a3b7c5becc3992f8e7d2b9bf5eacceeedb9a48 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Wed, 9 Mar 2022 20:33:20 +0800 Subject: [PATCH 069/137] bpf: Fix net.core.bpf_jit_harden race It is the bpf_jit_harden counterpart to commit 60b58afc96c9 ("bpf: fix net.core.bpf_jit_enable race"). bpf_jit_harden will be tested twice for each subprog if there are subprogs in bpf program and constant blinding may increase the length of program, so when running "./test_progs -t subprogs" and toggling bpf_jit_harden between 0 and 2, jit_subprogs may fail because constant blinding increases the length of subprog instructions during extra passs. So cache the value of bpf_jit_blinding_enabled() during program allocation, and use the cached value during constant blinding, subprog JITing and args tracking of tail call. Signed-off-by: Hou Tao Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220309123321.2400262-4-houtao1@huawei.com --- include/linux/filter.h | 1 + kernel/bpf/core.c | 3 ++- kernel/bpf/verifier.c | 5 +++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 05ed9bd31b45..ed0c0ff42ad5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -566,6 +566,7 @@ struct bpf_prog { gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1, /* Do we need dst entry? */ + blinding_requested:1, /* needs constant blinding */ blinded:1, /* Was blinded */ is_func:1, /* program is a bpf function */ kprobe_override:1, /* Do we override a kprobe? 
*/ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index ab630f773ec1..1324f9523e7c 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -105,6 +105,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag fp->aux = aux; fp->aux->prog = fp; fp->jit_requested = ebpf_jit_enabled(); + fp->blinding_requested = bpf_jit_blinding_enabled(fp); INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); mutex_init(&fp->aux->used_maps_mutex); @@ -1382,7 +1383,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) struct bpf_insn *insn; int i, rewritten; - if (!bpf_jit_blinding_enabled(prog) || prog->blinded) + if (!prog->blinding_requested || prog->blinded) return prog; clone = bpf_prog_clone_create(prog, GFP_USER); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 0db6cd8dcb35..cf92f9c01556 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -13023,6 +13023,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) func[i]->aux->name[0] = 'F'; func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; func[i]->jit_requested = 1; + func[i]->blinding_requested = prog->blinding_requested; func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; func[i]->aux->linfo = prog->aux->linfo; @@ -13146,6 +13147,7 @@ out_free: out_undo_insn: /* cleanup main prog to be interpreted */ prog->jit_requested = 0; + prog->blinding_requested = 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (!bpf_pseudo_call(insn)) continue; @@ -13239,7 +13241,6 @@ static int do_misc_fixups(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; enum bpf_attach_type eatype = prog->expected_attach_type; - bool expect_blinding = bpf_jit_blinding_enabled(prog); enum bpf_prog_type prog_type = resolve_prog_type(prog); struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; @@ -13403,7 +13404,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) insn->code = BPF_JMP | BPF_TAIL_CALL; aux = &env->insn_aux_data[i + delta]; - if (env->bpf_capable && !expect_blinding && + if (env->bpf_capable && !prog->blinding_requested && prog->jit_requested && !bpf_map_key_poisoned(aux) && !bpf_map_ptr_poisoned(aux) && From ad13baf4569152b00de11949b8c93aaa83c1243f Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Wed, 9 Mar 2022 20:33:21 +0800 Subject: [PATCH 070/137] selftests/bpf: Test subprog jit when toggling bpf_jit_harden repeatedly When bpf_jit_harden is toggled between 0 and 2, subprog jit may fail because bpf_jit_harden is read twice during jit and the two reads can return inconsistent values. So add a test to ensure the problem is fixed.
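For illustration only (this toy sketch is not part of the patch and every name in it is hypothetical), the underlying race amounts to reading a tunable twice while it can change in between; the fix is to read it exactly once at allocation time, as the previous patch does with prog->blinding_requested:

  /* Toy user-space sketch; names are hypothetical, not kernel code. */
  #include <stdbool.h>
  #include <stdio.h>

  static volatile int jit_harden;   /* stands in for net.core.bpf_jit_harden */

  struct toy_prog {
          bool blinding_requested;  /* decision cached at allocation time */
  };

  static bool blinding_enabled(void)
  {
          return jit_harden != 0;   /* may change between two calls */
  }

  static void toy_alloc(struct toy_prog *p)
  {
          /* Read the tunable exactly once, mirroring the cached flag. */
          p->blinding_requested = blinding_enabled();
  }

  int main(void)
  {
          struct toy_prog p;

          toy_alloc(&p);
          /* All later JIT stages consult p.blinding_requested instead of
           * re-reading jit_harden, so a concurrent toggle cannot produce
           * two different answers within a single JIT run. */
          printf("blinding requested: %d\n", p.blinding_requested);
          return 0;
  }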
Signed-off-by: Hou Tao Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220309123321.2400262-5-houtao1@huawei.com --- .../selftests/bpf/prog_tests/subprogs.c | 77 ++++++++++++++++--- 1 file changed, 68 insertions(+), 9 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/subprogs.c b/tools/testing/selftests/bpf/prog_tests/subprogs.c index 3f3d2ac4dd57..903f35a9e62e 100644 --- a/tools/testing/selftests/bpf/prog_tests/subprogs.c +++ b/tools/testing/selftests/bpf/prog_tests/subprogs.c @@ -1,32 +1,83 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ #include -#include #include "test_subprogs.skel.h" #include "test_subprogs_unused.skel.h" -static int duration; +struct toggler_ctx { + int fd; + bool stop; +}; -void test_subprogs(void) +static void *toggle_jit_harden(void *arg) +{ + struct toggler_ctx *ctx = arg; + char two = '2'; + char zero = '0'; + + while (!ctx->stop) { + lseek(ctx->fd, SEEK_SET, 0); + write(ctx->fd, &two, sizeof(two)); + lseek(ctx->fd, SEEK_SET, 0); + write(ctx->fd, &zero, sizeof(zero)); + } + + return NULL; +} + +static void test_subprogs_with_jit_harden_toggling(void) +{ + struct toggler_ctx ctx; + pthread_t toggler; + int err; + unsigned int i, loop = 10; + + ctx.fd = open("/proc/sys/net/core/bpf_jit_harden", O_RDWR); + if (!ASSERT_GE(ctx.fd, 0, "open bpf_jit_harden")) + return; + + ctx.stop = false; + err = pthread_create(&toggler, NULL, toggle_jit_harden, &ctx); + if (!ASSERT_OK(err, "new toggler")) + goto out; + + /* Make toggler thread to run */ + usleep(1); + + for (i = 0; i < loop; i++) { + struct test_subprogs *skel = test_subprogs__open_and_load(); + + if (!ASSERT_OK_PTR(skel, "skel open")) + break; + test_subprogs__destroy(skel); + } + + ctx.stop = true; + pthread_join(toggler, NULL); +out: + close(ctx.fd); +} + +static void test_subprogs_alone(void) { struct test_subprogs *skel; struct test_subprogs_unused *skel2; int err; skel = test_subprogs__open_and_load(); - if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + if (!ASSERT_OK_PTR(skel, "skel_open")) return; err = test_subprogs__attach(skel); - if (CHECK(err, "skel_attach", "failed to attach skeleton: %d\n", err)) + if (!ASSERT_OK(err, "skel attach")) goto cleanup; usleep(1); - CHECK(skel->bss->res1 != 12, "res1", "got %d, exp %d\n", skel->bss->res1, 12); - CHECK(skel->bss->res2 != 17, "res2", "got %d, exp %d\n", skel->bss->res2, 17); - CHECK(skel->bss->res3 != 19, "res3", "got %d, exp %d\n", skel->bss->res3, 19); - CHECK(skel->bss->res4 != 36, "res4", "got %d, exp %d\n", skel->bss->res4, 36); + ASSERT_EQ(skel->bss->res1, 12, "res1"); + ASSERT_EQ(skel->bss->res2, 17, "res2"); + ASSERT_EQ(skel->bss->res3, 19, "res3"); + ASSERT_EQ(skel->bss->res4, 36, "res4"); skel2 = test_subprogs_unused__open_and_load(); ASSERT_OK_PTR(skel2, "unused_progs_skel"); @@ -35,3 +86,11 @@ void test_subprogs(void) cleanup: test_subprogs__destroy(skel); } + +void test_subprogs(void) +{ + if (test__start_subtest("subprogs_alone")) + test_subprogs_alone(); + if (test__start_subtest("subprogs_and_jit_harden")) + test_subprogs_with_jit_harden_toggling(); +} From 5142239a22219921a7863cf00c9ab853c00689d8 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 11 Mar 2022 10:14:18 +0100 Subject: [PATCH 071/137] net: veth: Account total xdp_frame len running ndo_xdp_xmit Even if this is a theoretical issue since it is not possible to perform XDP_REDIRECT on a non-linear xdp_frame, veth driver does not account paged area in ndo_xdp_xmit function pointer. 
Introduce the xdp_get_frame_len utility routine to get the full length of an xdp_frame, and account for the total frame size when running XDP_REDIRECT of a non-linear xdp frame into a veth device. Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Toke Hoiland-Jorgensen Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/54f9fd3bb65d190daf2c0bbae2f852ff16cfbaa0.1646989407.git.lorenzo@kernel.org --- drivers/net/veth.c | 4 ++-- include/net/xdp.h | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 58b20ea171dd..b77ce3fdcfe8 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -494,7 +494,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n, struct xdp_frame *frame = frames[i]; void *ptr = veth_xdp_to_ptr(frame); - if (unlikely(frame->len > max_len || + if (unlikely(xdp_get_frame_len(frame) > max_len || __ptr_ring_produce(&rq->xdp_ring, ptr))) break; nxmit++; @@ -855,7 +855,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget, /* ndo_xdp_xmit */ struct xdp_frame *frame = veth_ptr_to_xdp(ptr); - stats->xdp_bytes += frame->len; + stats->xdp_bytes += xdp_get_frame_len(frame); frame = veth_xdp_rcv_one(rq, frame, bq, stats); if (frame) { /* XDP_PASS */ diff --git a/include/net/xdp.h b/include/net/xdp.h index b7721c3e4d1f..04c852c7a77f 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -343,6 +343,20 @@ out: __xdp_release_frame(xdpf->data, mem); } +static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf) +{ + struct skb_shared_info *sinfo; + unsigned int len = xdpf->len; + + if (likely(!xdp_frame_has_frags(xdpf))) + goto out; + + sinfo = xdp_get_shared_info_from_frame(xdpf); + len += sinfo->xdp_frags_size; +out: + return len; +} + int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, u32 queue_index, unsigned int napi_id, u32 frag_size); From 718a18a0c8a67f97781e40bdef7cdd055c430996 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 11 Mar 2022 10:14:19 +0100 Subject: [PATCH 072/137] veth: Rework veth_xdp_rcv_skb in order to accept non-linear skb MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce the veth_convert_skb_to_xdp_buff routine in order to convert a non-linear skb into an xdp buffer. If the received skb is cloned or shared, veth_convert_skb_to_xdp_buff will copy it into a new skb composed of order-0 pages for the linear and the fragmented area. Moreover, veth_convert_skb_to_xdp_buff guarantees we have enough headroom for xdp. This is a preliminary patch to allow attaching xdp programs with frags support on veth devices.
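As a rough worked example of the size budget this conversion enforces (the numbers below are configuration-dependent assumptions, not taken from the patch: 4 KiB pages, 256 bytes of XDP headroom, a cache-line-aligned skb_shared_info of 320 bytes, MAX_SKB_FRAGS = 17):

  max_head_size = SKB_WITH_OVERHEAD(4096 - 256) = 4096 - 256 - 320 = 3520 bytes
  budget        = PAGE_SIZE * MAX_SKB_FRAGS + max_head_size
                = 4096 * 17 + 3520 = 73152 bytes (~71 KiB)

An skb longer than this budget cannot be represented as one head page plus MAX_SKB_FRAGS order-0 fragments, so it is dropped instead of converted.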
Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Toke Høiland-Jørgensen Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/8d228b106bc1903571afd1d77e797bffe9a5ea7c.1646989407.git.lorenzo@kernel.org --- drivers/net/veth.c | 203 ++++++++++++++++++++++++++++++--------------- net/core/xdp.c | 1 + 2 files changed, 135 insertions(+), 69 deletions(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index b77ce3fdcfe8..bfae15ec902b 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -433,21 +433,6 @@ static void veth_set_multicast_list(struct net_device *dev) { } -static struct sk_buff *veth_build_skb(void *head, int headroom, int len, - int buflen) -{ - struct sk_buff *skb; - - skb = build_skb(head, buflen); - if (!skb) - return NULL; - - skb_reserve(skb, headroom); - skb_put(skb, len); - - return skb; -} - static int veth_select_rxq(struct net_device *dev) { return smp_processor_id() % dev->real_num_rx_queues; @@ -695,16 +680,130 @@ static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames, } } +static void veth_xdp_get(struct xdp_buff *xdp) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + int i; + + get_page(virt_to_page(xdp->data)); + if (likely(!xdp_buff_has_frags(xdp))) + return; + + for (i = 0; i < sinfo->nr_frags; i++) + __skb_frag_ref(&sinfo->frags[i]); +} + +static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, + struct xdp_buff *xdp, + struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + u32 frame_sz; + + if (skb_shared(skb) || skb_head_is_locked(skb) || + skb_shinfo(skb)->nr_frags) { + u32 size, len, max_head_size, off; + struct sk_buff *nskb; + struct page *page; + int i, head_off; + + /* We need a private copy of the skb and data buffers since + * the ebpf program can modify it. We segment the original skb + * into order-0 pages without linearize it. 
+ * + * Make sure we have enough space for linear and paged area + */ + max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - + VETH_XDP_HEADROOM); + if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size) + goto drop; + + /* Allocate skb head */ + page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + if (!page) + goto drop; + + nskb = build_skb(page_address(page), PAGE_SIZE); + if (!nskb) { + put_page(page); + goto drop; + } + + skb_reserve(nskb, VETH_XDP_HEADROOM); + size = min_t(u32, skb->len, max_head_size); + if (skb_copy_bits(skb, 0, nskb->data, size)) { + consume_skb(nskb); + goto drop; + } + skb_put(nskb, size); + + skb_copy_header(nskb, skb); + head_off = skb_headroom(nskb) - skb_headroom(skb); + skb_headers_offset_update(nskb, head_off); + + /* Allocate paged area of new skb */ + off = size; + len = skb->len - off; + + for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { + page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + if (!page) { + consume_skb(nskb); + goto drop; + } + + size = min_t(u32, len, PAGE_SIZE); + skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE); + if (skb_copy_bits(skb, off, page_address(page), + size)) { + consume_skb(nskb); + goto drop; + } + + len -= size; + off += size; + } + + consume_skb(skb); + skb = nskb; + } else if (skb_headroom(skb) < XDP_PACKET_HEADROOM && + pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) { + goto drop; + } + + /* SKB "head" area always have tailroom for skb_shared_info */ + frame_sz = skb_end_pointer(skb) - skb->head; + frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); + xdp_prepare_buff(xdp, skb->head, skb_headroom(skb), + skb_headlen(skb), true); + + if (skb_is_nonlinear(skb)) { + skb_shinfo(skb)->xdp_frags_size = skb->data_len; + xdp_buff_set_frags_flag(xdp); + } else { + xdp_buff_clear_frags_flag(xdp); + } + *pskb = skb; + + return 0; +drop: + consume_skb(skb); + *pskb = NULL; + + return -ENOMEM; +} + static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, struct veth_xdp_tx_bq *bq, struct veth_stats *stats) { - u32 pktlen, headroom, act, metalen, frame_sz; void *orig_data, *orig_data_end; struct bpf_prog *xdp_prog; - int mac_len, delta, off; struct xdp_buff xdp; + u32 act, metalen; + int off; skb_prepare_for_gro(skb); @@ -715,52 +814,9 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, goto out; } - mac_len = skb->data - skb_mac_header(skb); - pktlen = skb->len + mac_len; - headroom = skb_headroom(skb) - mac_len; - - if (skb_shared(skb) || skb_head_is_locked(skb) || - skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) { - struct sk_buff *nskb; - int size, head_off; - void *head, *start; - struct page *page; - - size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - if (size > PAGE_SIZE) - goto drop; - - page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); - if (!page) - goto drop; - - head = page_address(page); - start = head + VETH_XDP_HEADROOM; - if (skb_copy_bits(skb, -mac_len, start, pktlen)) { - page_frag_free(head); - goto drop; - } - - nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len, - skb->len, PAGE_SIZE); - if (!nskb) { - page_frag_free(head); - goto drop; - } - - skb_copy_header(nskb, skb); - head_off = skb_headroom(nskb) - skb_headroom(skb); - skb_headers_offset_update(nskb, head_off); - consume_skb(skb); - skb = nskb; - } - - /* SKB "head" area always have tailroom for skb_shared_info */ - frame_sz = skb_end_pointer(skb) - skb->head; - frame_sz += 
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq); - xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true); + __skb_push(skb, skb->data - skb_mac_header(skb)); + if (veth_convert_skb_to_xdp_buff(rq, &xdp, &skb)) + goto drop; orig_data = xdp.data; orig_data_end = xdp.data_end; @@ -771,7 +827,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, case XDP_PASS: break; case XDP_TX: - get_page(virt_to_page(xdp.data)); + veth_xdp_get(&xdp); consume_skb(skb); xdp.rxq->mem = rq->xdp_mem; if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) { @@ -783,7 +839,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: - get_page(virt_to_page(xdp.data)); + veth_xdp_get(&xdp); consume_skb(skb); xdp.rxq->mem = rq->xdp_mem; if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) { @@ -806,18 +862,27 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, rcu_read_unlock(); /* check if bpf_xdp_adjust_head was used */ - delta = orig_data - xdp.data; - off = mac_len + delta; + off = orig_data - xdp.data; if (off > 0) __skb_push(skb, off); else if (off < 0) __skb_pull(skb, -off); - skb->mac_header -= delta; + + skb_reset_mac_header(skb); /* check if bpf_xdp_adjust_tail was used */ off = xdp.data_end - orig_data_end; if (off != 0) __skb_put(skb, off); /* positive on grow, negative on shrink */ + + /* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers + * (e.g. bpf_xdp_adjust_tail), we need to update data_len here. + */ + if (xdp_buff_has_frags(&xdp)) + skb->data_len = skb_shinfo(skb)->xdp_frags_size; + else + skb->data_len = 0; + skb->protocol = eth_type_trans(skb, rq->dev); metalen = xdp.data - xdp.data_meta; @@ -833,7 +898,7 @@ xdp_drop: return NULL; err_xdp: rcu_read_unlock(); - page_frag_free(xdp.data); + xdp_return_buff(&xdp); xdp_xmit: return NULL; } diff --git a/net/core/xdp.c b/net/core/xdp.c index 361df312ee7f..b5f2d428d856 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -528,6 +528,7 @@ void xdp_return_buff(struct xdp_buff *xdp) out: __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp); } +EXPORT_SYMBOL_GPL(xdp_return_buff); /* Only called for MEM_TYPE_PAGE_POOL see xdp.h */ void __xdp_release_frame(void *data, struct xdp_mem_info *mem) From 7cda76d858a4e71ac4a04066c093679a12e1312c Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 11 Mar 2022 10:14:20 +0100 Subject: [PATCH 073/137] veth: Allow jumbo frames in xdp mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow increasing the MTU over page boundaries on veth devices if the attached xdp program declares to support xdp fragments. Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Toke Høiland-Jørgensen Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/d5dc039c3d4123426e7023a488c449181a7bc57f.1646989407.git.lorenzo@kernel.org --- drivers/net/veth.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index bfae15ec902b..1b5714926d81 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1528,9 +1528,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, goto err; } - max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM - - peer->hard_header_len - - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) - + peer->hard_header_len; + /* Allow increasing the max_mtu if the program supports + * XDP fragments. 
+ */ + if (prog->aux->xdp_has_frags) + max_mtu += PAGE_SIZE * MAX_SKB_FRAGS; + if (peer->mtu > max_mtu) { NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP"); err = -ERANGE; From e0999c8e590935b13dd598a6480685eae9c1b3c5 Mon Sep 17 00:00:00 2001 From: Kaixi Fan Date: Mon, 14 Mar 2022 00:41:16 +0800 Subject: [PATCH 074/137] selftests/bpf: Fix tunnel remote IP comments In namespace at_ns0, the IP address of the tnl dev is 10.1.1.100, which is the overlay IP, and the IP address of veth0 is 172.16.1.100, which is the vtep IP. When doing 'ping 10.1.1.100' from the root namespace, the remote_ip should be 172.16.1.100. Fixes: 933a741e3b82 ("selftests/bpf: bpf tunnel test.") Signed-off-by: Kaixi Fan Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20220313164116.5889-1-fankaixi.li@bytedance.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/test_tunnel.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh index ca1372924023..2817d9948d59 100755 --- a/tools/testing/selftests/bpf/test_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tunnel.sh @@ -39,7 +39,7 @@ # from root namespace, the following operations happen: # 1) Route lookup shows 10.1.1.100/24 belongs to tnl dev, fwd to tnl dev. # 2) Tnl device's egress BPF program is triggered and set the tunnel metadata, -# with remote_ip=172.16.1.200 and others. +# with remote_ip=172.16.1.100 and others. # 3) Outer tunnel header is prepended and route the packet to veth1's egress # 4) veth0's ingress queue receive the tunneled packet at namespace at_ns0 # 5) Tunnel protocol handler, ex: vxlan_rcv, decap the packet From 4f554e955614f19425cee86de4669351741a6280 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 15 Mar 2022 23:00:26 +0900 Subject: [PATCH 075/137] ftrace: Add ftrace_set_filter_ips function Add the ftrace_set_filter_ips function to be able to set a filter on multiple ip addresses at once. With the kprobe multi attach interface we have cases where we need to initialize an ftrace_ops object with thousands of functions, so having a single function dive into ftrace_hash_move_and_update_ops under ftrace_lock is faster. The function ips are passed as an unsigned long array together with a count.
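A minimal caller sketch (hypothetical module code, not from the patch) showing how the new interface batches what previously required one ftrace_set_filter_ip() call per address:

  /* Hypothetical user of ftrace_set_filter_ips(); error handling trimmed. */
  static void my_handler(unsigned long ip, unsigned long parent_ip,
                         struct ftrace_ops *ops, struct ftrace_regs *fregs)
  {
          /* called on entry of every filtered function */
  }

  static struct ftrace_ops my_ops = {
          .func = my_handler,
  };

  static int my_attach(unsigned long *ips, unsigned int cnt)
  {
          int err;

          /* remove=0: add the ips; reset=1: start from an empty filter */
          err = ftrace_set_filter_ips(&my_ops, ips, cnt, 0, 1);
          if (err)
                  return err;
          return register_ftrace_function(&my_ops);
  }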
Signed-off-by: Jiri Olsa Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735282673.1084943.18310504594134769804.stgit@devnote2 --- include/linux/ftrace.h | 3 +++ kernel/trace/ftrace.c | 58 +++++++++++++++++++++++++++++++++++------- 2 files changed, 52 insertions(+), 9 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9999e29187de..60847cbce0da 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -512,6 +512,8 @@ struct dyn_ftrace { int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, int remove, int reset); +int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips, + unsigned int cnt, int remove, int reset); int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, int len, int reset); int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, @@ -802,6 +804,7 @@ static inline unsigned long ftrace_location(unsigned long ip) #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) #define ftrace_set_early_filter(ops, buf, enable) do { } while (0) #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; }) +#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; }) #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_free_filter(ops) do { } while (0) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index a4b462b6f944..93e992962ada 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -4958,7 +4958,7 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, } static int -ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) +__ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) { struct ftrace_func_entry *entry; @@ -4976,9 +4976,30 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) return add_hash_entry(hash, ip); } +static int +ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips, + unsigned int cnt, int remove) +{ + unsigned int i; + int err; + + for (i = 0; i < cnt; i++) { + err = __ftrace_match_addr(hash, ips[i], remove); + if (err) { + /* + * This expects the @hash is a temporary hash and if this + * fails the caller must free the @hash. 
+ */ + return err; + } + } + return 0; +} + static int ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, - unsigned long ip, int remove, int reset, int enable) + unsigned long *ips, unsigned int cnt, + int remove, int reset, int enable) { struct ftrace_hash **orig_hash; struct ftrace_hash *hash; @@ -5008,8 +5029,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, ret = -EINVAL; goto out_regex_unlock; } - if (ip) { - ret = ftrace_match_addr(hash, ip, remove); + if (ips) { + ret = ftrace_match_addr(hash, ips, cnt, remove); if (ret < 0) goto out_regex_unlock; } @@ -5026,10 +5047,10 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, } static int -ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, - int reset, int enable) +ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt, + int remove, int reset, int enable) { - return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable); + return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable); } #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS @@ -5634,10 +5655,29 @@ int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, int remove, int reset) { ftrace_ops_init(ops); - return ftrace_set_addr(ops, ip, remove, reset, 1); + return ftrace_set_addr(ops, &ip, 1, remove, reset, 1); } EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); +/** + * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses + * @ops - the ops to set the filter with + * @ips - the array of addresses to add to or remove from the filter. + * @cnt - the number of addresses in @ips + * @remove - non zero to remove ips from the filter + * @reset - non zero to reset all filters before applying this filter. + * + * Filters denote which functions should be enabled when tracing is enabled. + * If the @ips array or any ip specified within is NULL, it fails to update the filter. + */ +int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips, + unsigned int cnt, int remove, int reset) +{ + ftrace_ops_init(ops); + return ftrace_set_addr(ops, ips, cnt, remove, reset, 1); +} +EXPORT_SYMBOL_GPL(ftrace_set_filter_ips); + /** * ftrace_ops_set_global_filter - setup ops to use global filters * @ops - the ops which will use the global filters @@ -5659,7 +5699,7 @@ static int ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, int reset, int enable) { - return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable); + return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable); } /** From cad9931f64dc7f5dbdec12cae9f30063360f9855 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:00:38 +0900 Subject: [PATCH 076/137] fprobe: Add ftrace based probe APIs The fprobe is a wrapper API for the ftrace function tracer. Unlike kprobes, this probe only supports the function entry, but it can probe multiple functions with one fprobe. The usage is similar: the user sets their callback to fprobe::entry_handler and calls register_fprobe*() with the probed functions. There are 3 registration interfaces: - register_fprobe() takes filtering patterns of the function names. - register_fprobe_ips() takes an array of ftrace-location addresses. - register_fprobe_syms() takes an array of function names. The registered fprobes can be unregistered with unregister_fprobe(). e.g. struct fprobe fp = { .entry_handler = user_handler }; const char *targets[] = { "func1", "func2", "func3"}; ... ret = register_fprobe_syms(&fp, targets, ARRAY_SIZE(targets)); ...
unregister_fprobe(&fp); Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735283857.1084943.1154436951479395551.stgit@devnote2 --- include/linux/fprobe.h | 87 +++++++++++++++++ kernel/trace/Kconfig | 12 +++ kernel/trace/Makefile | 1 + kernel/trace/fprobe.c | 211 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 311 insertions(+) create mode 100644 include/linux/fprobe.h create mode 100644 kernel/trace/fprobe.c diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h new file mode 100644 index 000000000000..2ba099aff041 --- /dev/null +++ b/include/linux/fprobe.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Simple ftrace probe wrapper */ +#ifndef _LINUX_FPROBE_H +#define _LINUX_FPROBE_H + +#include +#include + +/** + * struct fprobe - ftrace based probe. + * @ops: The ftrace_ops. + * @nmissed: The counter for missing events. + * @flags: The status flag. + * @entry_handler: The callback function for function entry. + */ +struct fprobe { +#ifdef CONFIG_FUNCTION_TRACER + /* + * If CONFIG_FUNCTION_TRACER is not set, CONFIG_FPROBE is disabled too. + * But user of fprobe may keep embedding the struct fprobe on their own + * code. To avoid build error, this will keep the fprobe data structure + * defined here, but remove ftrace_ops data structure. + */ + struct ftrace_ops ops; +#endif + unsigned long nmissed; + unsigned int flags; + void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); +}; + +#define FPROBE_FL_DISABLED 1 + +static inline bool fprobe_disabled(struct fprobe *fp) +{ + return (fp) ? fp->flags & FPROBE_FL_DISABLED : false; +} + +#ifdef CONFIG_FPROBE +int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter); +int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num); +int register_fprobe_syms(struct fprobe *fp, const char **syms, int num); +int unregister_fprobe(struct fprobe *fp); +#else +static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter) +{ + return -EOPNOTSUPP; +} +static inline int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num) +{ + return -EOPNOTSUPP; +} +static inline int register_fprobe_syms(struct fprobe *fp, const char **syms, int num) +{ + return -EOPNOTSUPP; +} +static inline int unregister_fprobe(struct fprobe *fp) +{ + return -EOPNOTSUPP; +} +#endif + +/** + * disable_fprobe() - Disable fprobe + * @fp: The fprobe to be disabled. + * + * This will soft-disable @fp. Note that this doesn't remove the ftrace + * hooks from the function entry. + */ +static inline void disable_fprobe(struct fprobe *fp) +{ + if (fp) + fp->flags |= FPROBE_FL_DISABLED; +} + +/** + * enable_fprobe() - Enable fprobe + * @fp: The fprobe to be enabled. + * + * This will soft-enable @fp. 
+ */ +static inline void enable_fprobe(struct fprobe *fp) +{ + if (fp) + fp->flags &= ~FPROBE_FL_DISABLED; +} + +#endif diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index a5eb5e7fd624..7ce31abc542b 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -236,6 +236,18 @@ config DYNAMIC_FTRACE_WITH_ARGS depends on DYNAMIC_FTRACE depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS +config FPROBE + bool "Kernel Function Probe (fprobe)" + depends on FUNCTION_TRACER + depends on DYNAMIC_FTRACE_WITH_REGS + default n + help + This option enables kernel function probe (fprobe) based on ftrace, + which is similar to kprobes, but probes only for kernel function + entries and it can probe multiple functions by one fprobe. + + If unsure, say N. + config FUNCTION_PROFILER bool "Kernel function profiler" depends on FUNCTION_TRACER diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index bedc5caceec7..79255f9de9a4 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -97,6 +97,7 @@ obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o obj-$(CONFIG_FTRACE_RECORD_RECURSION) += trace_recursion_record.o +obj-$(CONFIG_FPROBE) += fprobe.o obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c new file mode 100644 index 000000000000..7e8ceee339a0 --- /dev/null +++ b/kernel/trace/fprobe.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * fprobe - Simple ftrace probe wrapper for function entry. + */ +#define pr_fmt(fmt) "fprobe: " fmt + +#include +#include +#include +#include +#include +#include + +static void fprobe_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) +{ + struct fprobe *fp; + int bit; + + fp = container_of(ops, struct fprobe, ops); + if (fprobe_disabled(fp)) + return; + + bit = ftrace_test_recursion_trylock(ip, parent_ip); + if (bit < 0) { + fp->nmissed++; + return; + } + + if (fp->entry_handler) + fp->entry_handler(fp, ip, ftrace_get_regs(fregs)); + + ftrace_test_recursion_unlock(bit); +} +NOKPROBE_SYMBOL(fprobe_handler); + +/* Convert ftrace location address from symbols */ +static unsigned long *get_ftrace_locations(const char **syms, int num) +{ + unsigned long addr, size; + unsigned long *addrs; + int i; + + /* Convert symbols to symbol address */ + addrs = kcalloc(num, sizeof(*addrs), GFP_KERNEL); + if (!addrs) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < num; i++) { + addr = kallsyms_lookup_name(syms[i]); + if (!addr) /* Maybe wrong symbol */ + goto error; + + /* Convert symbol address to ftrace location. */ + if (!kallsyms_lookup_size_offset(addr, &size, NULL) || !size) + goto error; + + addr = ftrace_location_range(addr, addr + size - 1); + if (!addr) /* No dynamic ftrace there. */ + goto error; + + addrs[i] = addr; + } + + return addrs; + +error: + kfree(addrs); + + return ERR_PTR(-ENOENT); +} + +static void fprobe_init(struct fprobe *fp) +{ + fp->nmissed = 0; + fp->ops.func = fprobe_handler; + fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS; +} + +/** + * register_fprobe() - Register fprobe to ftrace by pattern. + * @fp: A fprobe data structure to be registered. + * @filter: A wildcard pattern of probed symbols. + * @notfilter: A wildcard pattern of NOT probed symbols. + * + * Register @fp to ftrace for enabling the probe on the symbols matched to @filter. + * If @notfilter is not NULL, the symbols matched the @notfilter are not probed. 
+ * + * Return 0 if @fp is registered successfully, -errno if not. + */ +int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter) +{ + unsigned char *str; + int ret, len; + + if (!fp || !filter) + return -EINVAL; + + fprobe_init(fp); + + len = strlen(filter); + str = kstrdup(filter, GFP_KERNEL); + ret = ftrace_set_filter(&fp->ops, str, len, 0); + kfree(str); + if (ret) + return ret; + + if (notfilter) { + len = strlen(notfilter); + str = kstrdup(notfilter, GFP_KERNEL); + ret = ftrace_set_notrace(&fp->ops, str, len, 0); + kfree(str); + if (ret) + goto out; + } + + ret = register_ftrace_function(&fp->ops); +out: + if (ret) + ftrace_free_filter(&fp->ops); + return ret; +} +EXPORT_SYMBOL_GPL(register_fprobe); + +/** + * register_fprobe_ips() - Register fprobe to ftrace by address. + * @fp: A fprobe data structure to be registered. + * @addrs: An array of target ftrace location addresses. + * @num: The number of entries of @addrs. + * + * Register @fp to ftrace for enabling the probe on the address given by @addrs. + * The @addrs must be ftrace location addresses, which may be + * the symbol address + arch-dependent offset. + * If you are unsure what this means, please use the other registration functions. + * + * Return 0 if @fp is registered successfully, -errno if not. + */ +int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num) +{ + int ret; + + if (!fp || !addrs || num <= 0) + return -EINVAL; + + fprobe_init(fp); + + ret = ftrace_set_filter_ips(&fp->ops, addrs, num, 0, 0); + if (!ret) + ret = register_ftrace_function(&fp->ops); + + if (ret) + ftrace_free_filter(&fp->ops); + + return ret; +} +EXPORT_SYMBOL_GPL(register_fprobe_ips); + +/** + * register_fprobe_syms() - Register fprobe to ftrace by symbols. + * @fp: A fprobe data structure to be registered. + * @syms: An array of target symbols. + * @num: The number of entries of @syms. + * + * Register @fp to the symbols given by @syms array. This will be useful if + * you are sure the symbols exist in the kernel. + * + * Return 0 if @fp is registered successfully, -errno if not. + */ +int register_fprobe_syms(struct fprobe *fp, const char **syms, int num) +{ + unsigned long *addrs; + int ret; + + if (!fp || !syms || num <= 0) + return -EINVAL; + + addrs = get_ftrace_locations(syms, num); + if (IS_ERR(addrs)) + return PTR_ERR(addrs); + + ret = register_fprobe_ips(fp, addrs, num); + + kfree(addrs); + + return ret; +} +EXPORT_SYMBOL_GPL(register_fprobe_syms); + +/** + * unregister_fprobe() - Unregister fprobe from ftrace + * @fp: A fprobe data structure to be unregistered. + * + * Unregister fprobe (and remove ftrace hooks from the function entries). + * + * Return 0 if @fp is unregistered successfully, -errno if not. + */ +int unregister_fprobe(struct fprobe *fp) +{ + int ret; + + if (!fp || fp->ops.func != fprobe_handler) + return -EINVAL; + + ret = unregister_ftrace_function(&fp->ops); + + if (!ret) + ftrace_free_filter(&fp->ops); + + return ret; +} +EXPORT_SYMBOL_GPL(unregister_fprobe); From 54ecbe6f1ed5138c895bdff55608cf502755b20e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:00:50 +0900 Subject: [PATCH 077/137] rethook: Add a generic return hook Add a return hook framework which hooks the function return. Most of the logic came from the kretprobe, but this is independent of kretprobe. Note that this is expected to be used with other function entry hooking features, like ftrace, fprobe, and kprobes. Eventually this will replace the kretprobe (e.g.
kprobe + rethook = kretprobe), but at this moment, this is just an additional hook. Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735285066.1084943.9259661137330166643.stgit@devnote2 --- include/linux/rethook.h | 100 +++++++++++++ include/linux/sched.h | 3 + kernel/exit.c | 2 + kernel/fork.c | 3 + kernel/trace/Kconfig | 11 ++ kernel/trace/Makefile | 1 + kernel/trace/rethook.c | 317 ++++++++++++++++++++++++++++++++++++++++ 7 files changed, 437 insertions(+) create mode 100644 include/linux/rethook.h create mode 100644 kernel/trace/rethook.c diff --git a/include/linux/rethook.h b/include/linux/rethook.h new file mode 100644 index 000000000000..c8ac1e5afcd1 --- /dev/null +++ b/include/linux/rethook.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Return hooking with list-based shadow stack. + */ +#ifndef _LINUX_RETHOOK_H +#define _LINUX_RETHOOK_H + +#include +#include +#include +#include +#include +#include + +struct rethook_node; + +typedef void (*rethook_handler_t) (struct rethook_node *, void *, struct pt_regs *); + +/** + * struct rethook - The rethook management data structure. + * @data: The user-defined data storage. + * @handler: The user-defined return hook handler. + * @pool: The pool of struct rethook_node. + * @ref: The reference counter. + * @rcu: The rcu_head for deferred freeing. + * + * Don't embed to another data structure, because this is a self-destructive + * data structure when all rethook_node are freed. + */ +struct rethook { + void *data; + rethook_handler_t handler; + struct freelist_head pool; + refcount_t ref; + struct rcu_head rcu; +}; + +/** + * struct rethook_node - The rethook shadow-stack entry node. + * @freelist: The freelist, linked to struct rethook::pool. + * @rcu: The rcu_head for deferred freeing. + * @llist: The llist, linked to a struct task_struct::rethooks. + * @rethook: The pointer to the struct rethook. + * @ret_addr: The storage for the real return address. + * @frame: The storage for the frame pointer. + * + * You can embed this to your extended data structure to store any data + * on each entry of the shadow stack. + */ +struct rethook_node { + union { + struct freelist_node freelist; + struct rcu_head rcu; + }; + struct llist_node llist; + struct rethook *rethook; + unsigned long ret_addr; + unsigned long frame; +}; + +struct rethook *rethook_alloc(void *data, rethook_handler_t handler); +void rethook_free(struct rethook *rh); +void rethook_add_node(struct rethook *rh, struct rethook_node *node); +struct rethook_node *rethook_try_get(struct rethook *rh); +void rethook_recycle(struct rethook_node *node); +void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount); +unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame, + struct llist_node **cur); + +/* Arch dependent code must implement arch_* and trampoline code */ +void arch_rethook_prepare(struct rethook_node *node, struct pt_regs *regs, bool mcount); +void arch_rethook_trampoline(void); + +/** + * is_rethook_trampoline() - Check whether the address is rethook trampoline + * @addr: The address to be checked + * + * Return true if the @addr is the rethook trampoline address. 
+ */ +static inline bool is_rethook_trampoline(unsigned long addr) +{ + return addr == (unsigned long)dereference_symbol_descriptor(arch_rethook_trampoline); +} + +/* If the architecture needs to fixup the return address, implement it. */ +void arch_rethook_fixup_return(struct pt_regs *regs, + unsigned long correct_ret_addr); + +/* Generic trampoline handler, arch code must prepare asm stub */ +unsigned long rethook_trampoline_handler(struct pt_regs *regs, + unsigned long frame); + +#ifdef CONFIG_RETHOOK +void rethook_flush_task(struct task_struct *tk); +#else +#define rethook_flush_task(tsk) do { } while (0) +#endif + +#endif + diff --git a/include/linux/sched.h b/include/linux/sched.h index 75ba8aa60248..7034f53404e3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1481,6 +1481,9 @@ struct task_struct { #ifdef CONFIG_KRETPROBES struct llist_head kretprobe_instances; #endif +#ifdef CONFIG_RETHOOK + struct llist_head rethooks; +#endif #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH /* diff --git a/kernel/exit.c b/kernel/exit.c index b00a25bb4ab9..2d1803fa8fe6 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -64,6 +64,7 @@ #include #include #include +#include #include #include @@ -169,6 +170,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp) struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); kprobe_flush_task(tsk); + rethook_flush_task(tsk); perf_event_delayed_put(tsk); trace_sched_process_free(tsk); put_task_struct(tsk); diff --git a/kernel/fork.c b/kernel/fork.c index a024bf6254df..3db1a4110a25 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2255,6 +2255,9 @@ static __latent_entropy struct task_struct *copy_process( #ifdef CONFIG_KRETPROBES p->kretprobe_instances.first = NULL; #endif +#ifdef CONFIG_RETHOOK + p->rethooks.first = NULL; +#endif /* * Ensure that the cgroup subsystem policies allow the new process to be diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 7ce31abc542b..e75504e42ab8 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -10,6 +10,17 @@ config USER_STACKTRACE_SUPPORT config NOP_TRACER bool +config HAVE_RETHOOK + bool + +config RETHOOK + bool + depends on HAVE_RETHOOK + help + Enable generic return hooking feature. This is an internal + API, which will be used by other function-entry hooking + features like fprobe and kprobes. + config HAVE_FUNCTION_TRACER bool help diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 79255f9de9a4..c6f11a139eac 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -98,6 +98,7 @@ obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o obj-$(CONFIG_FTRACE_RECORD_RECURSION) += trace_recursion_record.o obj-$(CONFIG_FPROBE) += fprobe.o +obj-$(CONFIG_RETHOOK) += rethook.o obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c new file mode 100644 index 000000000000..ab463a4d2b23 --- /dev/null +++ b/kernel/trace/rethook.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "rethook: " fmt + +#include +#include +#include +#include +#include +#include +#include + +/* Return hook list (shadow stack by list) */ + +/* + * This function is called from delayed_put_task_struct() when a task is + * dead and cleaned up to recycle any kretprobe instances associated with + * this task. These left over instances represent probed functions that + * have been called but will never return. 
+ */ +void rethook_flush_task(struct task_struct *tk) +{ + struct rethook_node *rhn; + struct llist_node *node; + + node = __llist_del_all(&tk->rethooks); + while (node) { + rhn = container_of(node, struct rethook_node, llist); + node = node->next; + preempt_disable(); + rethook_recycle(rhn); + preempt_enable(); + } +} + +static void rethook_free_rcu(struct rcu_head *head) +{ + struct rethook *rh = container_of(head, struct rethook, rcu); + struct rethook_node *rhn; + struct freelist_node *node; + int count = 1; + + node = rh->pool.head; + while (node) { + rhn = container_of(node, struct rethook_node, freelist); + node = node->next; + kfree(rhn); + count++; + } + + /* The rh->ref is the number of pooled node + 1 */ + if (refcount_sub_and_test(count, &rh->ref)) + kfree(rh); +} + +/** + * rethook_free() - Free struct rethook. + * @rh: the struct rethook to be freed. + * + * Free the rethook. Before calling this function, user must ensure the + * @rh::data is cleaned if needed (or, the handler can access it after + * calling this function.) This function will set the @rh to be freed + * after all rethook_node are freed (not soon). And the caller must + * not touch @rh after calling this. + */ +void rethook_free(struct rethook *rh) +{ + rcu_assign_pointer(rh->handler, NULL); + + call_rcu(&rh->rcu, rethook_free_rcu); +} + +/** + * rethook_alloc() - Allocate struct rethook. + * @data: a data to pass the @handler when hooking the return. + * @handler: the return hook callback function. + * + * Allocate and initialize a new rethook with @data and @handler. + * Return NULL if memory allocation fails or @handler is NULL. + * Note that @handler == NULL means this rethook is going to be freed. + */ +struct rethook *rethook_alloc(void *data, rethook_handler_t handler) +{ + struct rethook *rh = kzalloc(sizeof(struct rethook), GFP_KERNEL); + + if (!rh || !handler) + return NULL; + + rh->data = data; + rh->handler = handler; + rh->pool.head = NULL; + refcount_set(&rh->ref, 1); + + return rh; +} + +/** + * rethook_add_node() - Add a new node to the rethook. + * @rh: the struct rethook. + * @node: the struct rethook_node to be added. + * + * Add @node to @rh. User must allocate @node (as a part of user's + * data structure.) The @node fields are initialized in this function. + */ +void rethook_add_node(struct rethook *rh, struct rethook_node *node) +{ + node->rethook = rh; + freelist_add(&node->freelist, &rh->pool); + refcount_inc(&rh->ref); +} + +static void free_rethook_node_rcu(struct rcu_head *head) +{ + struct rethook_node *node = container_of(head, struct rethook_node, rcu); + + if (refcount_dec_and_test(&node->rethook->ref)) + kfree(node->rethook); + kfree(node); +} + +/** + * rethook_recycle() - return the node to rethook. + * @node: The struct rethook_node to be returned. + * + * Return back the @node to @node::rethook. If the @node::rethook is already + * marked as freed, this will free the @node. + */ +void rethook_recycle(struct rethook_node *node) +{ + lockdep_assert_preemption_disabled(); + + if (likely(READ_ONCE(node->rethook->handler))) + freelist_add(&node->freelist, &node->rethook->pool); + else + call_rcu(&node->rcu, free_rethook_node_rcu); +} +NOKPROBE_SYMBOL(rethook_recycle); + +/** + * rethook_try_get() - get an unused rethook node. + * @rh: The struct rethook which pools the nodes. + * + * Get an unused rethook node from @rh. If the node pool is empty, this + * will return NULL. Caller must disable preemption. 
+ */ +struct rethook_node *rethook_try_get(struct rethook *rh) +{ + rethook_handler_t handler = READ_ONCE(rh->handler); + struct freelist_node *fn; + + lockdep_assert_preemption_disabled(); + + /* Check whether @rh is going to be freed. */ + if (unlikely(!handler)) + return NULL; + + fn = freelist_try_get(&rh->pool); + if (!fn) + return NULL; + + return container_of(fn, struct rethook_node, freelist); +} +NOKPROBE_SYMBOL(rethook_try_get); + +/** + * rethook_hook() - Hook the current function return. + * @node: The struct rethook node to hook the function return. + * @regs: The struct pt_regs for the function entry. + * @mcount: True if this is called from mcount(ftrace) context. + * + * Hook the return of the currently running function. This must be called at + * the function entry (or at least @regs must be the registers of the function + * entry.) @mcount is used for identifying the context. If this is called + * from the ftrace (mcount) callback, @mcount must be set true. If this is called + * from the real function entry (e.g. kprobes) @mcount must be set false. + * This is because the way to hook the function return depends on the context. + */ +void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount) +{ + arch_rethook_prepare(node, regs, mcount); + __llist_add(&node->llist, &current->rethooks); +} +NOKPROBE_SYMBOL(rethook_hook); + +/* This assumes the 'tsk' is the current task or is not running. */ +static unsigned long __rethook_find_ret_addr(struct task_struct *tsk, + struct llist_node **cur) +{ + struct rethook_node *rh = NULL; + struct llist_node *node = *cur; + + if (!node) + node = tsk->rethooks.first; + else + node = node->next; + + while (node) { + rh = container_of(node, struct rethook_node, llist); + if (rh->ret_addr != (unsigned long)arch_rethook_trampoline) { + *cur = node; + return rh->ret_addr; + } + node = node->next; + } + return 0; +} +NOKPROBE_SYMBOL(__rethook_find_ret_addr); + +/** + * rethook_find_ret_addr -- Find correct return address modified by rethook + * @tsk: Target task + * @frame: A frame pointer + * @cur: a storage of the loop cursor llist_node pointer for next call + * + * Find the correct return address modified by a rethook on @tsk in unsigned + * long type. + * The @tsk must be 'current' or a task which is not running. @frame is a hint + * to get the correct return address - which is compared with the + * rethook::frame field. The @cur is a loop cursor for searching the + * kretprobe return addresses on the @tsk. The '*@cur' should be NULL at the + * first call, but '@cur' itself must not be NULL. + * + * Returns the found address value or zero if not found. + */ +unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame, + struct llist_node **cur) +{ + struct rethook_node *rhn = NULL; + unsigned long ret; + + if (WARN_ON_ONCE(!cur)) + return 0; + + if (WARN_ON_ONCE(tsk != current && task_is_running(tsk))) + return 0; + + do { + ret = __rethook_find_ret_addr(tsk, cur); + if (!ret) + break; + rhn = container_of(*cur, struct rethook_node, llist); + } while (rhn->frame != frame); + + return ret; +} +NOKPROBE_SYMBOL(rethook_find_ret_addr); + +void __weak arch_rethook_fixup_return(struct pt_regs *regs, + unsigned long correct_ret_addr) +{ + /* + * Do nothing by default. If the architecture uses a frame pointer to + * record the real return address on the stack, it should implement + * this function to fix up the return address + * so that stacktrace works from the rethook handler.
+ */ +} + +/* This function will be called from each arch-defined trampoline. */ +unsigned long rethook_trampoline_handler(struct pt_regs *regs, + unsigned long frame) +{ + struct llist_node *first, *node = NULL; + unsigned long correct_ret_addr; + rethook_handler_t handler; + struct rethook_node *rhn; + + correct_ret_addr = __rethook_find_ret_addr(current, &node); + if (!correct_ret_addr) { + pr_err("rethook: Return address not found! Maybe there is a bug in the kernel\n"); + BUG_ON(1); + } + + instruction_pointer_set(regs, correct_ret_addr); + + /* + * These loops must be protected from rethook_free_rcu() because those + * are accessing 'rhn->rethook'. + */ + preempt_disable(); + + /* + * Run the handler on the shadow stack. Do not unlink the list here because + * stackdump inside the handlers needs to decode it. + */ + first = current->rethooks.first; + while (first) { + rhn = container_of(first, struct rethook_node, llist); + if (WARN_ON_ONCE(rhn->frame != frame)) + break; + handler = READ_ONCE(rhn->rethook->handler); + if (handler) + handler(rhn, rhn->rethook->data, regs); + + if (first == node) + break; + first = first->next; + } + + /* Fixup registers for returning to correct address. */ + arch_rethook_fixup_return(regs, correct_ret_addr); + + /* Unlink used shadow stack */ + first = current->rethooks.first; + current->rethooks.first = node->next; + node->next = NULL; + + while (first) { + rhn = container_of(first, struct rethook_node, llist); + first = first->next; + rethook_recycle(rhn); + } + preempt_enable(); + + return correct_ret_addr; +} +NOKPROBE_SYMBOL(rethook_trampoline_handler); From 75caf33eda242e2f34f61e475d666359749ae5ff Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:01:02 +0900 Subject: [PATCH 078/137] rethook: x86: Add rethook x86 implementation Add rethook for x86 implementation. Most of the code has been copied from kretprobes on x86. 
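Before the arch-specific pieces, a sketch of how a function-entry handler is expected to drive the rethook API added in the previous patch (the caller below is hypothetical; fprobe becomes a real user of this API later in the series):

  /* Hypothetical rethook user; assumes preemption is disabled in on_entry(). */
  static void my_ret_handler(struct rethook_node *rhn, void *data,
                             struct pt_regs *regs)
  {
          /* runs when a hooked function returns */
  }

  static struct rethook *rh;

  static int my_init(void)
  {
          struct rethook_node *node;
          int i;

          rh = rethook_alloc(NULL, my_ret_handler);
          if (!rh)
                  return -ENOMEM;
          /* Pre-fill the node pool; users may instead embed rethook_node
           * in a larger per-instance structure. */
          for (i = 0; i < 8; i++) {
                  node = kzalloc(sizeof(*node), GFP_KERNEL);
                  if (node)
                          rethook_add_node(rh, node);
          }
          return 0;
  }

  /* Called from some function-entry context (e.g. an ftrace handler). */
  static void on_entry(struct pt_regs *regs)
  {
          struct rethook_node *node = rethook_try_get(rh);

          if (node)       /* pool may be exhausted */
                  rethook_hook(node, regs, true /* mcount context */);
  }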
Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735286243.1084943.7477055110527046644.stgit@devnote2 --- arch/x86/Kconfig | 1 + arch/x86/include/asm/unwind.h | 8 ++- arch/x86/kernel/Makefile | 1 + arch/x86/kernel/kprobes/common.h | 1 + arch/x86/kernel/rethook.c | 119 +++++++++++++++++++++++++++++++ 5 files changed, 129 insertions(+), 1 deletion(-) create mode 100644 arch/x86/kernel/rethook.c diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9b356da6f46b..1270b65a5546 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -221,6 +221,7 @@ config X86 select HAVE_KPROBES_ON_FTRACE select HAVE_FUNCTION_ERROR_INJECTION select HAVE_KRETPROBES + select HAVE_RETHOOK select HAVE_KVM select HAVE_LIVEPATCH if X86_64 select HAVE_MIXED_BREAKPOINTS_REGS diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 2a1f8734416d..192df5b2094d 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -16,7 +17,7 @@ struct unwind_state { unsigned long stack_mask; struct task_struct *task; int graph_idx; -#ifdef CONFIG_KRETPROBES +#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) struct llist_node *kr_cur; #endif bool error; @@ -107,6 +108,11 @@ static inline unsigned long unwind_recover_kretprobe(struct unwind_state *state, unsigned long addr, unsigned long *addr_p) { +#ifdef CONFIG_RETHOOK + if (is_rethook_trampoline(addr)) + return rethook_find_ret_addr(state->task, (unsigned long)addr_p, + &state->kr_cur); +#endif #ifdef CONFIG_KRETPROBES return is_kretprobe_trampoline(addr) ? kretprobe_find_ret_addr(state->task, addr_p, &state->kr_cur) : diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 6aef9ee28a39..792a893a5cc5 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -106,6 +106,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_X86_TSC) += trace_clock.o obj-$(CONFIG_TRACING) += trace.o +obj-$(CONFIG_RETHOOK) += rethook.o obj-$(CONFIG_CRASH_CORE) += crash_core_$(BITS).o obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h index 7d3a2e2daf01..c993521d4933 100644 --- a/arch/x86/kernel/kprobes/common.h +++ b/arch/x86/kernel/kprobes/common.h @@ -6,6 +6,7 @@ #include #include +#include #ifdef CONFIG_X86_64 diff --git a/arch/x86/kernel/rethook.c b/arch/x86/kernel/rethook.c new file mode 100644 index 000000000000..f0f2f0608282 --- /dev/null +++ b/arch/x86/kernel/rethook.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * x86 implementation of rethook. Mostly copied from arch/x86/kernel/kprobes/core.c. + */ +#include +#include +#include + +#include "kprobes/common.h" + +__visible void arch_rethook_trampoline_callback(struct pt_regs *regs); + +/* + * When a target function returns, this code saves registers and calls + * arch_rethook_trampoline_callback(), which calls the rethook handler. + */ +asm( + ".text\n" + ".global arch_rethook_trampoline\n" + ".type arch_rethook_trampoline, @function\n" + "arch_rethook_trampoline:\n" +#ifdef CONFIG_X86_64 + /* Push a fake return address to tell the unwinder it's a kretprobe. 
*/ + " pushq $arch_rethook_trampoline\n" + UNWIND_HINT_FUNC + /* Save the 'sp - 8', this will be fixed later. */ + " pushq %rsp\n" + " pushfq\n" + SAVE_REGS_STRING + " movq %rsp, %rdi\n" + " call arch_rethook_trampoline_callback\n" + RESTORE_REGS_STRING + /* In the callback function, 'regs->flags' is copied to 'regs->sp'. */ + " addq $8, %rsp\n" + " popfq\n" +#else + /* Push a fake return address to tell the unwinder it's a kretprobe. */ + " pushl $arch_rethook_trampoline\n" + UNWIND_HINT_FUNC + /* Save the 'sp - 4', this will be fixed later. */ + " pushl %esp\n" + " pushfl\n" + SAVE_REGS_STRING + " movl %esp, %eax\n" + " call arch_rethook_trampoline_callback\n" + RESTORE_REGS_STRING + /* In the callback function, 'regs->flags' is copied to 'regs->sp'. */ + " addl $4, %esp\n" + " popfl\n" +#endif + " ret\n" + ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n" +); +NOKPROBE_SYMBOL(arch_rethook_trampoline); + +/* + * Called from arch_rethook_trampoline + */ +__used __visible void arch_rethook_trampoline_callback(struct pt_regs *regs) +{ + unsigned long *frame_pointer; + + /* fixup registers */ + regs->cs = __KERNEL_CS; +#ifdef CONFIG_X86_32 + regs->gs = 0; +#endif + regs->ip = (unsigned long)&arch_rethook_trampoline; + regs->orig_ax = ~0UL; + regs->sp += sizeof(long); + frame_pointer = &regs->sp + 1; + + /* + * The return address at 'frame_pointer' is recovered by + * arch_rethook_fixup_return(), which is called from + * rethook_trampoline_handler(). + */ + rethook_trampoline_handler(regs, (unsigned long)frame_pointer); + + /* + * Copy FLAGS to 'pt_regs::sp' so that arch_rethook_trampoline() + * can do RET right after POPF. + */ + regs->sp = regs->flags; +} +NOKPROBE_SYMBOL(arch_rethook_trampoline_callback); + +/* + * arch_rethook_trampoline() skips updating the frame pointer. The frame pointer + * saved in arch_rethook_trampoline_callback() points to the real caller + * function's frame pointer. Thus the arch_rethook_trampoline() doesn't have + * a standard stack frame with CONFIG_FRAME_POINTER=y. + * Let's mark it as a non-standard function. Anyway, the FP unwinder can correctly + * unwind without the hint. + */ +STACK_FRAME_NON_STANDARD_FP(arch_rethook_trampoline); + +/* This is called from rethook_trampoline_handler(). */ +void arch_rethook_fixup_return(struct pt_regs *regs, + unsigned long correct_ret_addr) +{ + unsigned long *frame_pointer = &regs->sp + 1; + + /* Replace the fake return address with the real one. */ + *frame_pointer = correct_ret_addr; +} +NOKPROBE_SYMBOL(arch_rethook_fixup_return); + +void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount) +{ + unsigned long *stack = (unsigned long *)regs->sp; + + rh->ret_addr = stack[0]; + rh->frame = regs->sp; + + /* Replace the return addr with trampoline addr */ + stack[0] = (unsigned long) arch_rethook_trampoline; +} +NOKPROBE_SYMBOL(arch_rethook_prepare); From 83acdce6894908337ca82973149d9709d28204d7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:01:13 +0900 Subject: [PATCH 079/137] arm64: rethook: Add arm64 rethook implementation Add the rethook arm64 implementation. Most of the code has been copied from kretprobes on arm64.
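In outline, every architecture port supplies the same two pieces: an arch_rethook_prepare() that records the real return address and a frame marker in the rethook_node and redirects the return to arch_rethook_trampoline(), plus the trampoline itself, which saves registers, calls rethook_trampoline_handler(), and returns to the recovered address. A pseudo-arch sketch of that contract (the regs accessors below are hypothetical stand-ins, not a real API; the actual arm64 code follows):

  /* Pseudo-arch sketch; regs_return_address() etc. are hypothetical. */
  void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs,
                            bool mcount)
  {
          rh->ret_addr = regs_return_address(regs);   /* save real return addr */
          rh->frame = regs_frame_pointer(regs);       /* identify this frame */
          regs_set_return_address(regs,
                          (unsigned long)arch_rethook_trampoline);
  }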
Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735287344.1084943.9787335632585653418.stgit@devnote2 --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/stacktrace.h | 2 +- arch/arm64/kernel/probes/Makefile | 1 + arch/arm64/kernel/probes/rethook.c | 25 ++++++ arch/arm64/kernel/probes/rethook_trampoline.S | 87 +++++++++++++++++++ arch/arm64/kernel/stacktrace.c | 7 +- 6 files changed, 121 insertions(+), 2 deletions(-) create mode 100644 arch/arm64/kernel/probes/rethook.c create mode 100644 arch/arm64/kernel/probes/rethook_trampoline.S diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 09b885cc4db5..62cff9d0a5bb 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -201,6 +201,7 @@ config ARM64 select HAVE_SYSCALL_TRACEPOINTS select HAVE_KPROBES select HAVE_KRETPROBES + select HAVE_RETHOOK select HAVE_GENERIC_VDSO select IOMMU_DMA if IOMMU_SUPPORT select IRQ_DOMAIN diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index e77cdef9ca29..bf04107da97c 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -58,7 +58,7 @@ struct stackframe { DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES); unsigned long prev_fp; enum stack_type prev_type; -#ifdef CONFIG_KRETPROBES +#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) struct llist_node *kr_cur; #endif }; diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile index 8e4be92e25b1..24e689f44c32 100644 --- a/arch/arm64/kernel/probes/Makefile +++ b/arch/arm64/kernel/probes/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \ simulate-insn.o obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o \ simulate-insn.o +obj-$(CONFIG_RETHOOK) += rethook.o rethook_trampoline.o diff --git a/arch/arm64/kernel/probes/rethook.c b/arch/arm64/kernel/probes/rethook.c new file mode 100644 index 000000000000..edc6b804ad6a --- /dev/null +++ b/arch/arm64/kernel/probes/rethook.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic return hook for arm64. + * Most of the code is copied from arch/arm64/kernel/probes/kprobes.c + */ + +#include +#include + +/* This is called from arch_rethook_trampoline() */ +unsigned long __used arch_rethook_trampoline_callback(struct pt_regs *regs) +{ + return rethook_trampoline_handler(regs, regs->regs[29]); +} +NOKPROBE_SYMBOL(arch_rethook_trampoline_callback); + +void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount) +{ + rhn->ret_addr = regs->regs[30]; + rhn->frame = regs->regs[29]; + + /* replace return addr (x30) with trampoline */ + regs->regs[30] = (u64)arch_rethook_trampoline; +} +NOKPROBE_SYMBOL(arch_rethook_prepare); diff --git a/arch/arm64/kernel/probes/rethook_trampoline.S b/arch/arm64/kernel/probes/rethook_trampoline.S new file mode 100644 index 000000000000..610f520ee72b --- /dev/null +++ b/arch/arm64/kernel/probes/rethook_trampoline.S @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * trampoline entry and return code for rethook. 
+ * Copied from arch/arm64/kernel/probes/kprobes_trampoline.S + */ + +#include +#include +#include + + .text + + .macro save_all_base_regs + stp x0, x1, [sp, #S_X0] + stp x2, x3, [sp, #S_X2] + stp x4, x5, [sp, #S_X4] + stp x6, x7, [sp, #S_X6] + stp x8, x9, [sp, #S_X8] + stp x10, x11, [sp, #S_X10] + stp x12, x13, [sp, #S_X12] + stp x14, x15, [sp, #S_X14] + stp x16, x17, [sp, #S_X16] + stp x18, x19, [sp, #S_X18] + stp x20, x21, [sp, #S_X20] + stp x22, x23, [sp, #S_X22] + stp x24, x25, [sp, #S_X24] + stp x26, x27, [sp, #S_X26] + stp x28, x29, [sp, #S_X28] + add x0, sp, #PT_REGS_SIZE + stp lr, x0, [sp, #S_LR] + /* + * Construct a useful saved PSTATE + */ + mrs x0, nzcv + mrs x1, daif + orr x0, x0, x1 + mrs x1, CurrentEL + orr x0, x0, x1 + mrs x1, SPSel + orr x0, x0, x1 + stp xzr, x0, [sp, #S_PC] + .endm + + .macro restore_all_base_regs + ldr x0, [sp, #S_PSTATE] + and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT) + msr nzcv, x0 + ldp x0, x1, [sp, #S_X0] + ldp x2, x3, [sp, #S_X2] + ldp x4, x5, [sp, #S_X4] + ldp x6, x7, [sp, #S_X6] + ldp x8, x9, [sp, #S_X8] + ldp x10, x11, [sp, #S_X10] + ldp x12, x13, [sp, #S_X12] + ldp x14, x15, [sp, #S_X14] + ldp x16, x17, [sp, #S_X16] + ldp x18, x19, [sp, #S_X18] + ldp x20, x21, [sp, #S_X20] + ldp x22, x23, [sp, #S_X22] + ldp x24, x25, [sp, #S_X24] + ldp x26, x27, [sp, #S_X26] + ldp x28, x29, [sp, #S_X28] + .endm + +SYM_CODE_START(arch_rethook_trampoline) + sub sp, sp, #PT_REGS_SIZE + + save_all_base_regs + + /* Setup a frame pointer. */ + add x29, sp, #S_FP + + mov x0, sp + bl arch_rethook_trampoline_callback + /* + * Replace trampoline address in lr with actual orig_ret_addr return + * address. + */ + mov lr, x0 + + /* The frame pointer (x29) is restored with other registers. */ + restore_all_base_regs + + add sp, sp, #PT_REGS_SIZE + ret + +SYM_CODE_END(arch_rethook_trampoline) diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index e4103e085681..efb5b85b57c2 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -38,7 +39,7 @@ static notrace void start_backtrace(struct stackframe *frame, unsigned long fp, { frame->fp = fp; frame->pc = pc; -#ifdef CONFIG_KRETPROBES +#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) frame->kr_cur = NULL; #endif @@ -138,6 +139,10 @@ static int notrace unwind_frame(struct task_struct *tsk, if (is_kretprobe_trampoline(frame->pc)) frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur); #endif +#ifdef CONFIG_RETHOOK + if (is_rethook_trampoline(frame->pc)) + frame->pc = rethook_find_ret_addr(tsk, frame->fp, &frame->kr_cur); +#endif return 0; } From 02752bd99dc2daae05c12f7063bf0632e22b4c1c Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:01:25 +0900 Subject: [PATCH 080/137] powerpc: Add rethook support Add rethook powerpc64 implementation. Most of the code has been copied from kretprobes on powerpc64. 
Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735288495.1084943.539630613772422267.stgit@devnote2 --- arch/powerpc/Kconfig | 1 + arch/powerpc/kernel/Makefile | 1 + arch/powerpc/kernel/rethook.c | 72 +++++++++++++++++++++++++++++++++++ 3 files changed, 74 insertions(+) create mode 100644 arch/powerpc/kernel/rethook.c diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b779603978e1..5feaa241fb56 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -229,6 +229,7 @@ config PPC select HAVE_PERF_EVENTS_NMI if PPC64 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_RETHOOK if KPROBES select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE select HAVE_RSEQ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 4d7829399570..feb24ea83ca6 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -115,6 +115,7 @@ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_OPTPROBES) += optprobes.o optprobes_head.o obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o +obj-$(CONFIG_RETHOOK) += rethook.o obj-$(CONFIG_UPROBES) += uprobes.o obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o diff --git a/arch/powerpc/kernel/rethook.c b/arch/powerpc/kernel/rethook.c new file mode 100644 index 000000000000..a8a128748efa --- /dev/null +++ b/arch/powerpc/kernel/rethook.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PowerPC implementation of rethook. This depends on kprobes. + */ + +#include +#include + +/* + * Function return trampoline: + * - init_arch_rethook() establishes a probepoint here + * - When the probed function returns, this probe + * causes the handlers to fire + */ +asm(".global arch_rethook_trampoline\n" + ".type arch_rethook_trampoline, @function\n" + "arch_rethook_trampoline:\n" + "nop\n" + "blr\n" + ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n"); + +/* + * Called when the probe at the rethook trampoline is hit + */ +static int trampoline_rethook_handler(struct kprobe *p, struct pt_regs *regs) +{ + unsigned long orig_ret_address; + + orig_ret_address = rethook_trampoline_handler(regs, 0); + /* + * We get here through one of two paths: + * 1. by taking a trap -> kprobe_handler() -> here + * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here + * + * When going back through (1), we need regs->nip to be setup properly + * as it is used to determine the return address from the trap. + * For (2), since nip is not honoured with optprobes, we instead setup + * the link register properly so that the subsequent 'blr' in + * arch_rethook_trampoline jumps back to the right instruction. + * + * For nip, we should set the address to the previous instruction since + * we end up emulating it in kprobe_handler(), which increments the nip + * again. + */ + regs_set_return_ip(regs, orig_ret_address - 4); + regs->link = orig_ret_address; + + return 0; +} +NOKPROBE_SYMBOL(trampoline_rethook_handler); + +void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount) +{ + rh->ret_addr = regs->link; + rh->frame = 0; + + /* Replace the return addr with trampoline addr */ + regs->link = (unsigned long)arch_rethook_trampoline; +} +NOKPROBE_SYMBOL(arch_rethook_prepare); + +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *) &arch_rethook_trampoline, + .pre_handler = trampoline_rethook_handler +}; + +static int init_arch_rethook(void) +{ + return register_kprobe(&trampoline_p); +} + +core_initcall(init_arch_rethook); From 5b0ab78998e32564a011b14c4c7f9c81e2d42b9d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:01:36 +0900 Subject: [PATCH 081/137] ARM: rethook: Add rethook arm implementation Add rethook arm implementation. Most of the code has been copied from kretprobes on arm. Since arm's ftrace implementation is a bit special, using this from fprobe needs special care. Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735289643.1084943.15184590256680485720.stgit@devnote2 --- arch/arm/Kconfig | 1 + arch/arm/include/asm/stacktrace.h | 4 +- arch/arm/kernel/stacktrace.c | 6 ++ arch/arm/probes/Makefile | 1 + arch/arm/probes/rethook.c | 103 ++++++++++++++++++++++++++++++ 5 files changed, 113 insertions(+), 2 deletions(-) create mode 100644 arch/arm/probes/rethook.c diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 4c97cb40eebb..440f69ee8af5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -107,6 +107,7 @@ config ARM select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI select HAVE_OPTPROBES if !THUMB2_KERNEL + select HAVE_RETHOOK select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h index 8f54f9ad8a9b..babed1707ca8 100644 --- a/arch/arm/include/asm/stacktrace.h +++ b/arch/arm/include/asm/stacktrace.h @@ -14,7 +14,7 @@ struct stackframe { unsigned long sp; unsigned long lr; unsigned long pc; -#ifdef CONFIG_KRETPROBES +#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) struct llist_node *kr_cur; struct task_struct *tsk; #endif @@ -27,7 +27,7 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame) frame->sp = regs->ARM_sp; frame->lr = regs->ARM_lr; frame->pc = regs->ARM_pc; -#ifdef CONFIG_KRETPROBES +#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) frame->kr_cur = NULL; frame->tsk = current; #endif diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 75e905508f27..f509c6be4f57 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only #include #include +#include #include #include #include @@ -66,6 +67,11 @@ int notrace unwind_frame(struct stackframe *frame) frame->sp = *(unsigned long *)(fp - 8); frame->pc = *(unsigned long *)(fp - 4); #endif +#ifdef CONFIG_RETHOOK + if (is_rethook_trampoline(frame->pc)) + frame->pc = rethook_find_ret_addr(frame->tsk, frame->fp, + &frame->kr_cur); +#endif #ifdef CONFIG_KRETPROBES if (is_kretprobe_trampoline(frame->pc)) frame->pc = kretprobe_find_ret_addr(frame->tsk, diff --git a/arch/arm/probes/Makefile b/arch/arm/probes/Makefile index 8b0ea5ace100..10c083a22223 100644 ---
a/arch/arm/probes/Makefile +++ b/arch/arm/probes/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_KPROBES) += decode-thumb.o else obj-$(CONFIG_KPROBES) += decode-arm.o endif +obj-$(CONFIG_RETHOOK) += rethook.o diff --git a/arch/arm/probes/rethook.c b/arch/arm/probes/rethook.c new file mode 100644 index 000000000000..1c1357a86365 --- /dev/null +++ b/arch/arm/probes/rethook.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arm implementation of rethook. Mostly copied from arch/arm/probes/kprobes/core.c + */ + +#include +#include + +/* Called from arch_rethook_trampoline */ +static __used unsigned long arch_rethook_trampoline_callback(struct pt_regs *regs) +{ + return rethook_trampoline_handler(regs, regs->ARM_fp); +} +NOKPROBE_SYMBOL(arch_rethook_trampoline_callback); + +/* + * When a rethook'ed function returns, it returns to arch_rethook_trampoline + * which calls rethook callback. We construct a struct pt_regs to + * give a view of registers r0-r11, sp, lr, and pc to the user + * return-handler. This is not a complete pt_regs structure, but that + * should be enough for stacktrace from the return handler with or + * without pt_regs. + */ +asm( + ".text\n" + ".global arch_rethook_trampoline\n" + ".type arch_rethook_trampoline, %function\n" + "arch_rethook_trampoline:\n" +#ifdef CONFIG_FRAME_POINTER + "ldr lr, =arch_rethook_trampoline \n\t" + /* this makes a framepointer on pt_regs. */ +#ifdef CONFIG_CC_IS_CLANG + "stmdb sp, {sp, lr, pc} \n\t" + "sub sp, sp, #12 \n\t" + /* In clang case, pt_regs->ip = lr. */ + "stmdb sp!, {r0 - r11, lr} \n\t" + /* fp points regs->r11 (fp) */ + "add fp, sp, #44 \n\t" +#else /* !CONFIG_CC_IS_CLANG */ + /* In gcc case, pt_regs->ip = fp. */ + "stmdb sp, {fp, sp, lr, pc} \n\t" + "sub sp, sp, #16 \n\t" + "stmdb sp!, {r0 - r11} \n\t" + /* fp points regs->r15 (pc) */ + "add fp, sp, #60 \n\t" +#endif /* CONFIG_CC_IS_CLANG */ +#else /* !CONFIG_FRAME_POINTER */ + "sub sp, sp, #16 \n\t" + "stmdb sp!, {r0 - r11} \n\t" +#endif /* CONFIG_FRAME_POINTER */ + "mov r0, sp \n\t" + "bl arch_rethook_trampoline_callback \n\t" + "mov lr, r0 \n\t" + "ldmia sp!, {r0 - r11} \n\t" + "add sp, sp, #16 \n\t" +#ifdef CONFIG_THUMB2_KERNEL + "bx lr \n\t" +#else + "mov pc, lr \n\t" +#endif + ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n" +); +NOKPROBE_SYMBOL(arch_rethook_trampoline); + +/* + * At the entry of function with mcount. The stack and registers are prepared + * for the mcount function as below. + * + * mov ip, sp + * push {fp, ip, lr, pc} + * sub fp, ip, #4 ; FP[0] = PC, FP[-4] = LR, and FP[-12] = call-site FP. + * push {lr} + * bl <__gnu_mcount_nc> ; call ftrace + * + * And when returning from the function, call-site FP, SP and PC are restored + * from stack as below; + * + * ldm sp, {fp, sp, pc} + * + * Thus, if the arch_rethook_prepare() is called from real function entry, + * it must change the LR and save FP in pt_regs. But if it is called via + * mcount context (ftrace), it must change the LR on stack, which is next + * to the PC (= FP[-4]), and save the FP value at FP[-12]. + */ +void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount) +{ + unsigned long *ret_addr, *frame; + + if (mcount) { + ret_addr = (unsigned long *)(regs->ARM_fp - 4); + frame = (unsigned long *)(regs->ARM_fp - 12); + } else { + ret_addr = &regs->ARM_lr; + frame = &regs->ARM_fp; + } + + rh->ret_addr = *ret_addr; + rh->frame = *frame; + + /* Replace the return addr with trampoline addr.
*/ + *ret_addr = (unsigned long)arch_rethook_trampoline; +} +NOKPROBE_SYMBOL(arch_rethook_prepare); From 5b0ab78998e32564a011b14c4c7f9c81e2d42b9d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:01:48 +0900 Subject: [PATCH 082/137] fprobe: Add exit_handler support Add exit_handler to fprobe. fprobe + rethook allows us to hook the kernel function return. The rethook will be enabled only if fprobe::exit_handler is set. Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735290790.1084943.10601965782208052202.stgit@devnote2 --- include/linux/fprobe.h | 6 +++ kernel/trace/Kconfig | 9 ++-- kernel/trace/fprobe.c | 118 ++++++++++++++++++++++++++++++++++++++--- 3 files changed, 123 insertions(+), 10 deletions(-) diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h index 2ba099aff041..8eefec2b485e 100644 --- a/include/linux/fprobe.h +++ b/include/linux/fprobe.h @@ -5,13 +5,16 @@ #include #include +#include /** * struct fprobe - ftrace based probe. * @ops: The ftrace_ops. * @nmissed: The counter for missing events. * @flags: The status flag. + * @rethook: The rethook data structure. (internal data) * @entry_handler: The callback function for function entry. + * @exit_handler: The callback function for function exit. */ struct fprobe { #ifdef CONFIG_FUNCTION_TRACER @@ -25,7 +28,10 @@ struct fprobe { #endif unsigned long nmissed; unsigned int flags; + struct rethook *rethook; + void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); + void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); }; #define FPROBE_FL_DISABLED 1 diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index e75504e42ab8..99dd4ca63d68 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -251,11 +251,14 @@ config FPROBE bool "Kernel Function Probe (fprobe)" depends on FUNCTION_TRACER depends on DYNAMIC_FTRACE_WITH_REGS + depends on HAVE_RETHOOK + select RETHOOK default n help - This option enables kernel function probe (fprobe) based on ftrace, - which is similar to kprobes, but probes only for kernel function - entries and it can probe multiple functions by one fprobe. + This option enables kernel function probe (fprobe) based on ftrace. + The fprobe is similar to kprobes, but probes only for kernel function + entries and exits. It can also probe multiple functions with one + fprobe. If unsure, say N.
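A minimal usage sketch of the exit_handler added by this patch (the callback name and probed symbol are illustrative; the pattern follows the sample module added later in this series):

	static void my_exit_cb(struct fprobe *fp, unsigned long entry_ip,
			       struct pt_regs *regs)
	{
		pr_info("returned from %pS\n", (void *)entry_ip);
	}

	static struct fprobe fp = {
		.exit_handler = my_exit_cb,	/* setting this enables the rethook */
	};

	/* from module init; fires on every return of kernel_clone() */
	err = register_fprobe(&fp, "kernel_clone", NULL);

Because the rethook shadow stack is only allocated when exit_handler is set, entry-only users pay no additional cost.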
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c index 7e8ceee339a0..38073632bfe4 100644 --- a/kernel/trace/fprobe.c +++ b/kernel/trace/fprobe.c @@ -8,12 +8,22 @@ #include #include #include +#include #include #include +#include "trace.h" + +struct fprobe_rethook_node { + struct rethook_node node; + unsigned long entry_ip; +}; + static void fprobe_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct ftrace_regs *fregs) { + struct fprobe_rethook_node *fpr; + struct rethook_node *rh; struct fprobe *fp; int bit; @@ -30,10 +40,37 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip, if (fp->entry_handler) fp->entry_handler(fp, ip, ftrace_get_regs(fregs)); + if (fp->exit_handler) { + rh = rethook_try_get(fp->rethook); + if (!rh) { + fp->nmissed++; + goto out; + } + fpr = container_of(rh, struct fprobe_rethook_node, node); + fpr->entry_ip = ip; + rethook_hook(rh, ftrace_get_regs(fregs), true); + } + +out: ftrace_test_recursion_unlock(bit); } NOKPROBE_SYMBOL(fprobe_handler); + +static void fprobe_exit_handler(struct rethook_node *rh, void *data, + struct pt_regs *regs) +{ + struct fprobe *fp = (struct fprobe *)data; + struct fprobe_rethook_node *fpr; + + if (!fp || fprobe_disabled(fp)) + return; + + fpr = container_of(rh, struct fprobe_rethook_node, node); + + fp->exit_handler(fp, fpr->entry_ip, regs); +} +NOKPROBE_SYMBOL(fprobe_exit_handler); + /* Convert ftrace location address from symbols */ static unsigned long *get_ftrace_locations(const char **syms, int num) { @@ -77,6 +114,48 @@ static void fprobe_init(struct fprobe *fp) fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS; } +static int fprobe_init_rethook(struct fprobe *fp, int num) +{ + int i, size; + + if (num < 0) + return -EINVAL; + + if (!fp->exit_handler) { + fp->rethook = NULL; + return 0; + } + + /* Initialize rethook if needed */ + size = num * num_possible_cpus() * 2; + if (size < 0) + return -E2BIG; + + fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler); + if (!fp->rethook) + return -ENOMEM; + for (i = 0; i < size; i++) { + struct rethook_node *node; + + node = kzalloc(sizeof(struct fprobe_rethook_node), GFP_KERNEL); + if (!node) { + rethook_free(fp->rethook); + fp->rethook = NULL; + return -ENOMEM; + } + rethook_add_node(fp->rethook, node); + } + return 0; +} + +static void fprobe_fail_cleanup(struct fprobe *fp) +{ + if (fp->rethook) { + /* Don't need to cleanup rethook->handler because this is not used. */ + rethook_free(fp->rethook); + fp->rethook = NULL; + } + ftrace_free_filter(&fp->ops); +} + /** * register_fprobe() - Register fprobe to ftrace by pattern. * @fp: A fprobe data structure to be registered. @@ -90,6 +169,7 @@ static void fprobe_init(struct fprobe *fp) */ int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter) { + struct ftrace_hash *hash; unsigned char *str; int ret, len; @@ -114,10 +194,21 @@ int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter goto out; } - ret = register_ftrace_function(&fp->ops); + /* TODO: + * correctly calculate the total number of filtered symbols + * from both filter and notfilter.
+ */ + hash = fp->ops.local_hash.filter_hash; + if (WARN_ON_ONCE(!hash)) + goto out; + + ret = fprobe_init_rethook(fp, (int)hash->count); + if (!ret) + ret = register_ftrace_function(&fp->ops); + out: if (ret) - ftrace_free_filter(&fp->ops); + fprobe_fail_cleanup(fp); return ret; } EXPORT_SYMBOL_GPL(register_fprobe); @@ -145,12 +236,15 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num) fprobe_init(fp); ret = ftrace_set_filter_ips(&fp->ops, addrs, num, 0, 0); + if (ret) + return ret; + + ret = fprobe_init_rethook(fp, num); if (!ret) ret = register_ftrace_function(&fp->ops); if (ret) - ftrace_free_filter(&fp->ops); - + fprobe_fail_cleanup(fp); return ret; } EXPORT_SYMBOL_GPL(register_fprobe_ips); @@ -201,10 +295,20 @@ int unregister_fprobe(struct fprobe *fp) if (!fp || fp->ops.func != fprobe_handler) return -EINVAL; - ret = unregister_ftrace_function(&fp->ops); + /* + * rethook_free() starts disabling the rethook, but the rethook handlers + * may be running on other processors at this point. To make sure that all + * currently running handlers are finished, call unregister_ftrace_function() + * after this. + */ + if (fp->rethook) + rethook_free(fp->rethook); - if (!ret) - ftrace_free_filter(&fp->ops); + ret = unregister_ftrace_function(&fp->ops); + if (ret < 0) + return ret; + + ftrace_free_filter(&fp->ops); return ret; } From 6ee64cc3020b5b5537827c71d8eaac814ad4d346 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:02:00 +0900 Subject: [PATCH 083/137] fprobe: Add sample program for fprobe Add a sample program for the fprobe. The sample_fprobe module puts a fprobe on kernel_clone() by default. It dumps a stack trace and some call address info at function entry and exit. The sample_fprobe.ko module takes 2 parameters. - symbol: you can specify comma-separated symbols, or a wildcard symbol pattern (in which case you cannot use commas) - stackdump: a bool value to enable or disable stack dump in the fprobe handler. Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735291987.1084943.4449670993752806840.stgit@devnote2 --- samples/Kconfig | 7 ++ samples/Makefile | 1 + samples/fprobe/Makefile | 3 + samples/fprobe/fprobe_example.c | 120 ++++++++++++++++++++++++++++++++ 4 files changed, 131 insertions(+) create mode 100644 samples/fprobe/Makefile create mode 100644 samples/fprobe/fprobe_example.c diff --git a/samples/Kconfig b/samples/Kconfig index 22cc921ae291..8415d60ea5f4 100644 --- a/samples/Kconfig +++ b/samples/Kconfig @@ -73,6 +73,13 @@ config SAMPLE_HW_BREAKPOINT help This builds kernel hardware breakpoint example modules. +config SAMPLE_FPROBE + tristate "Build fprobe examples -- loadable modules only" + depends on FPROBE && m + help + This builds a fprobe example module. This module has an option 'symbol'. + You can specify a probed symbol or symbols separated with ','.
+ config SAMPLE_KFIFO tristate "Build kfifo examples -- loadable modules only" depends on m diff --git a/samples/Makefile b/samples/Makefile index 1ae4de99c983..6d662965be5b 100644 --- a/samples/Makefile +++ b/samples/Makefile @@ -33,3 +33,4 @@ subdir-$(CONFIG_SAMPLE_WATCHDOG) += watchdog subdir-$(CONFIG_SAMPLE_WATCH_QUEUE) += watch_queue obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak/ obj-$(CONFIG_SAMPLE_CORESIGHT_SYSCFG) += coresight/ +obj-$(CONFIG_SAMPLE_FPROBE) += fprobe/ diff --git a/samples/fprobe/Makefile b/samples/fprobe/Makefile new file mode 100644 index 000000000000..ecccbfa6e99b --- /dev/null +++ b/samples/fprobe/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_SAMPLE_FPROBE) += fprobe_example.o diff --git a/samples/fprobe/fprobe_example.c b/samples/fprobe/fprobe_example.c new file mode 100644 index 000000000000..24d3cf109140 --- /dev/null +++ b/samples/fprobe/fprobe_example.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Here's a sample kernel module showing the use of fprobe to dump a + * stack trace and selected registers when kernel_clone() is called. + * + * For more information on theory of operation of kprobes, see + * Documentation/trace/kprobes.rst + * + * You will see the trace data in /var/log/messages and on the console + * whenever kernel_clone() is invoked to create a new process. + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include + +#define BACKTRACE_DEPTH 16 +#define MAX_SYMBOL_LEN 4096 +struct fprobe sample_probe; + +static char symbol[MAX_SYMBOL_LEN] = "kernel_clone"; +module_param_string(symbol, symbol, sizeof(symbol), 0644); +static char nosymbol[MAX_SYMBOL_LEN] = ""; +module_param_string(nosymbol, nosymbol, sizeof(nosymbol), 0644); +static bool stackdump = true; +module_param(stackdump, bool, 0644); + +static void show_backtrace(void) +{ + unsigned long stacks[BACKTRACE_DEPTH]; + unsigned int len; + + len = stack_trace_save(stacks, BACKTRACE_DEPTH, 2); + stack_trace_print(stacks, len, 24); +} + +static void sample_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs) +{ + pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip); + if (stackdump) + show_backtrace(); +} + +static void sample_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs) +{ + unsigned long rip = instruction_pointer(regs); + + pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n", + (void *)ip, (void *)ip, (void *)rip, (void *)rip); + if (stackdump) + show_backtrace(); +} + +static int __init fprobe_init(void) +{ + char *p, *symbuf = NULL; + const char **syms; + int ret, count, i; + + sample_probe.entry_handler = sample_entry_handler; + sample_probe.exit_handler = sample_exit_handler; + + if (strchr(symbol, '*')) { + /* filter based fprobe */ + ret = register_fprobe(&sample_probe, symbol, + nosymbol[0] == '\0' ? 
NULL : nosymbol); + goto out; + } else if (!strchr(symbol, ',')) { + symbuf = symbol; + ret = register_fprobe_syms(&sample_probe, (const char **)&symbuf, 1); + goto out; + } + + /* Comma separated symbols */ + symbuf = kstrdup(symbol, GFP_KERNEL); + if (!symbuf) + return -ENOMEM; + p = symbuf; + count = 1; + while ((p = strchr(++p, ',')) != NULL) + count++; + + pr_info("%d symbols found\n", count); + + syms = kcalloc(count, sizeof(char *), GFP_KERNEL); + if (!syms) { + kfree(symbuf); + return -ENOMEM; + } + + p = symbuf; + for (i = 0; i < count; i++) + syms[i] = strsep(&p, ","); + + ret = register_fprobe_syms(&sample_probe, syms, count); + kfree(syms); + kfree(symbuf); +out: + if (ret < 0) + pr_err("register_fprobe failed, returned %d\n", ret); + else + pr_info("Planted fprobe at %s\n", symbol); + + return ret; +} + +static void __exit fprobe_exit(void) +{ + unregister_fprobe(&sample_probe); + + pr_info("fprobe at %s unregistered\n", symbol); +} + +module_init(fprobe_init) +module_exit(fprobe_exit) +MODULE_LICENSE("GPL"); From ab51e15d535e07be9839e0df056a4ebe9c5bac83 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:02:11 +0900 Subject: [PATCH 084/137] fprobe: Introduce FPROBE_FL_KPROBE_SHARED flag for fprobe Introduce FPROBE_FL_KPROBE_SHARED flag for sharing a fprobe callback with kprobes safely from the viewpoint of recursion. Since the recursion safety of the fprobe (and ftrace) is a bit different from that of kprobes, this may cause an issue if a user wants to run the same code from both fprobe and kprobes. Kprobes has a per-cpu 'current_kprobe' variable which protects the kprobe handler from recursion in all cases. On the other hand, fprobe uses only ftrace_test_recursion_trylock(), which allows an interrupt context to call another (or the same) fprobe while the fprobe user handler is running. This does not matter if the common callback shared between kprobes and fprobe has its own recursion detection, or if it can handle recursion in the different contexts (normal/interrupt/NMI). But if it relies on the 'current_kprobe' recursion lock, it has to check kprobe_running() and use kprobe_busy_*() APIs. Fprobe has the FPROBE_FL_KPROBE_SHARED flag to do this. If your common callback code will be shared with kprobes, please set FPROBE_FL_KPROBE_SHARED *before* registering the fprobe, like: fprobe.flags = FPROBE_FL_KPROBE_SHARED; register_fprobe(&fprobe, "func*", NULL); This will protect your common callback from the nested call. Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735293127.1084943.15687374237275817599.stgit@devnote2 --- include/linux/fprobe.h | 12 ++++++++++++ include/linux/kprobes.h | 3 +++ kernel/trace/fprobe.c | 19 ++++++++++++++++++- 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h index 8eefec2b485e..1c2bde0ead73 100644 --- a/include/linux/fprobe.h +++ b/include/linux/fprobe.h @@ -34,13 +34,25 @@ struct fprobe { void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); }; +/* This fprobe is soft-disabled. */ #define FPROBE_FL_DISABLED 1 +/* + * This fprobe handler will be shared with kprobes. + * This flag must be set before registering. + */ +#define FPROBE_FL_KPROBE_SHARED 2 + static inline bool fprobe_disabled(struct fprobe *fp) { return (fp) ?
fp->flags & FPROBE_FL_DISABLED : false; } +static inline bool fprobe_shared_with_kprobes(struct fprobe *fp) +{ + return (fp) ? fp->flags & FPROBE_FL_KPROBE_SHARED : false; +} + #ifdef CONFIG_FPROBE int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter); int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 19b884353b15..5f1859836deb 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -427,6 +427,9 @@ static inline struct kprobe *kprobe_running(void) { return NULL; } +#define kprobe_busy_begin() do {} while (0) +#define kprobe_busy_end() do {} while (0) + static inline int register_kprobe(struct kprobe *p) { return -EOPNOTSUPP; diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c index 38073632bfe4..8b2dd5b9dcd1 100644 --- a/kernel/trace/fprobe.c +++ b/kernel/trace/fprobe.c @@ -56,6 +56,20 @@ out: } NOKPROBE_SYMBOL(fprobe_handler); +static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) +{ + struct fprobe *fp = container_of(ops, struct fprobe, ops); + + if (unlikely(kprobe_running())) { + fp->nmissed++; + return; + } + kprobe_busy_begin(); + fprobe_handler(ip, parent_ip, ops, fregs); + kprobe_busy_end(); +} + static void fprobe_exit_handler(struct rethook_node *rh, void *data, struct pt_regs *regs) { @@ -110,7 +124,10 @@ error: static void fprobe_init(struct fprobe *fp) { fp->nmissed = 0; - fp->ops.func = fprobe_handler; + if (fprobe_shared_with_kprobes(fp)) + fp->ops.func = fprobe_kprobe_handler; + else + fp->ops.func = fprobe_handler; fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS; } From aba09b44a985f963f3ce22132694161f79e18984 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:02:22 +0900 Subject: [PATCH 085/137] docs: fprobe: Add fprobe description to ftrace-use.rst Add a documentation of fprobe for the user who needs this interface. Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735294272.1084943.12372175959382037397.stgit@devnote2 --- Documentation/trace/fprobe.rst | 174 +++++++++++++++++++++++++++++++++ Documentation/trace/index.rst | 1 + 2 files changed, 175 insertions(+) create mode 100644 Documentation/trace/fprobe.rst diff --git a/Documentation/trace/fprobe.rst b/Documentation/trace/fprobe.rst new file mode 100644 index 000000000000..b64bec1ce144 --- /dev/null +++ b/Documentation/trace/fprobe.rst @@ -0,0 +1,174 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================================== +Fprobe - Function entry/exit probe +================================== + +.. Author: Masami Hiramatsu + +Introduction +============ + +Fprobe is a function entry/exit probe mechanism based on ftrace. +Instead of using ftrace full feature, if you only want to attach callbacks +on function entry and exit, similar to the kprobes and kretprobes, you can +use fprobe. Compared with kprobes and kretprobes, fprobe gives faster +instrumentation for multiple functions with single handler. This document +describes how to use fprobe. + +The usage of fprobe +=================== + +The fprobe is a wrapper of ftrace (+ kretprobe-like return callback) to +attach callbacks to multiple function entry and exit. User needs to set up +the `struct fprobe` and pass it to `register_fprobe()`. 
+ +Typically, the `fprobe` data structure is initialized with the `entry_handler` +and/or `exit_handler` as below. + +.. code-block:: c + + struct fprobe fp = { + .entry_handler = my_entry_callback, + .exit_handler = my_exit_callback, + }; + +To enable the fprobe, call one of register_fprobe(), register_fprobe_ips(), or +register_fprobe_syms(). These functions register the fprobe with different types +of parameters. + +The register_fprobe() enables a fprobe by function-name filters. +E.g. this enables @fp on "func*()" functions except "func2()".:: + + register_fprobe(&fp, "func*", "func2"); + +The register_fprobe_ips() enables a fprobe by ftrace-location addresses. +E.g. + +.. code-block:: c + + unsigned long ips[] = { 0x.... }; + + register_fprobe_ips(&fp, ips, ARRAY_SIZE(ips)); + +And the register_fprobe_syms() enables a fprobe by symbol names. +E.g. + +.. code-block:: c + + const char *syms[] = {"func1", "func2", "func3"}; + + register_fprobe_syms(&fp, syms, ARRAY_SIZE(syms)); + +To disable (remove from functions) this fprobe, call:: + + unregister_fprobe(&fp); + +You can temporarily (soft) disable the fprobe by:: + + disable_fprobe(&fp); + +and resume by:: + + enable_fprobe(&fp); + +The above is defined by including the header:: + + #include + +As with ftrace, the registered callbacks will start being called some time +after register_fprobe() is called and before it returns. See +:file:`Documentation/trace/ftrace.rst`. + +Also, unregister_fprobe() guarantees that both entry and exit handlers are +no longer being called after unregister_fprobe() returns, just like +unregister_ftrace_function(). + +The fprobe entry/exit handler +============================= + +The prototype of the entry/exit callback function is as follows: + +.. code-block:: c + + void callback_func(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs); + +Note that both entry and exit callbacks have the same prototype. The @entry_ip +is saved at function entry and passed to the exit handler. + +@fp + This is the address of the `fprobe` data structure related to this handler. + You can embed the `fprobe` into your data structure and get it with the + container_of() macro from @fp. The @fp must not be NULL. + +@entry_ip + This is the ftrace address of the traced function (both entry and exit). + Note that this may not be the actual entry address of the function but + the address where ftrace is instrumented. + +@regs + This is the `pt_regs` data structure at the entry and exit. Note that + the instruction pointer of @regs may be different from the @entry_ip + in the entry_handler. If you need the traced instruction pointer, you + need to use @entry_ip. On the other hand, in the exit_handler, the + instruction pointer of @regs is set to the correct return address. + +Share the callbacks with kprobes +================================ + +Since the recursion safety of the fprobe (and ftrace) is a bit different +from that of kprobes, this may cause an issue if a user wants to run the +same code from fprobe and kprobes. + +Kprobes has a per-cpu 'current_kprobe' variable which protects the kprobe +handler from recursion in all cases. On the other hand, fprobe uses +only ftrace_test_recursion_trylock(). This allows an interrupt context to +call another (or the same) fprobe while the fprobe user handler is running. + +This does not matter if the common callback code has its own recursion +detection, or if it can handle recursion in the different contexts +(normal/interrupt/NMI).
+But if it relies on the 'current_kprobe' recursion lock, it has to check +kprobe_running() and use kprobe_busy_*() APIs. + +Fprobe has the FPROBE_FL_KPROBE_SHARED flag to do this. If your common +callback code will be shared with kprobes, please set FPROBE_FL_KPROBE_SHARED +*before* registering the fprobe, like: + +.. code-block:: c + + fprobe.flags = FPROBE_FL_KPROBE_SHARED; + + register_fprobe(&fprobe, "func*", NULL); + +This will protect your common callback from the nested call. + +The missed counter +================== + +The `fprobe` data structure has a `fprobe::nmissed` counter field, just like +kprobes. +This counter counts up when: + + - fprobe fails to take the ftrace_recursion lock. This usually means that a + function which is traced by other ftrace users is called from the + entry_handler. + + - fprobe fails to setup the function exit because of a shortage of rethook + nodes (the shadow stack for hooking the function return). + +The `fprobe::nmissed` field counts up in both cases. The former skips both +the entry and exit callbacks and the latter skips only the exit callback, +but in both cases the counter increases by 1. + +Note that if you set FTRACE_OPS_FL_RECURSION and/or FTRACE_OPS_FL_RCU in +`fprobe::ops::flags` (ftrace_ops::flags) when registering the fprobe, this +counter may not work correctly, because ftrace skips the fprobe function that +increases the counter. + + +Functions and structures +======================== + +.. kernel-doc:: include/linux/fprobe.h +.. kernel-doc:: kernel/trace/fprobe.c + diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst index 3769b9b7aed8..b9f3757f8269 100644 --- a/Documentation/trace/index.rst +++ b/Documentation/trace/index.rst @@ -9,6 +9,7 @@ Linux Tracing Technologies tracepoint-analysis ftrace ftrace-uses + fprobe kprobes kprobetrace uprobetracer From f4616fabab39e084b5d7e15a32151d9a9aeab537 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 15 Mar 2022 23:02:35 +0900 Subject: [PATCH 086/137] fprobe: Add a selftest for fprobe Add a KUnit based selftest for the fprobe interface. Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (Google) Tested-by: Steven Rostedt (Google) Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/164735295554.1084943.18347620679928750960.stgit@devnote2 --- lib/Kconfig.debug | 12 ++++ lib/Makefile | 2 + lib/test_fprobe.c | 174 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 188 insertions(+) create mode 100644 lib/test_fprobe.c diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 72ca4684beda..b0bf0d224b2c 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2118,6 +2118,18 @@ config KPROBES_SANITY_TEST Say N if you are unsure. +config FPROBE_SANITY_TEST + bool "Self test for fprobe" + depends on DEBUG_KERNEL + depends on FPROBE + depends on KUNIT=y + help + This option will enable testing the fprobe when the system boots. + A series of tests are made to verify that the fprobe is functioning + properly. + + Say N if you are unsure.
+ config BACKTRACE_SELF_TEST tristate "Self test for the backtrace code" depends on DEBUG_KERNEL diff --git a/lib/Makefile b/lib/Makefile index 300f569c626b..154008764b16 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -103,6 +103,8 @@ obj-$(CONFIG_TEST_HMM) += test_hmm.o obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o +CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE) +obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o # # CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns # off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c new file mode 100644 index 000000000000..ed70637a2ffa --- /dev/null +++ b/lib/test_fprobe.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * test_fprobe.c - simple sanity test for fprobe + */ + +#include +#include +#include +#include + +#define div_factor 3 + +static struct kunit *current_test; + +static u32 rand1, entry_val, exit_val; + +/* Use indirect calls to avoid inlining the target functions */ +static u32 (*target)(u32 value); +static u32 (*target2)(u32 value); +static unsigned long target_ip; +static unsigned long target2_ip; + +static noinline u32 fprobe_selftest_target(u32 value) +{ + return (value / div_factor); +} + +static noinline u32 fprobe_selftest_target2(u32 value) +{ + return (value / div_factor) + 1; +} + +static notrace void fp_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs) +{ + KUNIT_EXPECT_FALSE(current_test, preemptible()); + /* This can be called on the fprobe_selftest_target and the fprobe_selftest_target2 */ + if (ip != target_ip) + KUNIT_EXPECT_EQ(current_test, ip, target2_ip); + entry_val = (rand1 / div_factor); +} + +static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs) +{ + unsigned long ret = regs_return_value(regs); + + KUNIT_EXPECT_FALSE(current_test, preemptible()); + if (ip != target_ip) { + KUNIT_EXPECT_EQ(current_test, ip, target2_ip); + KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1); + } else + KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor)); + KUNIT_EXPECT_EQ(current_test, entry_val, (rand1 / div_factor)); + exit_val = entry_val + div_factor; +} + +/* Test entry only (no rethook) */ +static void test_fprobe_entry(struct kunit *test) +{ + struct fprobe fp_entry = { + .entry_handler = fp_entry_handler, + }; + + current_test = test; + + /* Before register, unregister should be failed. 
*/ + KUNIT_EXPECT_NE(test, 0, unregister_fprobe(&fp_entry)); + KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp_entry, "fprobe_selftest_target*", NULL)); + + entry_val = 0; + exit_val = 0; + target(rand1); + KUNIT_EXPECT_NE(test, 0, entry_val); + KUNIT_EXPECT_EQ(test, 0, exit_val); + + entry_val = 0; + exit_val = 0; + target2(rand1); + KUNIT_EXPECT_NE(test, 0, entry_val); + KUNIT_EXPECT_EQ(test, 0, exit_val); + + KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp_entry)); +} + +static void test_fprobe(struct kunit *test) +{ + struct fprobe fp = { + .entry_handler = fp_entry_handler, + .exit_handler = fp_exit_handler, + }; + + current_test = test; + KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target*", NULL)); + + entry_val = 0; + exit_val = 0; + target(rand1); + KUNIT_EXPECT_NE(test, 0, entry_val); + KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val); + + entry_val = 0; + exit_val = 0; + target2(rand1); + KUNIT_EXPECT_NE(test, 0, entry_val); + KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val); + + KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp)); +} + +static void test_fprobe_syms(struct kunit *test) +{ + static const char *syms[] = {"fprobe_selftest_target", "fprobe_selftest_target2"}; + struct fprobe fp = { + .entry_handler = fp_entry_handler, + .exit_handler = fp_exit_handler, + }; + + current_test = test; + KUNIT_EXPECT_EQ(test, 0, register_fprobe_syms(&fp, syms, 2)); + + entry_val = 0; + exit_val = 0; + target(rand1); + KUNIT_EXPECT_NE(test, 0, entry_val); + KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val); + + entry_val = 0; + exit_val = 0; + target2(rand1); + KUNIT_EXPECT_NE(test, 0, entry_val); + KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val); + + KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp)); +} + +static unsigned long get_ftrace_location(void *func) +{ + unsigned long size, addr = (unsigned long)func; + + if (!kallsyms_lookup_size_offset(addr, &size, NULL) || !size) + return 0; + + return ftrace_location_range(addr, addr + size - 1); +} + +static int fprobe_test_init(struct kunit *test) +{ + do { + rand1 = prandom_u32(); + } while (rand1 <= div_factor); + + target = fprobe_selftest_target; + target2 = fprobe_selftest_target2; + target_ip = get_ftrace_location(target); + target2_ip = get_ftrace_location(target2); + + return 0; +} + +static struct kunit_case fprobe_testcases[] = { + KUNIT_CASE(test_fprobe_entry), + KUNIT_CASE(test_fprobe), + KUNIT_CASE(test_fprobe_syms), + {} +}; + +static struct kunit_suite fprobe_test_suite = { + .name = "fprobe_test", + .init = fprobe_test_init, + .test_cases = fprobe_testcases, +}; + +kunit_test_suites(&fprobe_test_suite); + +MODULE_LICENSE("GPL"); From a0019cd7d41a191253859349535d76ee28ab7d96 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:07 +0100 Subject: [PATCH 087/137] lib/sort: Add priv pointer to swap function Adding support to have priv pointer in swap callback function. Following the initial change on cmp callback functions [1] and adding SWAP_WRAPPER macro to identify sort call of sort_r. 
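To illustrate the new interface, a sort_r() caller can now thread context through both callbacks (a sketch; the element type and the swap counter are illustrative, not part of this patch):

	struct elem { unsigned int key; void *payload; };

	static int cmp_elem(const void *a, const void *b, const void *priv)
	{
		const struct elem *x = a, *y = b;

		return x->key < y->key ? -1 : x->key > y->key;
	}

	static void swap_elem(void *a, void *b, int size, const void *priv)
	{
		struct elem tmp = *(struct elem *)a;

		*(struct elem *)a = *(struct elem *)b;
		*(struct elem *)b = tmp;
		/* priv carries caller context; here it counts swaps */
		(*(unsigned long *)priv)++;
	}

	unsigned long nr_swaps = 0;

	sort_r(array, nelems, sizeof(struct elem), cmp_elem, swap_elem, &nr_swaps);

Plain sort() callers are unaffected: sort() now packs its cmp_func and swap_func into an on-stack struct wrapper passed as priv, with SWAP_WRAPPER telling do_swap()/do_cmp() to unwrap them.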
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Reviewed-by: Masami Hiramatsu Link: https://lore.kernel.org/bpf/20220316122419.933957-2-jolsa@kernel.org [1] 4333fb96ca10 ("media: lib/sort.c: implement sort() variant taking context argument") --- include/linux/sort.h | 2 +- include/linux/types.h | 1 + lib/sort.c | 40 ++++++++++++++++++++++++++++++---------- 3 files changed, 32 insertions(+), 11 deletions(-) diff --git a/include/linux/sort.h b/include/linux/sort.h index b5898725fe9d..e163287ac6c1 100644 --- a/include/linux/sort.h +++ b/include/linux/sort.h @@ -6,7 +6,7 @@ void sort_r(void *base, size_t num, size_t size, cmp_r_func_t cmp_func, - swap_func_t swap_func, + swap_r_func_t swap_func, const void *priv); void sort(void *base, size_t num, size_t size, diff --git a/include/linux/types.h b/include/linux/types.h index ac825ad90e44..ea8cf60a8a79 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -226,6 +226,7 @@ struct callback_head { typedef void (*rcu_callback_t)(struct rcu_head *head); typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func); +typedef void (*swap_r_func_t)(void *a, void *b, int size, const void *priv); typedef void (*swap_func_t)(void *a, void *b, int size); typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv); diff --git a/lib/sort.c b/lib/sort.c index aa18153864d2..b399bf10d675 100644 --- a/lib/sort.c +++ b/lib/sort.c @@ -122,16 +122,27 @@ static void swap_bytes(void *a, void *b, size_t n) * a pointer, but small integers make for the smallest compare * instructions. */ -#define SWAP_WORDS_64 (swap_func_t)0 -#define SWAP_WORDS_32 (swap_func_t)1 -#define SWAP_BYTES (swap_func_t)2 +#define SWAP_WORDS_64 (swap_r_func_t)0 +#define SWAP_WORDS_32 (swap_r_func_t)1 +#define SWAP_BYTES (swap_r_func_t)2 +#define SWAP_WRAPPER (swap_r_func_t)3 + +struct wrapper { + cmp_func_t cmp; + swap_func_t swap; +}; /* * The function pointer is last to make tail calls most efficient if the * compiler decides not to inline this function. 
*/ -static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func) +static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv) { + if (swap_func == SWAP_WRAPPER) { + ((const struct wrapper *)priv)->swap(a, b, (int)size); + return; + } + if (swap_func == SWAP_WORDS_64) swap_words_64(a, b, size); else if (swap_func == SWAP_WORDS_32) @@ -139,7 +150,7 @@ static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func) else if (swap_func == SWAP_BYTES) swap_bytes(a, b, size); else - swap_func(a, b, (int)size); + swap_func(a, b, (int)size, priv); } #define _CMP_WRAPPER ((cmp_r_func_t)0L) @@ -147,7 +158,7 @@ static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func) static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv) { if (cmp == _CMP_WRAPPER) - return ((cmp_func_t)(priv))(a, b); + return ((const struct wrapper *)priv)->cmp(a, b); return cmp(a, b, priv); } @@ -198,7 +209,7 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size) */ void sort_r(void *base, size_t num, size_t size, cmp_r_func_t cmp_func, - swap_func_t swap_func, + swap_r_func_t swap_func, const void *priv) { /* pre-scale counters for performance */ @@ -208,6 +219,10 @@ void sort_r(void *base, size_t num, size_t size, if (!a) /* num < 2 || size == 0 */ return; + /* called from 'sort' without swap function, let's pick the default */ + if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap) + swap_func = NULL; + if (!swap_func) { if (is_aligned(base, size, 8)) swap_func = SWAP_WORDS_64; @@ -230,7 +245,7 @@ void sort_r(void *base, size_t num, size_t size, if (a) /* Building heap: sift down --a */ a -= size; else if (n -= size) /* Sorting: Extract root to --n */ - do_swap(base, base + n, size, swap_func); + do_swap(base, base + n, size, swap_func, priv); else /* Sort complete */ break; @@ -257,7 +272,7 @@ void sort_r(void *base, size_t num, size_t size, c = b; /* Where "a" belongs */ while (b != a) { /* Shift it into place */ b = parent(b, lsbit, size); - do_swap(base + b, base + c, size, swap_func); + do_swap(base + b, base + c, size, swap_func, priv); } } } @@ -267,6 +282,11 @@ void sort(void *base, size_t num, size_t size, cmp_func_t cmp_func, swap_func_t swap_func) { - return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func); + struct wrapper w = { + .cmp = cmp_func, + .swap = swap_func, + }; + + return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w); } EXPORT_SYMBOL(sort); From aecf489f2ce51436402818c96639ed6303b540f8 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:08 +0100 Subject: [PATCH 088/137] kallsyms: Skip the name search for empty string When kallsyms_lookup_name() is called with an empty string, it does a futile search for it through all the symbols. Skip the search for the empty string. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220316122419.933957-3-jolsa@kernel.org --- kernel/kallsyms.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 951c93216fc4..79f2eb617a62 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -212,6 +212,10 @@ unsigned long kallsyms_lookup_name(const char *name) unsigned long i; unsigned int off; + /* Skip the search for empty string.
*/ + if (!*name) + return 0; + for (i = 0, off = 0; i < kallsyms_num_syms; i++) { off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); From 0dcac272540613d41c05e89679e4ddb978b612f1 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:09 +0100 Subject: [PATCH 089/137] bpf: Add multi kprobe link Adding new link type BPF_LINK_TYPE_KPROBE_MULTI that attaches a kprobe program through the fprobe API. The fprobe API makes it possible to attach a probe to multiple functions at once very quickly, because it works on top of ftrace. On the other hand, this limits the probe points to function entry or return. The kprobe program gets the same pt_regs input ctx as when it's attached through the perf API. Adding new attach type BPF_TRACE_KPROBE_MULTI that allows attaching a kprobe program to multiple functions with the new link. The user provides an array of addresses or symbols, with a count, to attach the kprobe program to. The new link_create uapi interface looks like: struct { __u32 flags; __u32 cnt; __aligned_u64 syms; __aligned_u64 addrs; } kprobe_multi; The flags field allows a single bit (BPF_F_KPROBE_MULTI_RETURN) to create a return multi kprobe. Signed-off-by: Masami Hiramatsu Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220316122419.933957-4-jolsa@kernel.org --- include/linux/bpf_types.h | 1 + include/linux/trace_events.h | 7 ++ include/uapi/linux/bpf.h | 13 ++ kernel/bpf/syscall.c | 26 +++- kernel/trace/bpf_trace.c | 211 +++++++++++++++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 13 ++ 6 files changed, 266 insertions(+), 5 deletions(-) diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 48a91c51c015..3e24ad0c4b3c 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -140,3 +140,4 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp) #ifdef CONFIG_PERF_EVENTS BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf) #endif +BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index dcea51fb60e2..8f0e9e7cb493 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -15,6 +15,7 @@ struct array_buffer; struct tracer; struct dentry; struct bpf_prog; +union bpf_attr; const char *trace_print_flags_seq(struct trace_seq *p, const char *delim, unsigned long flags, @@ -738,6 +739,7 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, u64 *probe_addr); +int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); #else static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) { @@ -779,6 +781,11 @@ static inline int bpf_get_perf_event_info(const struct perf_event *event, { return -EOPNOTSUPP; } +static inline int +bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif enum { diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 99fab54ae9c0..d77f47af7752 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -997,6 +997,7 @@ enum bpf_attach_type { BPF_SK_REUSEPORT_SELECT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, BPF_PERF_EVENT, + BPF_TRACE_KPROBE_MULTI, __MAX_BPF_ATTACH_TYPE }; @@ -1011,6 +1012,7 @@ enum bpf_link_type { BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, MAX_BPF_LINK_TYPE, }; @@ -1118,6
+1120,11 @@ enum bpf_link_type { */ #define BPF_F_XDP_HAS_FRAGS (1U << 5) +/* link_create.kprobe_multi.flags used in LINK_CREATE command for + * BPF_TRACE_KPROBE_MULTI attach type to create return probe. + */ +#define BPF_F_KPROBE_MULTI_RETURN (1U << 0) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * @@ -1475,6 +1482,12 @@ union bpf_attr { */ __u64 bpf_cookie; } perf_event; + struct { + __u32 flags; + __u32 cnt; + __aligned_u64 syms; + __aligned_u64 addrs; + } kprobe_multi; }; } link_create; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 9beb585be5a6..b8bb67ee6c57 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -32,6 +32,7 @@ #include #include #include +#include #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ @@ -3022,6 +3023,11 @@ out_put_file: fput(perf_file); return err; } +#else +static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_PERF_EVENTS */ #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd @@ -4255,7 +4261,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr, return -EINVAL; } -#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len +#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.addrs static int link_create(union bpf_attr *attr, bpfptr_t uattr) { enum bpf_prog_type ptype; @@ -4279,7 +4285,6 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) ret = tracing_bpf_link_attach(attr, uattr, prog); goto out; case BPF_PROG_TYPE_PERF_EVENT: - case BPF_PROG_TYPE_KPROBE: case BPF_PROG_TYPE_TRACEPOINT: if (attr->link_create.attach_type != BPF_PERF_EVENT) { ret = -EINVAL; @@ -4287,6 +4292,14 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) } ptype = prog->type; break; + case BPF_PROG_TYPE_KPROBE: + if (attr->link_create.attach_type != BPF_PERF_EVENT && + attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) { + ret = -EINVAL; + goto out; + } + ptype = prog->type; + break; default: ptype = attach_type_to_prog_type(attr->link_create.attach_type); if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) { @@ -4318,13 +4331,16 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) ret = bpf_xdp_link_attach(attr, prog); break; #endif -#ifdef CONFIG_PERF_EVENTS case BPF_PROG_TYPE_PERF_EVENT: case BPF_PROG_TYPE_TRACEPOINT: - case BPF_PROG_TYPE_KPROBE: ret = bpf_perf_link_attach(attr, prog); break; -#endif + case BPF_PROG_TYPE_KPROBE: + if (attr->link_create.attach_type == BPF_PERF_EVENT) + ret = bpf_perf_link_attach(attr, prog); + else + ret = bpf_kprobe_multi_link_attach(attr, prog); + break; default: ret = -EINVAL; } diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index a2024ba32a20..fffa2171fae4 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -2181,3 +2182,213 @@ static int __init bpf_event_init(void) fs_initcall(bpf_event_init); #endif /* CONFIG_MODULES */ + +#ifdef CONFIG_FPROBE +struct bpf_kprobe_multi_link { + struct bpf_link link; + struct fprobe fp; + unsigned long *addrs; +}; + +static void bpf_kprobe_multi_link_release(struct bpf_link *link) +{ + struct bpf_kprobe_multi_link *kmulti_link; + + kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); + unregister_fprobe(&kmulti_link->fp); +} + +static void bpf_kprobe_multi_link_dealloc(struct 
bpf_link *link) +{ + struct bpf_kprobe_multi_link *kmulti_link; + + kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); + kvfree(kmulti_link->addrs); + kfree(kmulti_link); +} + +static const struct bpf_link_ops bpf_kprobe_multi_link_lops = { + .release = bpf_kprobe_multi_link_release, + .dealloc = bpf_kprobe_multi_link_dealloc, +}; + +static int +kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, + struct pt_regs *regs) +{ + int err; + + if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { + err = 0; + goto out; + } + + migrate_disable(); + rcu_read_lock(); + err = bpf_prog_run(link->link.prog, regs); + rcu_read_unlock(); + migrate_enable(); + + out: + __this_cpu_dec(bpf_prog_active); + return err; +} + +static void +kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip, + struct pt_regs *regs) +{ + unsigned long saved_ip = instruction_pointer(regs); + struct bpf_kprobe_multi_link *link; + + /* + * Because fprobe's regs->ip is set to the next instruction of + * dynamic-ftrace instruction, correct entry ip must be set, so + * that the bpf program can access entry address via regs as same + * as kprobes. + * + * Both kprobe and kretprobe see the entry ip of traced function + * as instruction pointer. + */ + instruction_pointer_set(regs, entry_ip); + + link = container_of(fp, struct bpf_kprobe_multi_link, fp); + kprobe_multi_link_prog_run(link, regs); + + instruction_pointer_set(regs, saved_ip); +} + +static int +kprobe_multi_resolve_syms(const void *usyms, u32 cnt, + unsigned long *addrs) +{ + unsigned long addr, size; + const char **syms; + int err = -ENOMEM; + unsigned int i; + char *func; + + size = cnt * sizeof(*syms); + syms = kvzalloc(size, GFP_KERNEL); + if (!syms) + return -ENOMEM; + + func = kmalloc(KSYM_NAME_LEN, GFP_KERNEL); + if (!func) + goto error; + + if (copy_from_user(syms, usyms, size)) { + err = -EFAULT; + goto error; + } + + for (i = 0; i < cnt; i++) { + err = strncpy_from_user(func, syms[i], KSYM_NAME_LEN); + if (err == KSYM_NAME_LEN) + err = -E2BIG; + if (err < 0) + goto error; + err = -EINVAL; + addr = kallsyms_lookup_name(func); + if (!addr) + goto error; + if (!kallsyms_lookup_size_offset(addr, &size, NULL)) + goto error; + addr = ftrace_location_range(addr, addr + size - 1); + if (!addr) + goto error; + addrs[i] = addr; + } + + err = 0; +error: + kvfree(syms); + kfree(func); + return err; +} + +int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_kprobe_multi_link *link = NULL; + struct bpf_link_primer link_primer; + unsigned long *addrs; + u32 flags, cnt, size; + void __user *uaddrs; + void __user *usyms; + int err; + + /* no support for 32bit archs yet */ + if (sizeof(u64) != sizeof(void *)) + return -EOPNOTSUPP; + + if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI) + return -EINVAL; + + flags = attr->link_create.kprobe_multi.flags; + if (flags & ~BPF_F_KPROBE_MULTI_RETURN) + return -EINVAL; + + uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); + usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); + if (!!uaddrs == !!usyms) + return -EINVAL; + + cnt = attr->link_create.kprobe_multi.cnt; + if (!cnt) + return -EINVAL; + + size = cnt * sizeof(*addrs); + addrs = kvmalloc(size, GFP_KERNEL); + if (!addrs) + return -ENOMEM; + + if (uaddrs) { + if (copy_from_user(addrs, uaddrs, size)) { + err = -EFAULT; + goto error; + } + } else { + err = kprobe_multi_resolve_syms(usyms, cnt, addrs); + if (err) + goto error; + } + + link = 
kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + err = -ENOMEM; + goto error; + } + + bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, + &bpf_kprobe_multi_link_lops, prog); + + err = bpf_link_prime(&link->link, &link_primer); + if (err) + goto error; + + if (flags & BPF_F_KPROBE_MULTI_RETURN) + link->fp.exit_handler = kprobe_multi_link_handler; + else + link->fp.entry_handler = kprobe_multi_link_handler; + + link->addrs = addrs; + + err = register_fprobe_ips(&link->fp, addrs, cnt); + if (err) { + bpf_link_cleanup(&link_primer); + return err; + } + + return bpf_link_settle(&link_primer); + +error: + kfree(link); + kvfree(addrs); + return err; +} +#else /* !CONFIG_FPROBE */ +int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} +#endif diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 99fab54ae9c0..d77f47af7752 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -997,6 +997,7 @@ enum bpf_attach_type { BPF_SK_REUSEPORT_SELECT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, BPF_PERF_EVENT, + BPF_TRACE_KPROBE_MULTI, __MAX_BPF_ATTACH_TYPE }; @@ -1011,6 +1012,7 @@ enum bpf_link_type { BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, MAX_BPF_LINK_TYPE, }; @@ -1118,6 +1120,11 @@ enum bpf_link_type { */ #define BPF_F_XDP_HAS_FRAGS (1U << 5) +/* link_create.kprobe_multi.flags used in LINK_CREATE command for + * BPF_TRACE_KPROBE_MULTI attach type to create return probe. + */ +#define BPF_F_KPROBE_MULTI_RETURN (1U << 0) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * @@ -1475,6 +1482,12 @@ union bpf_attr { */ __u64 bpf_cookie; } perf_event; + struct { + __u32 flags; + __u32 cnt; + __aligned_u64 syms; + __aligned_u64 addrs; + } kprobe_multi; }; } link_create; From 42a5712094e89ef0a125ac0f9d0873f9233368b1 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:10 +0100 Subject: [PATCH 090/137] bpf: Add bpf_get_func_ip kprobe helper for multi kprobe link Adding support to call bpf_get_func_ip helper from kprobe programs attached by multi kprobe link. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220316122419.933957-5-jolsa@kernel.org --- kernel/trace/bpf_trace.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index fffa2171fae4..250750932228 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1037,6 +1037,18 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs) +{ + return instruction_pointer(regs); +} + +static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = { + .func = bpf_get_func_ip_kprobe_multi, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx) { struct bpf_trace_run_ctx *run_ctx; @@ -1280,7 +1292,9 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_override_return_proto; #endif case BPF_FUNC_get_func_ip: - return &bpf_get_func_ip_proto_kprobe; + return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? 
+ &bpf_get_func_ip_proto_kprobe_multi : + &bpf_get_func_ip_proto_kprobe; case BPF_FUNC_get_attach_cookie: return &bpf_get_attach_cookie_proto_trace; default: From 97ee4d20ee67eb462581a7af01442de6586e390b Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:11 +0100 Subject: [PATCH 091/137] bpf: Add support to inline bpf_get_func_ip helper on x86 Adding support to inline it on x86, because it's a single load instruction. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220316122419.933957-6-jolsa@kernel.org --- kernel/bpf/verifier.c | 21 ++++++++++++++++++++- kernel/trace/bpf_trace.c | 1 + 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index cf92f9c01556..0287176bfe9a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -13678,7 +13678,7 @@ patch_map_ops_generic: continue; } - /* Implement bpf_get_func_ip inline. */ + /* Implement tracing bpf_get_func_ip inline. */ if (prog_type == BPF_PROG_TYPE_TRACING && insn->imm == BPF_FUNC_get_func_ip) { /* Load IP address from ctx - 16 */ @@ -13693,6 +13693,25 @@ patch_map_ops_generic: continue; } +#ifdef CONFIG_X86 + /* Implement kprobe_multi bpf_get_func_ip inline. */ + if (prog_type == BPF_PROG_TYPE_KPROBE && + eatype == BPF_TRACE_KPROBE_MULTI && + insn->imm == BPF_FUNC_get_func_ip) { + /* Load IP address from ctx (struct pt_regs) ip */ + insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct pt_regs, ip)); + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); + if (!new_prog) + return -ENOMEM; + + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + continue; + } +#endif + patch_call_imm: fn = env->ops->get_func_proto(insn->imm, env->prog); /* all functions that have prototype and verifier allowed diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 250750932228..0e7f8c9bc756 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1039,6 +1039,7 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = { BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs) { + /* This helper call is inlined by verifier on x86. */ return instruction_pointer(regs); } From ca74823c6e16dd42b7cf60d9fdde80e2a81a67bb Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:12 +0100 Subject: [PATCH 092/137] bpf: Add cookie support to programs attached with kprobe multi link Adding support to call the bpf_get_attach_cookie helper from kprobe programs attached with the kprobe multi link. The cookies are provided as an array of u64 values, where each value is paired with the provided function address or symbol at the same array index. When the cookie array is provided, it is sorted together with the addresses (check bpf_kprobe_multi_cookie_swap). This way we can find the cookie based on the address in the bpf_get_attach_cookie helper.
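For illustration only (not part of this patch), a minimal sketch of the BPF side; it assumes the kprobe.multi/ section type that is only added later in this series, and the handler name and symbol pattern are hypothetical:

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char LICENSE[] SEC("license") = "GPL";

  SEC("kprobe.multi/bpf_fentry_test*")
  int BPF_KPROBE(handler)
  {
          /* cookie paired with this function's address at link creation
           * time, or 0 if no cookie array was passed to LINK_CREATE
           */
          __u64 cookie = bpf_get_attach_cookie(ctx);

          bpf_printk("ip 0x%llx cookie %llu", bpf_get_func_ip(ctx), cookie);
          return 0;
  }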
Suggested-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220316122419.933957-7-jolsa@kernel.org --- include/uapi/linux/bpf.h | 1 + kernel/bpf/syscall.c | 2 +- kernel/trace/bpf_trace.c | 114 ++++++++++++++++++++++++++++++++- tools/include/uapi/linux/bpf.h | 1 + 4 files changed, 116 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d77f47af7752..7604e7d5438f 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1487,6 +1487,7 @@ union bpf_attr { __u32 cnt; __aligned_u64 syms; __aligned_u64 addrs; + __aligned_u64 cookies; } kprobe_multi; }; } link_create; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index b8bb67ee6c57..cdaa1152436a 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -4261,7 +4261,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr, return -EINVAL; } -#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.addrs +#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies static int link_create(union bpf_attr *attr, bpfptr_t uattr) { enum bpf_prog_type ptype; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 0e7f8c9bc756..9a7b6be655e4 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include @@ -78,6 +80,7 @@ u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags, const struct btf **btf, s32 *btf_id); +static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx, u64 ip); /** * trace_call_bpf - invoke BPF program @@ -1050,6 +1053,18 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs) +{ + return bpf_kprobe_multi_cookie(current->bpf_ctx, instruction_pointer(regs)); +} + +static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = { + .func = bpf_get_attach_cookie_kprobe_multi, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx) { struct bpf_trace_run_ctx *run_ctx; @@ -1297,7 +1312,9 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) &bpf_get_func_ip_proto_kprobe_multi : &bpf_get_func_ip_proto_kprobe; case BPF_FUNC_get_attach_cookie: - return &bpf_get_attach_cookie_proto_trace; + return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? + &bpf_get_attach_cookie_proto_kmulti : + &bpf_get_attach_cookie_proto_trace; default: return bpf_tracing_func_proto(func_id, prog); } @@ -2203,6 +2220,13 @@ struct bpf_kprobe_multi_link { struct bpf_link link; struct fprobe fp; unsigned long *addrs; + /* + * The run_ctx here is used to get struct bpf_kprobe_multi_link in + * get_attach_cookie helper, so it can't be used to store data. 
+ */ + struct bpf_run_ctx run_ctx; + u64 *cookies; + u32 cnt; }; static void bpf_kprobe_multi_link_release(struct bpf_link *link) @@ -2219,6 +2243,7 @@ static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link) kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); kvfree(kmulti_link->addrs); + kvfree(kmulti_link->cookies); kfree(kmulti_link); } @@ -2227,10 +2252,60 @@ static const struct bpf_link_ops bpf_kprobe_multi_link_lops = { .dealloc = bpf_kprobe_multi_link_dealloc, }; +static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv) +{ + const struct bpf_kprobe_multi_link *link = priv; + unsigned long *addr_a = a, *addr_b = b; + u64 *cookie_a, *cookie_b; + unsigned long tmp1; + u64 tmp2; + + cookie_a = link->cookies + (addr_a - link->addrs); + cookie_b = link->cookies + (addr_b - link->addrs); + + /* swap addr_a/addr_b and cookie_a/cookie_b values */ + tmp1 = *addr_a; *addr_a = *addr_b; *addr_b = tmp1; + tmp2 = *cookie_a; *cookie_a = *cookie_b; *cookie_b = tmp2; +} + +static int __bpf_kprobe_multi_cookie_cmp(const void *a, const void *b) +{ + const unsigned long *addr_a = a, *addr_b = b; + + if (*addr_a == *addr_b) + return 0; + return *addr_a < *addr_b ? -1 : 1; +} + +static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv) +{ + return __bpf_kprobe_multi_cookie_cmp(a, b); +} + +static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx, u64 ip) +{ + struct bpf_kprobe_multi_link *link; + unsigned long *addr; + u64 *cookie; + + if (WARN_ON_ONCE(!ctx)) + return 0; + link = container_of(ctx, struct bpf_kprobe_multi_link, run_ctx); + if (!link->cookies) + return 0; + addr = bsearch(&ip, link->addrs, link->cnt, sizeof(ip), + __bpf_kprobe_multi_cookie_cmp); + if (!addr) + return 0; + cookie = link->cookies + (addr - link->addrs); + return *cookie; +} + static int kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, struct pt_regs *regs) { + struct bpf_run_ctx *old_run_ctx; int err; if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { @@ -2240,7 +2315,9 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, migrate_disable(); rcu_read_lock(); + old_run_ctx = bpf_set_run_ctx(&link->run_ctx); err = bpf_prog_run(link->link.prog, regs); + bpf_reset_run_ctx(old_run_ctx); rcu_read_unlock(); migrate_enable(); @@ -2326,9 +2403,11 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr { struct bpf_kprobe_multi_link *link = NULL; struct bpf_link_primer link_primer; + void __user *ucookies; unsigned long *addrs; u32 flags, cnt, size; void __user *uaddrs; + u64 *cookies = NULL; void __user *usyms; int err; @@ -2368,6 +2447,19 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr goto error; } + ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); + if (ucookies) { + cookies = kvmalloc(size, GFP_KERNEL); + if (!cookies) { + err = -ENOMEM; + goto error; + } + if (copy_from_user(cookies, ucookies, size)) { + err = -EFAULT; + goto error; + } + } + link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) { err = -ENOMEM; @@ -2387,6 +2479,21 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr link->fp.entry_handler = kprobe_multi_link_handler; link->addrs = addrs; + link->cookies = cookies; + link->cnt = cnt; + + if (cookies) { + /* + * Sorting addresses will trigger sorting cookies as well + * (check bpf_kprobe_multi_cookie_swap). 
This way we can + * find cookie based on the address in bpf_get_attach_cookie + * helper. + */ + sort_r(addrs, cnt, sizeof(*addrs), + bpf_kprobe_multi_cookie_cmp, + bpf_kprobe_multi_cookie_swap, + link); + } err = register_fprobe_ips(&link->fp, addrs, cnt); if (err) { @@ -2399,6 +2506,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr error: kfree(link); kvfree(addrs); + kvfree(cookies); return err; } #else /* !CONFIG_FPROBE */ @@ -2406,4 +2514,8 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr { return -EOPNOTSUPP; } +static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx, u64 ip) +{ + return 0; +} #endif diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index d77f47af7752..7604e7d5438f 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1487,6 +1487,7 @@ union bpf_attr { __u32 cnt; __aligned_u64 syms; __aligned_u64 addrs; + __aligned_u64 cookies; } kprobe_multi; }; } link_create; From 85153ac06283408e6ccaf002b02fd85f0bdab94b Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:13 +0100 Subject: [PATCH 093/137] libbpf: Add libbpf_kallsyms_parse function Move the kallsyms parsing in internal libbpf_kallsyms_parse function, so it can be used from other places. It will be used in following changes. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220316122419.933957-8-jolsa@kernel.org --- tools/lib/bpf/libbpf.c | 62 ++++++++++++++++++++------------- tools/lib/bpf/libbpf_internal.h | 5 +++ 2 files changed, 43 insertions(+), 24 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 43161fdd44bb..1ca520a29fdb 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -7171,12 +7171,10 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj) return 0; } -static int bpf_object__read_kallsyms_file(struct bpf_object *obj) +int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx) { char sym_type, sym_name[500]; unsigned long long sym_addr; - const struct btf_type *t; - struct extern_desc *ext; int ret, err = 0; FILE *f; @@ -7195,35 +7193,51 @@ static int bpf_object__read_kallsyms_file(struct bpf_object *obj) if (ret != 3) { pr_warn("failed to read kallsyms entry: %d\n", ret); err = -EINVAL; - goto out; + break; } - ext = find_extern_by_name(obj, sym_name); - if (!ext || ext->type != EXT_KSYM) - continue; - - t = btf__type_by_id(obj->btf, ext->btf_id); - if (!btf_is_var(t)) - continue; - - if (ext->is_set && ext->ksym.addr != sym_addr) { - pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n", - sym_name, ext->ksym.addr, sym_addr); - err = -EINVAL; - goto out; - } - if (!ext->is_set) { - ext->is_set = true; - ext->ksym.addr = sym_addr; - pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr); - } + err = cb(sym_addr, sym_type, sym_name, ctx); + if (err) + break; } -out: fclose(f); return err; } +static int kallsyms_cb(unsigned long long sym_addr, char sym_type, + const char *sym_name, void *ctx) +{ + struct bpf_object *obj = ctx; + const struct btf_type *t; + struct extern_desc *ext; + + ext = find_extern_by_name(obj, sym_name); + if (!ext || ext->type != EXT_KSYM) + return 0; + + t = btf__type_by_id(obj->btf, ext->btf_id); + if (!btf_is_var(t)) + return 0; + + if (ext->is_set && ext->ksym.addr != sym_addr) { + pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n", + sym_name, ext->ksym.addr, sym_addr); 
+ return -EINVAL; + } + if (!ext->is_set) { + ext->is_set = true; + ext->ksym.addr = sym_addr; + pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr); + } + return 0; +} + +static int bpf_object__read_kallsyms_file(struct bpf_object *obj) +{ + return libbpf_kallsyms_parse(kallsyms_cb, obj); +} + static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, __u16 kind, struct btf **res_btf, struct module_btf **res_mod_btf) diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 4fda8bdf0a0d..b6247dc7f8eb 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -449,6 +449,11 @@ __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, extern enum libbpf_strict_mode libbpf_mode; +typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type, + const char *sym_name, void *ctx); + +int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *arg); + /* handle direct returned errors */ static inline int libbpf_err(int ret) { From 5117c26e877352bcabd4871e6bcdebed4857a88d Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:14 +0100 Subject: [PATCH 094/137] libbpf: Add bpf_link_create support for multi kprobes Adding new kprobe_multi struct to bpf_link_create_opts object to pass multiple kprobe data to link_create attr uapi. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220316122419.933957-9-jolsa@kernel.org --- tools/lib/bpf/bpf.c | 9 +++++++++ tools/lib/bpf/bpf.h | 9 ++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index f69ce3a01385..cf27251adb92 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -854,6 +854,15 @@ int bpf_link_create(int prog_fd, int target_fd, if (!OPTS_ZEROED(opts, perf_event)) return libbpf_err(-EINVAL); break; + case BPF_TRACE_KPROBE_MULTI: + attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0); + attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0); + attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0)); + attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0)); + attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0)); + if (!OPTS_ZEROED(opts, kprobe_multi)) + return libbpf_err(-EINVAL); + break; default: if (!OPTS_ZEROED(opts, flags)) return libbpf_err(-EINVAL); diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 5253cb4a4c0a..f4b4afb6d4ba 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -413,10 +413,17 @@ struct bpf_link_create_opts { struct { __u64 bpf_cookie; } perf_event; + struct { + __u32 flags; + __u32 cnt; + const char **syms; + const unsigned long *addrs; + const __u64 *cookies; + } kprobe_multi; }; size_t :0; }; -#define bpf_link_create_opts__last_field perf_event +#define bpf_link_create_opts__last_field kprobe_multi.cookies LIBBPF_API int bpf_link_create(int prog_fd, int target_fd, enum bpf_attach_type attach_type, From ddc6b04989eb099368d3e6b6eaf5a6b181a36f91 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:15 +0100 Subject: [PATCH 095/137] libbpf: Add bpf_program__attach_kprobe_multi_opts function Adding bpf_program__attach_kprobe_multi_opts function for attaching kprobe program to multiple functions. 
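A quick usage sketch (a hedged example, not taken from this patch: 'prog' stands for a loaded struct bpf_program, and the symbol names are only illustrative; the full prototype and options are described below):

  const char *syms[] = { "ksys_read", "ksys_write" };
  __u64 cookies[] = { 1, 2 };
  LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
          .syms = syms,
          .cookies = cookies,
          .cnt = 2,
  );
  struct bpf_link *link;

  /* attach one kprobe program to both symbols, one cookie per symbol */
  link = bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
  if (libbpf_get_error(link))
          /* handle attach error */;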
struct bpf_link * bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, const char *pattern, const struct bpf_kprobe_multi_opts *opts); The user can specify functions to attach to with the 'pattern' argument, which allows wildcards ('*' and '?' supported), or provide symbols or addresses directly through the opts argument. These 3 options are mutually exclusive. When using symbols or addresses, the user can also provide a cookie value for each symbol/address that can be retrieved later in the bpf program with the bpf_get_attach_cookie helper. struct bpf_kprobe_multi_opts { size_t sz; const char **syms; const unsigned long *addrs; const __u64 *cookies; size_t cnt; bool retprobe; size_t :0; }; Symbols, addresses and cookies are provided through the opts object (syms/addrs/cookies) as array pointers with a specified count (cnt). Each cookie value is paired with the provided function address or symbol at the same array index. The program can also be attached as a return probe if 'retprobe' is set. For quick usage with a NULL opts argument, like: bpf_program__attach_kprobe_multi_opts(prog, "ksys_*", NULL) the 'prog' will be attached as a kprobe to the 'ksys_*' functions. Also adding new program sections for automatic attachment: kprobe.multi/ kretprobe.multi/ The symbol pattern is used as the 'pattern' argument in the bpf_program__attach_kprobe_multi_opts function. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220316122419.933957-10-jolsa@kernel.org --- tools/lib/bpf/libbpf.c | 160 +++++++++++++++++++++++++++++++++++++++ tools/lib/bpf/libbpf.h | 23 ++++++ tools/lib/bpf/libbpf.map | 1 + 3 files changed, 184 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 1ca520a29fdb..f3a31478e23b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8610,6 +8610,7 @@ static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link); @@ -8621,6 +8622,8 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE), SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe), SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE), + SEC_DEF("kprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), + SEC_DEF("kretprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED), SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX), @@ -10224,6 +10227,139 @@ struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog, return bpf_program__attach_kprobe_opts(prog, func_name, &opts); } +/* Adapted from perf/util/string.c */ +static bool glob_match(const char *str, const char *pat) +{ + while (*str && *pat && *pat != '*') { + if (*pat == '?') { /* Matches any single character */ + str++; + pat++; + continue; + } + if (*str != *pat) + return false; + str++; + pat++; + } + /* Check wild card */ + if (*pat == '*') { 
+ while (*pat == '*') + pat++; + if (!*pat) /* Tail wild card matches all */ + return true; + while (*str) + if (glob_match(str++, pat)) + return true; + } + return !*str && !*pat; +} + +struct kprobe_multi_resolve { + const char *pattern; + unsigned long *addrs; + size_t cap; + size_t cnt; +}; + +static int +resolve_kprobe_multi_cb(unsigned long long sym_addr, char sym_type, + const char *sym_name, void *ctx) +{ + struct kprobe_multi_resolve *res = ctx; + int err; + + if (!glob_match(sym_name, res->pattern)) + return 0; + + err = libbpf_ensure_mem((void **) &res->addrs, &res->cap, sizeof(unsigned long), + res->cnt + 1); + if (err) + return err; + + res->addrs[res->cnt++] = (unsigned long) sym_addr; + return 0; +} + +struct bpf_link * +bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, + const char *pattern, + const struct bpf_kprobe_multi_opts *opts) +{ + LIBBPF_OPTS(bpf_link_create_opts, lopts); + struct kprobe_multi_resolve res = { + .pattern = pattern, + }; + struct bpf_link *link = NULL; + char errmsg[STRERR_BUFSIZE]; + const unsigned long *addrs; + int err, link_fd, prog_fd; + const __u64 *cookies; + const char **syms; + bool retprobe; + size_t cnt; + + if (!OPTS_VALID(opts, bpf_kprobe_multi_opts)) + return libbpf_err_ptr(-EINVAL); + + syms = OPTS_GET(opts, syms, false); + addrs = OPTS_GET(opts, addrs, false); + cnt = OPTS_GET(opts, cnt, false); + cookies = OPTS_GET(opts, cookies, false); + + if (!pattern && !addrs && !syms) + return libbpf_err_ptr(-EINVAL); + if (pattern && (addrs || syms || cookies || cnt)) + return libbpf_err_ptr(-EINVAL); + if (!pattern && !cnt) + return libbpf_err_ptr(-EINVAL); + if (addrs && syms) + return libbpf_err_ptr(-EINVAL); + + if (pattern) { + err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res); + if (err) + goto error; + if (!res.cnt) { + err = -ENOENT; + goto error; + } + addrs = res.addrs; + cnt = res.cnt; + } + + retprobe = OPTS_GET(opts, retprobe, false); + + lopts.kprobe_multi.syms = syms; + lopts.kprobe_multi.addrs = addrs; + lopts.kprobe_multi.cookies = cookies; + lopts.kprobe_multi.cnt = cnt; + lopts.kprobe_multi.flags = retprobe ? 
BPF_F_KPROBE_MULTI_RETURN : 0; + + link = calloc(1, sizeof(*link)); + if (!link) { + err = -ENOMEM; + goto error; + } + link->detach = &bpf_link__detach_fd; + + prog_fd = bpf_program__fd(prog); + link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts); + if (link_fd < 0) { + err = -errno; + pr_warn("prog '%s': failed to attach: %s\n", + prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto error; + } + link->fd = link_fd; + free(res.addrs); + return link; + +error: + free(link); + free(res.addrs); + return libbpf_err_ptr(err); +} + static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) { DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); @@ -10255,6 +10391,30 @@ static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf return libbpf_get_error(*link); } +static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + const char *spec; + char *pattern; + int n; + + opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/"); + if (opts.retprobe) + spec = prog->sec_name + sizeof("kretprobe.multi/") - 1; + else + spec = prog->sec_name + sizeof("kprobe.multi/") - 1; + + n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern); + if (n < 1) { + pr_warn("kprobe multi pattern is invalid: %s\n", pattern); + return -EINVAL; + } + + *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts); + free(pattern); + return libbpf_get_error(*link); +} + static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, const char *binary_path, uint64_t offset) { diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index c1b0c2ef14d8..d5239fb4abdc 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -425,6 +425,29 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog, const char *func_name, const struct bpf_kprobe_opts *opts); +struct bpf_kprobe_multi_opts { + /* size of this struct, for forward/backward compatibility */ + size_t sz; + /* array of function symbols to attach */ + const char **syms; + /* array of function addresses to attach */ + const unsigned long *addrs; + /* array of user-provided values fetchable through bpf_get_attach_cookie */ + const __u64 *cookies; + /* number of elements in syms/addrs/cookies arrays */ + size_t cnt; + /* create return kprobes */ + bool retprobe; + size_t :0; +}; + +#define bpf_kprobe_multi_opts__last_field retprobe + +LIBBPF_API struct bpf_link * +bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, + const char *pattern, + const struct bpf_kprobe_multi_opts *opts); + struct bpf_uprobe_opts { /* size of this struct, for forward/backward compatiblity */ size_t sz; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index df1b947792c8..554c56e6e5d3 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -444,4 +444,5 @@ LIBBPF_0.8.0 { global: libbpf_register_prog_handler; libbpf_unregister_prog_handler; + bpf_program__attach_kprobe_multi_opts; } LIBBPF_0.7.0; From f7a11eeccb11185437f4da1c80b66b857d1e906f Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:16 +0100 Subject: [PATCH 096/137] selftests/bpf: Add kprobe_multi attach test Adding kprobe_multi attach test that uses new fprobe interface to attach kprobe program to multiple functions. The test is attaching programs to bpf_fentry_test* functions and uses single trampoline program bpf_prog_test_run to trigger bpf_fentry_test* functions. 
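For context, a sketch of the trigger mechanism the test relies on (names match the selftest below): running the tracing program via BPF_PROG_TEST_RUN makes the kernel call bpf_fentry_test1..8, which in turn fires the attached multi-kprobe handlers:

  LIBBPF_OPTS(bpf_test_run_opts, topts);
  int prog_fd = bpf_program__fd(skel->progs.trigger);
  int err = bpf_prog_test_run_opts(prog_fd, &topts);
  /* err == 0 and topts.retval == 0 on success; by now the kprobe and
   * kretprobe handlers have fired for each bpf_fentry_test* call
   */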
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220316122419.933957-11-jolsa@kernel.org --- .../bpf/prog_tests/kprobe_multi_test.c | 141 ++++++++++++++++++ .../selftests/bpf/progs/kprobe_multi.c | 95 ++++++++++++ tools/testing/selftests/bpf/trace_helpers.c | 7 + 3 files changed, 243 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c create mode 100644 tools/testing/selftests/bpf/progs/kprobe_multi.c diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c new file mode 100644 index 000000000000..ded6b8c8ec05 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "kprobe_multi.skel.h" +#include "trace_helpers.h" + +static void kprobe_multi_test_run(struct kprobe_multi *skel) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + int err, prog_fd; + + prog_fd = bpf_program__fd(skel->progs.trigger); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + + ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result"); + ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result"); + ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result"); + ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result"); + ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result"); + ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result"); + ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result"); + ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result"); + + ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result"); + ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result"); + ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result"); + ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result"); + ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result"); + ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result"); + ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result"); + ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result"); +} + +static void test_skel_api(void) +{ + struct kprobe_multi *skel = NULL; + int err; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load")) + goto cleanup; + + skel->bss->pid = getpid(); + err = kprobe_multi__attach(skel); + if (!ASSERT_OK(err, "kprobe_multi__attach")) + goto cleanup; + + kprobe_multi_test_run(skel); + +cleanup: + kprobe_multi__destroy(skel); +} + +static void test_link_api(struct bpf_link_create_opts *opts) +{ + int prog_fd, link1_fd = -1, link2_fd = -1; + struct kprobe_multi *skel = NULL; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + prog_fd = bpf_program__fd(skel->progs.test_kprobe); + link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts); + if (!ASSERT_GE(link1_fd, 0, "link_fd")) + goto cleanup; + + opts->kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN; + prog_fd = bpf_program__fd(skel->progs.test_kretprobe); + link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts); + if (!ASSERT_GE(link2_fd, 0, "link_fd")) + goto cleanup; + + kprobe_multi_test_run(skel); + +cleanup: + if 
(link1_fd != -1) + close(link1_fd); + if (link2_fd != -1) + close(link2_fd); + kprobe_multi__destroy(skel); +} + +#define GET_ADDR(__sym, __addr) ({ \ + __addr = ksym_get_addr(__sym); \ + if (!ASSERT_NEQ(__addr, 0, "kallsyms load failed for " #__sym)) \ + return; \ +}) + +static void test_link_api_addrs(void) +{ + LIBBPF_OPTS(bpf_link_create_opts, opts); + unsigned long long addrs[8]; + + GET_ADDR("bpf_fentry_test1", addrs[0]); + GET_ADDR("bpf_fentry_test2", addrs[1]); + GET_ADDR("bpf_fentry_test3", addrs[2]); + GET_ADDR("bpf_fentry_test4", addrs[3]); + GET_ADDR("bpf_fentry_test5", addrs[4]); + GET_ADDR("bpf_fentry_test6", addrs[5]); + GET_ADDR("bpf_fentry_test7", addrs[6]); + GET_ADDR("bpf_fentry_test8", addrs[7]); + + opts.kprobe_multi.addrs = (const unsigned long*) addrs; + opts.kprobe_multi.cnt = ARRAY_SIZE(addrs); + test_link_api(&opts); +} + +static void test_link_api_syms(void) +{ + LIBBPF_OPTS(bpf_link_create_opts, opts); + const char *syms[8] = { + "bpf_fentry_test1", + "bpf_fentry_test2", + "bpf_fentry_test3", + "bpf_fentry_test4", + "bpf_fentry_test5", + "bpf_fentry_test6", + "bpf_fentry_test7", + "bpf_fentry_test8", + }; + + opts.kprobe_multi.syms = syms; + opts.kprobe_multi.cnt = ARRAY_SIZE(syms); + test_link_api(&opts); +} + +void test_kprobe_multi_test(void) +{ + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms")) + return; + + if (test__start_subtest("skel_api")) + test_skel_api(); + if (test__start_subtest("link_api_addrs")) + test_link_api_syms(); + if (test__start_subtest("link_api_syms")) + test_link_api_addrs(); +} diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c new file mode 100644 index 000000000000..6616c082c8c2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +extern const void bpf_fentry_test1 __ksym; +extern const void bpf_fentry_test2 __ksym; +extern const void bpf_fentry_test3 __ksym; +extern const void bpf_fentry_test4 __ksym; +extern const void bpf_fentry_test5 __ksym; +extern const void bpf_fentry_test6 __ksym; +extern const void bpf_fentry_test7 __ksym; +extern const void bpf_fentry_test8 __ksym; + +int pid = 0; + +__u64 kprobe_test1_result = 0; +__u64 kprobe_test2_result = 0; +__u64 kprobe_test3_result = 0; +__u64 kprobe_test4_result = 0; +__u64 kprobe_test5_result = 0; +__u64 kprobe_test6_result = 0; +__u64 kprobe_test7_result = 0; +__u64 kprobe_test8_result = 0; + +__u64 kretprobe_test1_result = 0; +__u64 kretprobe_test2_result = 0; +__u64 kretprobe_test3_result = 0; +__u64 kretprobe_test4_result = 0; +__u64 kretprobe_test5_result = 0; +__u64 kretprobe_test6_result = 0; +__u64 kretprobe_test7_result = 0; +__u64 kretprobe_test8_result = 0; + +static void kprobe_multi_check(void *ctx, bool is_return) +{ + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + __u64 addr = bpf_get_func_ip(ctx); + +#define SET(__var, __addr) ({ \ + if ((const void *) addr == __addr) \ + __var = 1; \ +}) + + if (is_return) { + SET(kretprobe_test1_result, &bpf_fentry_test1); + SET(kretprobe_test2_result, &bpf_fentry_test2); + SET(kretprobe_test3_result, &bpf_fentry_test3); + SET(kretprobe_test4_result, &bpf_fentry_test4); + SET(kretprobe_test5_result, &bpf_fentry_test5); + SET(kretprobe_test6_result, &bpf_fentry_test6); + SET(kretprobe_test7_result, &bpf_fentry_test7); + SET(kretprobe_test8_result, &bpf_fentry_test8); + } else { + SET(kprobe_test1_result, 
&bpf_fentry_test1); + SET(kprobe_test2_result, &bpf_fentry_test2); + SET(kprobe_test3_result, &bpf_fentry_test3); + SET(kprobe_test4_result, &bpf_fentry_test4); + SET(kprobe_test5_result, &bpf_fentry_test5); + SET(kprobe_test6_result, &bpf_fentry_test6); + SET(kprobe_test7_result, &bpf_fentry_test7); + SET(kprobe_test8_result, &bpf_fentry_test8); + } + +#undef SET +} + +/* + * No tests in here, just to trigger 'bpf_fentry_test*' + * through tracing test_run + */ +SEC("fentry/bpf_modify_return_test") +int BPF_PROG(trigger) +{ + return 0; +} + +SEC("kprobe.multi/bpf_fentry_tes??") +int test_kprobe(struct pt_regs *ctx) +{ + kprobe_multi_check(ctx, false); + return 0; +} + +SEC("kretprobe.multi/bpf_fentry_test*") +int test_kretprobe(struct pt_regs *ctx) +{ + kprobe_multi_check(ctx, true); + return 0; +} diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index ca6abae9b09c..3d6217e3aff7 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -34,6 +34,13 @@ int load_kallsyms(void) if (!f) return -ENOENT; + /* + * This is called/used from multiplace places, + * load symbols just once. + */ + if (sym_cnt) + return 0; + while (fgets(buf, sizeof(buf), f)) { if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3) break; From 2c6401c966ae1fbe07eb3b8a07256dc41b596928 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:17 +0100 Subject: [PATCH 097/137] selftests/bpf: Add kprobe_multi bpf_cookie test Adding bpf_cookie test for programs attached by kprobe_multi links. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220316122419.933957-12-jolsa@kernel.org --- .../selftests/bpf/prog_tests/bpf_cookie.c | 109 ++++++++++++++++++ .../selftests/bpf/progs/kprobe_multi.c | 41 ++++--- 2 files changed, 131 insertions(+), 19 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 0612e79a9281..6671d4dc0b5d 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -7,6 +7,7 @@ #include #include #include "test_bpf_cookie.skel.h" +#include "kprobe_multi.skel.h" /* uprobe attach point */ static void trigger_func(void) @@ -63,6 +64,112 @@ cleanup: bpf_link__destroy(retlink2); } +static void kprobe_multi_test_run(struct kprobe_multi *skel) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + int err, prog_fd; + + prog_fd = bpf_program__fd(skel->progs.trigger); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + + ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result"); + ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result"); + ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result"); + ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result"); + ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result"); + ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result"); + ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result"); + ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result"); + + ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result"); + ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result"); + ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result"); + ASSERT_EQ(skel->bss->kretprobe_test4_result, 
1, "kretprobe_test4_result"); + ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result"); + ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result"); + ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result"); + ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result"); +} + +static void kprobe_multi_link_api_subtest(void) +{ + int prog_fd, link1_fd = -1, link2_fd = -1; + struct kprobe_multi *skel = NULL; + LIBBPF_OPTS(bpf_link_create_opts, opts); + unsigned long long addrs[8]; + __u64 cookies[8]; + + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms")) + goto cleanup; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + skel->bss->test_cookie = true; + +#define GET_ADDR(__sym, __addr) ({ \ + __addr = ksym_get_addr(__sym); \ + if (!ASSERT_NEQ(__addr, 0, "ksym_get_addr " #__sym)) \ + goto cleanup; \ +}) + + GET_ADDR("bpf_fentry_test1", addrs[0]); + GET_ADDR("bpf_fentry_test2", addrs[1]); + GET_ADDR("bpf_fentry_test3", addrs[2]); + GET_ADDR("bpf_fentry_test4", addrs[3]); + GET_ADDR("bpf_fentry_test5", addrs[4]); + GET_ADDR("bpf_fentry_test6", addrs[5]); + GET_ADDR("bpf_fentry_test7", addrs[6]); + GET_ADDR("bpf_fentry_test8", addrs[7]); + +#undef GET_ADDR + + cookies[0] = 1; + cookies[1] = 2; + cookies[2] = 3; + cookies[3] = 4; + cookies[4] = 5; + cookies[5] = 6; + cookies[6] = 7; + cookies[7] = 8; + + opts.kprobe_multi.addrs = (const unsigned long *) &addrs; + opts.kprobe_multi.cnt = ARRAY_SIZE(addrs); + opts.kprobe_multi.cookies = (const __u64 *) &cookies; + prog_fd = bpf_program__fd(skel->progs.test_kprobe); + + link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts); + if (!ASSERT_GE(link1_fd, 0, "link1_fd")) + goto cleanup; + + cookies[0] = 8; + cookies[1] = 7; + cookies[2] = 6; + cookies[3] = 5; + cookies[4] = 4; + cookies[5] = 3; + cookies[6] = 2; + cookies[7] = 1; + + opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN; + prog_fd = bpf_program__fd(skel->progs.test_kretprobe); + + link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts); + if (!ASSERT_GE(link2_fd, 0, "link2_fd")) + goto cleanup; + + kprobe_multi_test_run(skel); + +cleanup: + close(link1_fd); + close(link2_fd); + kprobe_multi__destroy(skel); +} + static void uprobe_subtest(struct test_bpf_cookie *skel) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); @@ -249,6 +356,8 @@ void test_bpf_cookie(void) if (test__start_subtest("kprobe")) kprobe_subtest(skel); + if (test__start_subtest("multi_kprobe_link_api")) + kprobe_multi_link_api_subtest(); if (test__start_subtest("uprobe")) uprobe_subtest(skel); if (test__start_subtest("tracepoint")) diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c index 6616c082c8c2..af27d2c6fce8 100644 --- a/tools/testing/selftests/bpf/progs/kprobe_multi.c +++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c @@ -16,6 +16,7 @@ extern const void bpf_fentry_test7 __ksym; extern const void bpf_fentry_test8 __ksym; int pid = 0; +bool test_cookie = false; __u64 kprobe_test1_result = 0; __u64 kprobe_test2_result = 0; @@ -40,31 +41,33 @@ static void kprobe_multi_check(void *ctx, bool is_return) if (bpf_get_current_pid_tgid() >> 32 != pid) return; + __u64 cookie = test_cookie ? 
bpf_get_attach_cookie(ctx) : 0; __u64 addr = bpf_get_func_ip(ctx); -#define SET(__var, __addr) ({ \ - if ((const void *) addr == __addr) \ - __var = 1; \ +#define SET(__var, __addr, __cookie) ({ \ + if (((const void *) addr == __addr) && \ + (!test_cookie || (cookie == __cookie))) \ + __var = 1; \ }) if (is_return) { - SET(kretprobe_test1_result, &bpf_fentry_test1); - SET(kretprobe_test2_result, &bpf_fentry_test2); - SET(kretprobe_test3_result, &bpf_fentry_test3); - SET(kretprobe_test4_result, &bpf_fentry_test4); - SET(kretprobe_test5_result, &bpf_fentry_test5); - SET(kretprobe_test6_result, &bpf_fentry_test6); - SET(kretprobe_test7_result, &bpf_fentry_test7); - SET(kretprobe_test8_result, &bpf_fentry_test8); + SET(kretprobe_test1_result, &bpf_fentry_test1, 8); + SET(kretprobe_test2_result, &bpf_fentry_test2, 7); + SET(kretprobe_test3_result, &bpf_fentry_test3, 6); + SET(kretprobe_test4_result, &bpf_fentry_test4, 5); + SET(kretprobe_test5_result, &bpf_fentry_test5, 4); + SET(kretprobe_test6_result, &bpf_fentry_test6, 3); + SET(kretprobe_test7_result, &bpf_fentry_test7, 2); + SET(kretprobe_test8_result, &bpf_fentry_test8, 1); } else { - SET(kprobe_test1_result, &bpf_fentry_test1); - SET(kprobe_test2_result, &bpf_fentry_test2); - SET(kprobe_test3_result, &bpf_fentry_test3); - SET(kprobe_test4_result, &bpf_fentry_test4); - SET(kprobe_test5_result, &bpf_fentry_test5); - SET(kprobe_test6_result, &bpf_fentry_test6); - SET(kprobe_test7_result, &bpf_fentry_test7); - SET(kprobe_test8_result, &bpf_fentry_test8); + SET(kprobe_test1_result, &bpf_fentry_test1, 1); + SET(kprobe_test2_result, &bpf_fentry_test2, 2); + SET(kprobe_test3_result, &bpf_fentry_test3, 3); + SET(kprobe_test4_result, &bpf_fentry_test4, 4); + SET(kprobe_test5_result, &bpf_fentry_test5, 5); + SET(kprobe_test6_result, &bpf_fentry_test6, 6); + SET(kprobe_test7_result, &bpf_fentry_test7, 7); + SET(kprobe_test8_result, &bpf_fentry_test8, 8); } #undef SET From 9271a0c7ae7a914782759d8fe22ef28fffab82fe Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:18 +0100 Subject: [PATCH 098/137] selftests/bpf: Add attach test for bpf_program__attach_kprobe_multi_opts Adding tests for bpf_program__attach_kprobe_multi_opts function, that test attach with pattern, symbols and addrs. 
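For example, one of the invalid combinations the new tests exercise (a sketch with setup elided; a pattern and explicit addresses are mutually exclusive):

  unsigned long addrs[2];       /* resolved e.g. via ksym_get_addr() */
  struct bpf_link *link;
  LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
          .addrs = addrs,
          .cnt = 2,
  );

  /* both 'pattern' and opts->addrs set -> attach must fail */
  link = bpf_program__attach_kprobe_multi_opts(prog, "ksys_*", &opts);
  /* expected: libbpf_get_error(link) == -EINVAL */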
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220316122419.933957-13-jolsa@kernel.org --- .../bpf/prog_tests/kprobe_multi_test.c | 204 +++++++++++++++++- 1 file changed, 193 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index ded6b8c8ec05..b9876b55fc0c 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -3,7 +3,7 @@ #include "kprobe_multi.skel.h" #include "trace_helpers.h" -static void kprobe_multi_test_run(struct kprobe_multi *skel) +static void kprobe_multi_test_run(struct kprobe_multi *skel, bool test_return) { LIBBPF_OPTS(bpf_test_run_opts, topts); int err, prog_fd; @@ -22,14 +22,16 @@ static void kprobe_multi_test_run(struct kprobe_multi *skel) ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result"); ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result"); - ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result"); - ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result"); - ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result"); - ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result"); - ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result"); - ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result"); - ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result"); - ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result"); + if (test_return) { + ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result"); + ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result"); + ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result"); + ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result"); + ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result"); + ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result"); + ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result"); + ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result"); + } } static void test_skel_api(void) @@ -46,7 +48,7 @@ static void test_skel_api(void) if (!ASSERT_OK(err, "kprobe_multi__attach")) goto cleanup; - kprobe_multi_test_run(skel); + kprobe_multi_test_run(skel, true); cleanup: kprobe_multi__destroy(skel); @@ -73,7 +75,7 @@ static void test_link_api(struct bpf_link_create_opts *opts) if (!ASSERT_GE(link2_fd, 0, "link_fd")) goto cleanup; - kprobe_multi_test_run(skel); + kprobe_multi_test_run(skel, true); cleanup: if (link1_fd != -1) @@ -127,6 +129,178 @@ static void test_link_api_syms(void) test_link_api(&opts); } +static void +test_attach_api(const char *pattern, struct bpf_kprobe_multi_opts *opts) +{ + struct bpf_link *link1 = NULL, *link2 = NULL; + struct kprobe_multi *skel = NULL; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + pattern, opts); + if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + + if (opts) { + opts->retprobe = true; + link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe, + pattern, opts); + if (!ASSERT_OK_PTR(link2, 
"bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + } + + kprobe_multi_test_run(skel, !!opts); + +cleanup: + bpf_link__destroy(link2); + bpf_link__destroy(link1); + kprobe_multi__destroy(skel); +} + +static void test_attach_api_pattern(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + + test_attach_api("bpf_fentry_test*", &opts); + test_attach_api("bpf_fentry_test?", NULL); +} + +static void test_attach_api_addrs(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + unsigned long long addrs[8]; + + GET_ADDR("bpf_fentry_test1", addrs[0]); + GET_ADDR("bpf_fentry_test2", addrs[1]); + GET_ADDR("bpf_fentry_test3", addrs[2]); + GET_ADDR("bpf_fentry_test4", addrs[3]); + GET_ADDR("bpf_fentry_test5", addrs[4]); + GET_ADDR("bpf_fentry_test6", addrs[5]); + GET_ADDR("bpf_fentry_test7", addrs[6]); + GET_ADDR("bpf_fentry_test8", addrs[7]); + + opts.addrs = (const unsigned long *) addrs; + opts.cnt = ARRAY_SIZE(addrs); + test_attach_api(NULL, &opts); +} + +static void test_attach_api_syms(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + const char *syms[8] = { + "bpf_fentry_test1", + "bpf_fentry_test2", + "bpf_fentry_test3", + "bpf_fentry_test4", + "bpf_fentry_test5", + "bpf_fentry_test6", + "bpf_fentry_test7", + "bpf_fentry_test8", + }; + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + test_attach_api(NULL, &opts); +} + +static void test_attach_api_fails(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + struct kprobe_multi *skel = NULL; + struct bpf_link *link = NULL; + unsigned long long addrs[2]; + const char *syms[2] = { + "bpf_fentry_test1", + "bpf_fentry_test2", + }; + __u64 cookies[2]; + + addrs[0] = ksym_get_addr("bpf_fentry_test1"); + addrs[1] = ksym_get_addr("bpf_fentry_test2"); + + if (!ASSERT_FALSE(!addrs[0] || !addrs[1], "ksym_get_addr")) + goto cleanup; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + + /* fail_1 - pattern and opts NULL */ + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + NULL, NULL); + if (!ASSERT_ERR_PTR(link, "fail_1")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_1_error")) + goto cleanup; + + /* fail_2 - both addrs and syms set */ + opts.addrs = (const unsigned long *) addrs; + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = NULL; + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + NULL, &opts); + if (!ASSERT_ERR_PTR(link, "fail_2")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_2_error")) + goto cleanup; + + /* fail_3 - pattern and addrs set */ + opts.addrs = (const unsigned long *) addrs; + opts.syms = NULL; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = NULL; + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + "ksys_*", &opts); + if (!ASSERT_ERR_PTR(link, "fail_3")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_3_error")) + goto cleanup; + + /* fail_4 - pattern and cnt set */ + opts.addrs = NULL; + opts.syms = NULL; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = NULL; + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + "ksys_*", &opts); + if (!ASSERT_ERR_PTR(link, "fail_4")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_4_error")) + goto cleanup; + + /* fail_5 - pattern and cookies */ + opts.addrs = NULL; + opts.syms = NULL; + opts.cnt = 0; + opts.cookies = cookies; + + link = 
bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + "ksys_*", &opts); + if (!ASSERT_ERR_PTR(link, "fail_5")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_5_error")) + goto cleanup; + +cleanup: + bpf_link__destroy(link); + kprobe_multi__destroy(skel); +} + void test_kprobe_multi_test(void) { if (!ASSERT_OK(load_kallsyms(), "load_kallsyms")) @@ -138,4 +312,12 @@ void test_kprobe_multi_test(void) test_link_api_syms(); if (test__start_subtest("link_api_syms")) test_link_api_addrs(); + if (test__start_subtest("attach_api_pattern")) + test_attach_api_pattern(); + if (test__start_subtest("attach_api_addrs")) + test_attach_api_addrs(); + if (test__start_subtest("attach_api_syms")) + test_attach_api_syms(); + if (test__start_subtest("attach_api_fails")) + test_attach_api_fails(); } From 318c812cebfcfdf42f254e6c1e6490a46e7714f8 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:19 +0100 Subject: [PATCH 099/137] selftests/bpf: Add cookie test for bpf_program__attach_kprobe_multi_opts Adding bpf_cookie test for programs attached by bpf_program__attach_kprobe_multi_opts API. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220316122419.933957-14-jolsa@kernel.org --- .../selftests/bpf/prog_tests/bpf_cookie.c | 68 +++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 6671d4dc0b5d..923a6139b2d8 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -170,6 +170,72 @@ cleanup: kprobe_multi__destroy(skel); } +static void kprobe_multi_attach_api_subtest(void) +{ + struct bpf_link *link1 = NULL, *link2 = NULL; + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + LIBBPF_OPTS(bpf_test_run_opts, topts); + struct kprobe_multi *skel = NULL; + const char *syms[8] = { + "bpf_fentry_test1", + "bpf_fentry_test2", + "bpf_fentry_test3", + "bpf_fentry_test4", + "bpf_fentry_test5", + "bpf_fentry_test6", + "bpf_fentry_test7", + "bpf_fentry_test8", + }; + __u64 cookies[8]; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + skel->bss->test_cookie = true; + + cookies[0] = 1; + cookies[1] = 2; + cookies[2] = 3; + cookies[3] = 4; + cookies[4] = 5; + cookies[5] = 6; + cookies[6] = 7; + cookies[7] = 8; + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = cookies; + + link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + NULL, &opts); + if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + + cookies[0] = 8; + cookies[1] = 7; + cookies[2] = 6; + cookies[3] = 5; + cookies[4] = 4; + cookies[5] = 3; + cookies[6] = 2; + cookies[7] = 1; + + opts.retprobe = true; + + link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe, + NULL, &opts); + if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + + kprobe_multi_test_run(skel); + +cleanup: + bpf_link__destroy(link2); + bpf_link__destroy(link1); + kprobe_multi__destroy(skel); +} static void uprobe_subtest(struct test_bpf_cookie *skel) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); @@ -358,6 +424,8 @@ void test_bpf_cookie(void) kprobe_subtest(skel); if (test__start_subtest("multi_kprobe_link_api")) kprobe_multi_link_api_subtest(); + if (test__start_subtest("multi_kprobe_attach_api")) + 
kprobe_multi_attach_api_subtest(); if (test__start_subtest("uprobe")) uprobe_subtest(skel); if (test__start_subtest("tracepoint")) From bc380eb9d04812eda23fd1d2904389012b50d946 Mon Sep 17 00:00:00 2001 From: Delyan Kratunov Date: Wed, 16 Mar 2022 23:37:24 +0000 Subject: [PATCH 100/137] libbpf: .text routines are subprograms in strict mode Currently, libbpf considers a single routine in .text to be a program. This is particularly confusing when it comes to library objects - a single routine meant to be used as an extern will instead be considered a bpf_program. This patch hides this compatibility behavior behind the pre-existing SEC_NAME strict mode flag. Signed-off-by: Delyan Kratunov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/018de8d0d67c04bf436055270d35d394ba393505.1647473511.git.delyank@fb.com --- tools/lib/bpf/libbpf.c | 7 +++++++ tools/lib/bpf/libbpf_legacy.h | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index f3a31478e23b..3b7eb6dcc2f4 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3832,7 +3832,14 @@ static bool prog_is_subprog(const struct bpf_object *obj, * .text programs are subprograms (even if they are not called from * other programs), because libbpf never explicitly supported mixing * SEC()-designated BPF programs and .text entry-point BPF programs. + * + * In libbpf 1.0 strict mode, we always consider .text + * programs to be subprograms. */ + + if (libbpf_mode & LIBBPF_STRICT_SEC_NAME) + return prog->sec_idx == obj->efile.text_shndx; + return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; } diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h index a283cf031665..d7bcbd01f66f 100644 --- a/tools/lib/bpf/libbpf_legacy.h +++ b/tools/lib/bpf/libbpf_legacy.h @@ -54,6 +54,10 @@ enum libbpf_strict_mode { * * Note, in this mode the program pin path will be based on the * function name instead of section name. + * + * Additionally, routines in the .text section are always considered + * sub-programs. Legacy behavior allows for a single routine in .text + * to be a program. */ LIBBPF_STRICT_SEC_NAME = 0x04, /* From 262cfb74ffdaed06e65b8b20e10241768a9a2e18 Mon Sep 17 00:00:00 2001 From: Delyan Kratunov Date: Wed, 16 Mar 2022 23:37:30 +0000 Subject: [PATCH 101/137] libbpf: Init btf_{key,value}_type_id on internal map open For internal and user maps, look up the key and value btf types on open() and not load(), so that `bpf_map_btf_value_type_id` is usable in `bpftool gen`. 
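To illustrate the effect (a sketch only, not part of the patch; the object path and map name are hypothetical):

	#include <bpf/libbpf.h>

	/* Sketch: after this change, BTF value type IDs for internal and
	 * user maps are resolved at open() time, so no bpf_object__load()
	 * is needed before querying them.
	 */
	static __u32 value_type_id_pre_load(void)
	{
		struct bpf_object *obj = bpf_object__open("prog.bpf.o");
		struct bpf_map *map;
		__u32 id = 0;

		if (!obj)
			return 0;
		map = bpf_object__find_map_by_name(obj, "my_map");
		if (map)
			id = bpf_map__btf_value_type_id(map);
		bpf_object__close(obj);
		return id;
	}
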
Signed-off-by: Delyan Kratunov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/78dbe4e457b4a05e098fc6c8f50014b680c86e4e.1647473511.git.delyank@fb.com --- tools/lib/bpf/libbpf.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 3b7eb6dcc2f4..9de42d1556ee 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1517,6 +1517,9 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name) return strdup(map_name); } +static int +bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map); + static int bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, const char *real_name, int sec_idx, void *data, size_t data_sz) @@ -1564,6 +1567,9 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, return err; } + /* failures are fine because of maps like .rodata.str1.1 */ + (void) bpf_map_find_btf_info(obj, map); + if (data) memcpy(map->mmaped, data, data_sz); @@ -2046,6 +2052,9 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) } memcpy(&map->def, def, sizeof(struct bpf_map_def)); } + + /* btf info may not exist but fill it in if it does exist */ + (void) bpf_map_find_btf_info(obj, map); } return 0; } @@ -2534,6 +2543,10 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, fill_map_from_def(map->inner_map, &inner_def); } + err = bpf_map_find_btf_info(obj, map); + if (err) + return err; + return 0; } @@ -4873,7 +4886,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b if (bpf_map__is_struct_ops(map)) create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; - if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { + if (obj->btf && btf__fd(obj->btf) >= 0) { create_attr.btf_fd = btf__fd(obj->btf); create_attr.btf_key_type_id = map->btf_key_type_id; create_attr.btf_value_type_id = map->btf_value_type_id; From 430025e5dca5ad8902e7c3092b7c975acae154c5 Mon Sep 17 00:00:00 2001 From: Delyan Kratunov Date: Wed, 16 Mar 2022 23:37:33 +0000 Subject: [PATCH 102/137] libbpf: Add subskeleton scaffolding In symmetry with bpf_object__open_skeleton(), bpf_object__open_subskeleton() performs the actual walking and linking of maps, progs, and globals described by bpf_*_skeleton objects. 
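For illustration, a hedged sketch of the flow a generated subskeleton open helper is expected to follow (allocation of the maps/progs/vars arrays is elided; the helper name is hypothetical):

	#include <stdlib.h>
	#include <bpf/libbpf.h>

	/* Sketch: generated code allocates and fills s->maps, s->progs and
	 * s->vars, then lets bpf_object__open_subskeleton() resolve them
	 * against the already-opened bpf_object.
	 */
	static struct bpf_object_subskeleton *
	subskel_open(const struct bpf_object *obj)
	{
		struct bpf_object_subskeleton *s;

		s = calloc(1, sizeof(*s));
		if (!s)
			return NULL;
		s->sz = sizeof(*s);
		s->obj = obj;
		s->map_skel_sz = sizeof(struct bpf_map_skeleton);
		s->prog_skel_sz = sizeof(struct bpf_prog_skeleton);
		s->var_skel_sz = sizeof(struct bpf_var_skeleton);
		/* ... fill s->maps/s->map_cnt, s->progs/s->prog_cnt,
		 * s->vars/s->var_cnt here ...
		 */
		if (bpf_object__open_subskeleton(s)) {
			bpf_object__destroy_subskeleton(s);
			return NULL;
		}
		return s;
	}
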
Signed-off-by: Delyan Kratunov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/6942a46fbe20e7ebf970affcca307ba616985b15.1647473511.git.delyank@fb.com --- tools/lib/bpf/libbpf.c | 147 ++++++++++++++++++++++++++++++++------- tools/lib/bpf/libbpf.h | 29 ++++++++ tools/lib/bpf/libbpf.map | 2 + 3 files changed, 153 insertions(+), 25 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 9de42d1556ee..7526419e59e0 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -11989,6 +11989,49 @@ int libbpf_num_possible_cpus(void) return tmp_cpus; } +static int populate_skeleton_maps(const struct bpf_object *obj, + struct bpf_map_skeleton *maps, + size_t map_cnt) +{ + int i; + + for (i = 0; i < map_cnt; i++) { + struct bpf_map **map = maps[i].map; + const char *name = maps[i].name; + void **mmaped = maps[i].mmaped; + + *map = bpf_object__find_map_by_name(obj, name); + if (!*map) { + pr_warn("failed to find skeleton map '%s'\n", name); + return -ESRCH; + } + + /* externs shouldn't be pre-setup from user code */ + if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) + *mmaped = (*map)->mmaped; + } + return 0; +} + +static int populate_skeleton_progs(const struct bpf_object *obj, + struct bpf_prog_skeleton *progs, + size_t prog_cnt) +{ + int i; + + for (i = 0; i < prog_cnt; i++) { + struct bpf_program **prog = progs[i].prog; + const char *name = progs[i].name; + + *prog = bpf_object__find_program_by_name(obj, name); + if (!*prog) { + pr_warn("failed to find skeleton program '%s'\n", name); + return -ESRCH; + } + } + return 0; +} + int bpf_object__open_skeleton(struct bpf_object_skeleton *s, const struct bpf_object_open_opts *opts) { @@ -11996,7 +12039,7 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s, .object_name = s->name, ); struct bpf_object *obj; - int i, err; + int err; /* Attempt to preserve opts->object_name, unless overriden by user * explicitly. 
Overwriting object name for skeletons is discouraged, @@ -12019,37 +12062,91 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s, } *s->obj = obj; - - for (i = 0; i < s->map_cnt; i++) { - struct bpf_map **map = s->maps[i].map; - const char *name = s->maps[i].name; - void **mmaped = s->maps[i].mmaped; - - *map = bpf_object__find_map_by_name(obj, name); - if (!*map) { - pr_warn("failed to find skeleton map '%s'\n", name); - return libbpf_err(-ESRCH); - } - - /* externs shouldn't be pre-setup from user code */ - if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) - *mmaped = (*map)->mmaped; + err = populate_skeleton_maps(obj, s->maps, s->map_cnt); + if (err) { + pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err); + return libbpf_err(err); } - for (i = 0; i < s->prog_cnt; i++) { - struct bpf_program **prog = s->progs[i].prog; - const char *name = s->progs[i].name; - - *prog = bpf_object__find_program_by_name(obj, name); - if (!*prog) { - pr_warn("failed to find skeleton program '%s'\n", name); - return libbpf_err(-ESRCH); - } + err = populate_skeleton_progs(obj, s->progs, s->prog_cnt); + if (err) { + pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err); + return libbpf_err(err); } return 0; } +int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s) +{ + int err, len, var_idx, i; + const char *var_name; + const struct bpf_map *map; + struct btf *btf; + __u32 map_type_id; + const struct btf_type *map_type, *var_type; + const struct bpf_var_skeleton *var_skel; + struct btf_var_secinfo *var; + + if (!s->obj) + return libbpf_err(-EINVAL); + + btf = bpf_object__btf(s->obj); + if (!btf) { + pr_warn("subskeletons require BTF at runtime (object %s)\n", + bpf_object__name(s->obj)); + return libbpf_err(-errno); + } + + err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt); + if (err) { + pr_warn("failed to populate subskeleton maps: %d\n", err); + return libbpf_err(err); + } + + err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt); + if (err) { + pr_warn("failed to populate subskeleton maps: %d\n", err); + return libbpf_err(err); + } + + for (var_idx = 0; var_idx < s->var_cnt; var_idx++) { + var_skel = &s->vars[var_idx]; + map = *var_skel->map; + map_type_id = bpf_map__btf_value_type_id(map); + map_type = btf__type_by_id(btf, map_type_id); + + if (!btf_is_datasec(map_type)) { + pr_warn("type for map '%1$s' is not a datasec: %2$s", + bpf_map__name(map), + __btf_kind_str(btf_kind(map_type))); + return libbpf_err(-EINVAL); + } + + len = btf_vlen(map_type); + var = btf_var_secinfos(map_type); + for (i = 0; i < len; i++, var++) { + var_type = btf__type_by_id(btf, var->type); + var_name = btf__name_by_offset(btf, var_type->name_off); + if (strcmp(var_name, var_skel->name) == 0) { + *var_skel->addr = map->mmaped + var->offset; + break; + } + } + } + return 0; +} + +void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s) +{ + if (!s) + return; + free(s->maps); + free(s->progs); + free(s->vars); + free(s); +} + int bpf_object__load_skeleton(struct bpf_object_skeleton *s) { int i, err; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index d5239fb4abdc..05dde85e19a6 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -1312,6 +1312,35 @@ LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s); LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s); LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s); +struct bpf_var_skeleton { + 
const char *name; + struct bpf_map **map; + void **addr; +}; + +struct bpf_object_subskeleton { + size_t sz; /* size of this struct, for forward/backward compatibility */ + + const struct bpf_object *obj; + + int map_cnt; + int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */ + struct bpf_map_skeleton *maps; + + int prog_cnt; + int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */ + struct bpf_prog_skeleton *progs; + + int var_cnt; + int var_skel_sz; /* sizeof(struct bpf_var_skeleton) */ + struct bpf_var_skeleton *vars; +}; + +LIBBPF_API int +bpf_object__open_subskeleton(struct bpf_object_subskeleton *s); +LIBBPF_API void +bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s); + struct gen_loader_opts { size_t sz; /* size of this struct, for forward/backward compatiblity */ const char *data; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 554c56e6e5d3..dd35ee58bfaa 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -442,6 +442,8 @@ LIBBPF_0.7.0 { LIBBPF_0.8.0 { global: + bpf_object__destroy_subskeleton; + bpf_object__open_subskeleton; libbpf_register_prog_handler; libbpf_unregister_prog_handler; bpf_program__attach_kprobe_multi_opts; From 00389c58ffe993782a8ba4bb5a34a102b1f6fe24 Mon Sep 17 00:00:00 2001 From: Delyan Kratunov Date: Wed, 16 Mar 2022 23:37:28 +0000 Subject: [PATCH 103/137] bpftool: Add support for subskeletons Subskeletons are headers which require an already loaded program to operate. For example, when a BPF library is linked into a larger BPF object file, the library userspace needs a way to access its own global variables without requiring knowledge about the larger program at build time. As a result, subskeletons require a loaded bpf_object to open(). Further, they find their own symbols in the larger program by walking BTF type data at run time. At this time, programs, maps, and globals are supported through non-owning pointers. Signed-off-by: Delyan Kratunov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/ca8a48b4841c72d285ecce82371bef4a899756cb.1647473511.git.delyank@fb.com --- .../bpf/bpftool/Documentation/bpftool-gen.rst | 25 + tools/bpf/bpftool/bash-completion/bpftool | 14 +- tools/bpf/bpftool/gen.c | 588 +++++++++++++++--- 3 files changed, 542 insertions(+), 85 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst index 18d646b571ec..68454ef28f58 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst @@ -25,6 +25,7 @@ GEN COMMANDS | **bpftool** **gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...] | **bpftool** **gen skeleton** *FILE* [**name** *OBJECT_NAME*] +| **bpftool** **gen subskeleton** *FILE* [**name** *OBJECT_NAME*] | **bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...] | **bpftool** **gen help** @@ -150,6 +151,30 @@ DESCRIPTION (non-read-only) data from userspace, with same simplicity as for BPF side. + **bpftool gen subskeleton** *FILE* + Generate BPF subskeleton C header file for a given *FILE*. + + Subskeletons are similar to skeletons, except they do not own + the corresponding maps, programs, or global variables. They + require that the object file used to generate them is already + loaded into a *bpf_object* by some other means. + + This functionality is useful when a library is included into a + larger BPF program. 
A subskeleton for the library would have + access to all objects and globals defined in it, without + having to know about the larger program. + + Consequently, there are only two functions defined + for subskeletons: + + - **example__open(bpf_object\*)** + Instantiates a subskeleton from an already opened (but not + necessarily loaded) **bpf_object**. + + - **example__destroy()** + Frees the storage for the subskeleton but *does not* unload + any BPF programs or maps. + **bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...] Generate a minimum BTF file as *OUTPUT*, derived from a given *INPUT* BTF file, containing all needed BTF types so one, or diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 958e1fd71b5c..5df8d72c5179 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -1003,13 +1003,25 @@ _bpftool() ;; esac ;; + subskeleton) + case $prev in + $command) + _filedir + return 0 + ;; + *) + _bpftool_once_attr 'name' + return 0 + ;; + esac + ;; min_core_btf) _filedir return 0 ;; *) [[ $prev == $object ]] && \ - COMPREPLY=( $( compgen -W 'object skeleton help min_core_btf' -- "$cur" ) ) + COMPREPLY=( $( compgen -W 'object skeleton subskeleton help min_core_btf' -- "$cur" ) ) ;; esac ;; diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 145734b4fe41..96bd2b33ccf6 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -64,11 +64,11 @@ static void get_obj_name(char *name, const char *file) sanitize_identifier(name); } -static void get_header_guard(char *guard, const char *obj_name) +static void get_header_guard(char *guard, const char *obj_name, const char *suffix) { int i; - sprintf(guard, "__%s_SKEL_H__", obj_name); + sprintf(guard, "__%s_%s__", obj_name, suffix); for (i = 0; guard[i]; i++) guard[i] = toupper(guard[i]); } @@ -231,6 +231,17 @@ static const struct btf_type *find_type_for_map(struct btf *btf, const char *map return NULL; } +static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz) +{ + if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE)) + return false; + + if (!get_map_ident(map, buf, sz)) + return false; + + return true; +} + static int codegen_datasecs(struct bpf_object *obj, const char *obj_name) { struct btf *btf = bpf_object__btf(obj); @@ -247,12 +258,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name) bpf_object__for_each_map(map, obj) { /* only generate definitions for memory-mapped internal maps */ - if (!bpf_map__is_internal(map)) - continue; - if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE)) - continue; - - if (!get_map_ident(map, map_ident, sizeof(map_ident))) + if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident))) continue; sec = find_type_for_map(btf, map_ident); @@ -280,6 +286,96 @@ out: return err; } +static bool btf_is_ptr_to_func_proto(const struct btf *btf, + const struct btf_type *v) +{ + return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type)); +} + +static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name) +{ + struct btf *btf = bpf_object__btf(obj); + struct btf_dump *d; + struct bpf_map *map; + const struct btf_type *sec, *var; + const struct btf_var_secinfo *sec_var; + int i, err = 0, vlen; + char map_ident[256], sec_ident[256]; + bool strip_mods = false, needs_typeof = false; + const char *sec_name, *var_name; + __u32 var_type_id; + + d = btf_dump__new(btf, 
codegen_btf_dump_printf, NULL, NULL); + if (!d) + return -errno; + + bpf_object__for_each_map(map, obj) { + /* only generate definitions for memory-mapped internal maps */ + if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident))) + continue; + + sec = find_type_for_map(btf, map_ident); + if (!sec) + continue; + + sec_name = btf__name_by_offset(btf, sec->name_off); + if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident))) + continue; + + strip_mods = strcmp(sec_name, ".kconfig") != 0; + printf(" struct %s__%s {\n", obj_name, sec_ident); + + sec_var = btf_var_secinfos(sec); + vlen = btf_vlen(sec); + for (i = 0; i < vlen; i++, sec_var++) { + DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, + .indent_level = 2, + .strip_mods = strip_mods, + /* we'll print the name separately */ + .field_name = "", + ); + + var = btf__type_by_id(btf, sec_var->type); + var_name = btf__name_by_offset(btf, var->name_off); + var_type_id = var->type; + + /* static variables are not exposed through BPF skeleton */ + if (btf_var(var)->linkage == BTF_VAR_STATIC) + continue; + + /* The datasec member has KIND_VAR but we want the + * underlying type of the variable (e.g. KIND_INT). + */ + var = skip_mods_and_typedefs(btf, var->type, NULL); + + printf("\t\t"); + /* Func and array members require special handling. + * Instead of producing `typename *var`, they produce + * `typeof(typename) *var`. This allows us to keep a + * similar syntax where the identifier is just prefixed + * by *, allowing us to ignore C declaration minutiae. + */ + needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var); + if (needs_typeof) + printf("typeof("); + + err = btf_dump__emit_type_decl(d, var_type_id, &opts); + if (err) + goto out; + + if (needs_typeof) + printf(")"); + + printf(" *%s;\n", var_name); + } + printf(" } %s;\n", sec_ident); + } + +out: + btf_dump__free(d); + return err; +} + static void codegen(const char *template, ...) 
{ const char *src, *end; @@ -389,11 +485,7 @@ static void codegen_asserts(struct bpf_object *obj, const char *obj_name) ", obj_name); bpf_object__for_each_map(map, obj) { - if (!bpf_map__is_internal(map)) - continue; - if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE)) - continue; - if (!get_map_ident(map, map_ident, sizeof(map_ident))) + if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident))) continue; sec = find_type_for_map(btf, map_ident); @@ -608,11 +700,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h const void *mmap_data = NULL; size_t mmap_size = 0; - if (!get_map_ident(map, ident, sizeof(ident))) - continue; - - if (!bpf_map__is_internal(map) || - !(bpf_map__map_flags(map) & BPF_F_MMAPABLE)) + if (!is_internal_mmapable_map(map, ident, sizeof(ident))) continue; codegen("\ @@ -671,11 +759,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h bpf_object__for_each_map(map, obj) { const char *mmap_flags; - if (!get_map_ident(map, ident, sizeof(ident))) - continue; - - if (!bpf_map__is_internal(map) || - !(bpf_map__map_flags(map) & BPF_F_MMAPABLE)) + if (!is_internal_mmapable_map(map, ident, sizeof(ident))) continue; if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG) @@ -727,10 +811,95 @@ out: return err; } +static void +codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped) +{ + struct bpf_map *map; + char ident[256]; + size_t i; + + if (!map_cnt) + return; + + codegen("\ + \n\ + \n\ + /* maps */ \n\ + s->map_cnt = %zu; \n\ + s->map_skel_sz = sizeof(*s->maps); \n\ + s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\ + if (!s->maps) \n\ + goto err; \n\ + ", + map_cnt + ); + i = 0; + bpf_object__for_each_map(map, obj) { + if (!get_map_ident(map, ident, sizeof(ident))) + continue; + + codegen("\ + \n\ + \n\ + s->maps[%zu].name = \"%s\"; \n\ + s->maps[%zu].map = &obj->maps.%s; \n\ + ", + i, bpf_map__name(map), i, ident); + /* memory-mapped internal maps */ + if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) { + printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n", + i, ident); + } + i++; + } +} + +static void +codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links) +{ + struct bpf_program *prog; + int i; + + if (!prog_cnt) + return; + + codegen("\ + \n\ + \n\ + /* programs */ \n\ + s->prog_cnt = %zu; \n\ + s->prog_skel_sz = sizeof(*s->progs); \n\ + s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\ + if (!s->progs) \n\ + goto err; \n\ + ", + prog_cnt + ); + i = 0; + bpf_object__for_each_program(prog, obj) { + codegen("\ + \n\ + \n\ + s->progs[%1$zu].name = \"%2$s\"; \n\ + s->progs[%1$zu].prog = &obj->progs.%2$s;\n\ + ", + i, bpf_program__name(prog)); + + if (populate_links) { + codegen("\ + \n\ + s->progs[%1$zu].link = &obj->links.%2$s;\n\ + ", + i, bpf_program__name(prog)); + } + i++; + } +} + static int do_skeleton(int argc, char **argv) { char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")]; - size_t i, map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz; + size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz; DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data; struct bpf_object *obj = NULL; @@ -821,7 +990,7 @@ static int do_skeleton(int argc, char **argv) prog_cnt++; } - get_header_guard(header_guard, obj_name); + get_header_guard(header_guard, obj_name, "SKEL_H"); if (use_loader) { codegen("\ \n\ @@ -1024,66 +1193,10 @@ static int do_skeleton(int 
argc, char **argv) ", obj_name ); - if (map_cnt) { - codegen("\ - \n\ - \n\ - /* maps */ \n\ - s->map_cnt = %zu; \n\ - s->map_skel_sz = sizeof(*s->maps); \n\ - s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\ - if (!s->maps) \n\ - goto err; \n\ - ", - map_cnt - ); - i = 0; - bpf_object__for_each_map(map, obj) { - if (!get_map_ident(map, ident, sizeof(ident))) - continue; - codegen("\ - \n\ - \n\ - s->maps[%zu].name = \"%s\"; \n\ - s->maps[%zu].map = &obj->maps.%s; \n\ - ", - i, bpf_map__name(map), i, ident); - /* memory-mapped internal maps */ - if (bpf_map__is_internal(map) && - (bpf_map__map_flags(map) & BPF_F_MMAPABLE)) { - printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n", - i, ident); - } - i++; - } - } - if (prog_cnt) { - codegen("\ - \n\ - \n\ - /* programs */ \n\ - s->prog_cnt = %zu; \n\ - s->prog_skel_sz = sizeof(*s->progs); \n\ - s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\ - if (!s->progs) \n\ - goto err; \n\ - ", - prog_cnt - ); - i = 0; - bpf_object__for_each_program(prog, obj) { - codegen("\ - \n\ - \n\ - s->progs[%1$zu].name = \"%2$s\"; \n\ - s->progs[%1$zu].prog = &obj->progs.%2$s;\n\ - s->progs[%1$zu].link = &obj->links.%2$s;\n\ - ", - i, bpf_program__name(prog)); - i++; - } - } + codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/); + codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/); + codegen("\ \n\ \n\ @@ -1141,6 +1254,311 @@ out: return err; } +/* Subskeletons are like skeletons, except they don't own the bpf_object, + * associated maps, links, etc. Instead, they know about the existence of + * variables, maps, programs and are able to find their locations + * _at runtime_ from an already loaded bpf_object. + * + * This allows for library-like BPF objects to have userspace counterparts + * with access to their own items without having to know anything about the + * final BPF object that the library was linked into. 
+ */ +static int do_subskeleton(int argc, char **argv) +{ + char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")]; + size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); + char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data; + struct bpf_object *obj = NULL; + const char *file, *var_name; + char ident[256]; + int fd, err = -1, map_type_id; + const struct bpf_map *map; + struct bpf_program *prog; + struct btf *btf; + const struct btf_type *map_type, *var_type; + const struct btf_var_secinfo *var; + struct stat st; + + if (!REQ_ARGS(1)) { + usage(); + return -1; + } + file = GET_ARG(); + + while (argc) { + if (!REQ_ARGS(2)) + return -1; + + if (is_prefix(*argv, "name")) { + NEXT_ARG(); + + if (obj_name[0] != '\0') { + p_err("object name already specified"); + return -1; + } + + strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1); + obj_name[MAX_OBJ_NAME_LEN - 1] = '\0'; + } else { + p_err("unknown arg %s", *argv); + return -1; + } + + NEXT_ARG(); + } + + if (argc) { + p_err("extra unknown arguments"); + return -1; + } + + if (use_loader) { + p_err("cannot use loader for subskeletons"); + return -1; + } + + if (stat(file, &st)) { + p_err("failed to stat() %s: %s", file, strerror(errno)); + return -1; + } + file_sz = st.st_size; + mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE)); + fd = open(file, O_RDONLY); + if (fd < 0) { + p_err("failed to open() %s: %s", file, strerror(errno)); + return -1; + } + obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0); + if (obj_data == MAP_FAILED) { + obj_data = NULL; + p_err("failed to mmap() %s: %s", file, strerror(errno)); + goto out; + } + if (obj_name[0] == '\0') + get_obj_name(obj_name, file); + + /* The empty object name allows us to use bpf_map__name and produce + * ELF section names out of it. (".data" instead of "obj.data") + */ + opts.object_name = ""; + obj = bpf_object__open_mem(obj_data, file_sz, &opts); + if (!obj) { + char err_buf[256]; + + libbpf_strerror(errno, err_buf, sizeof(err_buf)); + p_err("failed to open BPF object file: %s", err_buf); + obj = NULL; + goto out; + } + + btf = bpf_object__btf(obj); + if (!btf) { + err = -1; + p_err("need btf type information for %s", obj_name); + goto out; + } + + bpf_object__for_each_program(prog, obj) { + prog_cnt++; + } + + /* First, count how many variables we have to find. + * We need this in advance so the subskel can allocate the right + * amount of storage. + */ + bpf_object__for_each_map(map, obj) { + if (!get_map_ident(map, ident, sizeof(ident))) + continue; + + /* Also count all maps that have a name */ + map_cnt++; + + if (!is_internal_mmapable_map(map, ident, sizeof(ident))) + continue; + + map_type_id = bpf_map__btf_value_type_id(map); + if (map_type_id <= 0) { + err = map_type_id; + goto out; + } + map_type = btf__type_by_id(btf, map_type_id); + + var = btf_var_secinfos(map_type); + len = btf_vlen(map_type); + for (i = 0; i < len; i++, var++) { + var_type = btf__type_by_id(btf, var->type); + + if (btf_var(var_type)->linkage == BTF_VAR_STATIC) + continue; + + var_cnt++; + } + } + + get_header_guard(header_guard, obj_name, "SUBSKEL_H"); + codegen("\ + \n\ + /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\ + \n\ + /* THIS FILE IS AUTOGENERATED! 
*/ \n\ + #ifndef %2$s \n\ + #define %2$s \n\ + \n\ + #include \n\ + #include \n\ + #include \n\ + \n\ + struct %1$s { \n\ + struct bpf_object *obj; \n\ + struct bpf_object_subskeleton *subskel; \n\ + ", obj_name, header_guard); + + if (map_cnt) { + printf("\tstruct {\n"); + bpf_object__for_each_map(map, obj) { + if (!get_map_ident(map, ident, sizeof(ident))) + continue; + printf("\t\tstruct bpf_map *%s;\n", ident); + } + printf("\t} maps;\n"); + } + + if (prog_cnt) { + printf("\tstruct {\n"); + bpf_object__for_each_program(prog, obj) { + printf("\t\tstruct bpf_program *%s;\n", + bpf_program__name(prog)); + } + printf("\t} progs;\n"); + } + + err = codegen_subskel_datasecs(obj, obj_name); + if (err) + goto out; + + /* emit code that will allocate enough storage for all symbols */ + codegen("\ + \n\ + \n\ + #ifdef __cplusplus \n\ + static inline struct %1$s *open(const struct bpf_object *src);\n\ + static inline void destroy(struct %1$s *skel); \n\ + #endif /* __cplusplus */ \n\ + }; \n\ + \n\ + static inline void \n\ + %1$s__destroy(struct %1$s *skel) \n\ + { \n\ + if (!skel) \n\ + return; \n\ + if (skel->subskel) \n\ + bpf_object__destroy_subskeleton(skel->subskel);\n\ + free(skel); \n\ + } \n\ + \n\ + static inline struct %1$s * \n\ + %1$s__open(const struct bpf_object *src) \n\ + { \n\ + struct %1$s *obj; \n\ + struct bpf_object_subskeleton *s; \n\ + int err; \n\ + \n\ + obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\ + if (!obj) { \n\ + errno = ENOMEM; \n\ + goto err; \n\ + } \n\ + s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\ + if (!s) { \n\ + errno = ENOMEM; \n\ + goto err; \n\ + } \n\ + s->sz = sizeof(*s); \n\ + s->obj = src; \n\ + s->var_skel_sz = sizeof(*s->vars); \n\ + obj->subskel = s; \n\ + \n\ + /* vars */ \n\ + s->var_cnt = %2$d; \n\ + s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\ + if (!s->vars) { \n\ + errno = ENOMEM; \n\ + goto err; \n\ + } \n\ + ", + obj_name, var_cnt + ); + + /* walk through each symbol and emit the runtime representation */ + bpf_object__for_each_map(map, obj) { + if (!is_internal_mmapable_map(map, ident, sizeof(ident))) + continue; + + map_type_id = bpf_map__btf_value_type_id(map); + if (map_type_id <= 0) + /* skip over internal maps with no type*/ + continue; + + map_type = btf__type_by_id(btf, map_type_id); + var = btf_var_secinfos(map_type); + len = btf_vlen(map_type); + for (i = 0; i < len; i++, var++) { + var_type = btf__type_by_id(btf, var->type); + var_name = btf__name_by_offset(btf, var_type->name_off); + + if (btf_var(var_type)->linkage == BTF_VAR_STATIC) + continue; + + /* Note that we use the dot prefix in .data as the + * field access operator i.e. 
maps%s becomes maps.data + */ + codegen("\ + \n\ + \n\ + s->vars[%3$d].name = \"%1$s\"; \n\ + s->vars[%3$d].map = &obj->maps.%2$s; \n\ + s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\ + ", var_name, ident, var_idx); + + var_idx++; + } + } + + codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/); + codegen_progs_skeleton(obj, prog_cnt, false /*links*/); + + codegen("\ + \n\ + \n\ + err = bpf_object__open_subskeleton(s); \n\ + if (err) \n\ + goto err; \n\ + \n\ + return obj; \n\ + err: \n\ + %1$s__destroy(obj); \n\ + errno = -err; \n\ + return NULL; \n\ + } \n\ + \n\ + #ifdef __cplusplus \n\ + struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\ + void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\ + #endif /* __cplusplus */ \n\ + \n\ + #endif /* %2$s */ \n\ + ", + obj_name, header_guard); + err = 0; +out: + bpf_object__close(obj); + if (obj_data) + munmap(obj_data, mmap_sz); + close(fd); + return err; +} + static int do_object(int argc, char **argv) { struct bpf_linker *linker; @@ -1192,6 +1610,7 @@ static int do_help(int argc, char **argv) fprintf(stderr, "Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n" " %1$s %2$s skeleton FILE [name OBJECT_NAME]\n" + " %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n" " %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n" " %1$s %2$s help\n" "\n" @@ -1788,6 +2207,7 @@ static int do_min_core_btf(int argc, char **argv) static const struct cmd cmds[] = { { "object", do_object }, { "skeleton", do_skeleton }, + { "subskeleton", do_subskeleton }, { "min_core_btf", do_min_core_btf}, { "help", do_help }, { 0 } From 3cccbaa0332169d4ff05587062a7ed528aeddb60 Mon Sep 17 00:00:00 2001 From: Delyan Kratunov Date: Wed, 16 Mar 2022 23:37:31 +0000 Subject: [PATCH 104/137] selftests/bpf: Test subskeleton functionality This patch changes the selftests/bpf Makefile to also generate a subskel.h for every skel.h it would have normally generated. Separately, it also introduces a new subskeleton test which tests library objects, externs, weak symbols, kconfigs, and user maps. 
Signed-off-by: Delyan Kratunov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/1bd24956940bbbfe169bb34f7f87b11df52ef011.1647473511.git.delyank@fb.com --- tools/testing/selftests/bpf/.gitignore | 1 + tools/testing/selftests/bpf/Makefile | 12 ++- .../selftests/bpf/prog_tests/subskeleton.c | 78 +++++++++++++++++++ .../selftests/bpf/progs/test_subskeleton.c | 28 +++++++ .../bpf/progs/test_subskeleton_lib.c | 61 +++++++++++++++ .../bpf/progs/test_subskeleton_lib2.c | 16 ++++ 6 files changed, 194 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/bpf/prog_tests/subskeleton.c create mode 100644 tools/testing/selftests/bpf/progs/test_subskeleton.c create mode 100644 tools/testing/selftests/bpf/progs/test_subskeleton_lib.c create mode 100644 tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index a7eead8820a0..595565eb68c0 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -31,6 +31,7 @@ test_tcp_check_syncookie_user test_sysctl xdping test_cpp +*.subskel.h *.skel.h *.lskel.h /no_alu32 diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 11f5883636c3..3820608faf57 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -327,7 +327,13 @@ endef SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \ - linked_vars.skel.h linked_maps.skel.h + linked_vars.skel.h linked_maps.skel.h \ + test_subskeleton.skel.h test_subskeleton_lib.skel.h + +# In the subskeleton case, we want the test_subskeleton_lib.subskel.h file +# but that's created as a side-effect of the skel.h generation. 
+test_subskeleton.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o test_subskeleton.o +test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \ test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \ @@ -405,6 +411,7 @@ $(TRUNNER_BPF_SKELS): %.skel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o) $(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o) $(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@ + $(Q)$$(BPFTOOL) gen subskeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$(@:.skel.h=.subskel.h) $(TRUNNER_BPF_LSKELS): %.lskel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@) @@ -422,6 +429,7 @@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT) $(Q)diff $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o) $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@) $(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@ + $(Q)$$(BPFTOOL) gen subskeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$(@:.skel.h=.subskel.h) endif # ensure we set up tests.h header generation rule just once @@ -559,6 +567,6 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \ EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \ prog_tests/tests.h map_tests/tests.h verifier/tests.h \ feature bpftool \ - $(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h no_alu32 bpf_gcc bpf_testmod.ko) + $(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h no_alu32 bpf_gcc bpf_testmod.ko) .PHONY: docs docs-clean diff --git a/tools/testing/selftests/bpf/prog_tests/subskeleton.c b/tools/testing/selftests/bpf/prog_tests/subskeleton.c new file mode 100644 index 000000000000..9c31b7004f9c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/subskeleton.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include "test_subskeleton.skel.h" +#include "test_subskeleton_lib.subskel.h" + +static void subskeleton_lib_setup(struct bpf_object *obj) +{ + struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj); + + if (!ASSERT_OK_PTR(lib, "open subskeleton")) + return; + + *lib->rodata.var1 = 1; + *lib->data.var2 = 2; + lib->bss.var3->var3_1 = 3; + lib->bss.var3->var3_2 = 4; + + test_subskeleton_lib__destroy(lib); +} + +static int subskeleton_lib_subresult(struct bpf_object *obj) +{ + struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj); + int result; + + if (!ASSERT_OK_PTR(lib, "open subskeleton")) + return -EINVAL; + + result = *lib->bss.libout1; + ASSERT_EQ(result, 1 + 2 + 3 + 4 + 5 + 6, "lib subresult"); + + ASSERT_OK_PTR(lib->progs.lib_perf_handler, "lib_perf_handler"); + ASSERT_STREQ(bpf_program__name(lib->progs.lib_perf_handler), + "lib_perf_handler", "program name"); + + ASSERT_OK_PTR(lib->maps.map1, "map1"); + ASSERT_STREQ(bpf_map__name(lib->maps.map1), "map1", "map name"); + + ASSERT_EQ(*lib->data.var5, 5, "__weak var5"); + ASSERT_EQ(*lib->data.var6, 6, "extern var6"); + ASSERT_TRUE(*lib->kconfig.CONFIG_BPF_SYSCALL, "CONFIG_BPF_SYSCALL"); + + test_subskeleton_lib__destroy(lib); + return result; +} + +void test_subskeleton(void) +{ + int err, result; + struct test_subskeleton *skel; + + skel = test_subskeleton__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->rodata->rovar1 = 10; + skel->rodata->var1 = 1; + subskeleton_lib_setup(skel->obj); + + err = test_subskeleton__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + err = test_subskeleton__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + result = subskeleton_lib_subresult(skel->obj) * 10; + ASSERT_EQ(skel->bss->out1, result, "unexpected calculation"); + +cleanup: + test_subskeleton__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_subskeleton.c b/tools/testing/selftests/bpf/progs/test_subskeleton.c new file mode 100644 index 000000000000..006417974372 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_subskeleton.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include + +/* volatile to force a read, compiler may assume 0 otherwise */ +const volatile int rovar1; +int out1; + +/* Override weak symbol in test_subskeleton_lib */ +int var5 = 5; + +extern volatile bool CONFIG_BPF_SYSCALL __kconfig; + +extern int lib_routine(void); + +SEC("raw_tp/sys_enter") +int handler1(const void *ctx) +{ + (void) CONFIG_BPF_SYSCALL; + + out1 = lib_routine() * rovar1; + return 0; +} + +char LICENSE[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_subskeleton_lib.c b/tools/testing/selftests/bpf/progs/test_subskeleton_lib.c new file mode 100644 index 000000000000..ecfafe812c36 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_subskeleton_lib.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include +#include + +/* volatile to force a read */ +const volatile int var1; +volatile int var2 = 1; +struct { + int var3_1; + __s64 var3_2; +} var3; +int libout1; + +extern volatile bool CONFIG_BPF_SYSCALL __kconfig; + +int var4[4]; + +__weak int var5 SEC(".data"); + +/* Fully contained within library extern-and-definition */ +extern int var6; + +int var7 SEC(".data.custom"); + +int (*fn_ptr)(void); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __u32); + __type(value, __u32); + __uint(max_entries, 16); +} map1 SEC(".maps"); + +extern struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __u32); + __type(value, __u32); + __uint(max_entries, 16); +} map2 SEC(".maps"); + +int lib_routine(void) +{ + __u32 key = 1, value = 2; + + (void) CONFIG_BPF_SYSCALL; + bpf_map_update_elem(&map2, &key, &value, BPF_ANY); + + libout1 = var1 + var2 + var3.var3_1 + var3.var3_2 + var5 + var6; + return libout1; +} + +SEC("perf_event") +int lib_perf_handler(struct pt_regs *ctx) +{ + return 0; +} + +char LICENSE[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c b/tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c new file mode 100644 index 000000000000..80238486b7ce --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include +#include + +int var6 = 6; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __u32); + __type(value, __u32); + __uint(max_entries, 16); +} map2 SEC(".maps"); + +char LICENSE[] SEC("license") = "GPL"; From a4c9fe0ed4a13e25e43fcd44d9f89bc19ba8fbb7 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Thu, 17 Mar 2022 12:39:17 +0100 Subject: [PATCH 105/137] selftests/bpf: Fix error reporting from sock_fields programs The helper macro that records an error in BPF programs that exercise sock fields access has been inadvertently broken by adaptation work that happened in commit b18c1f0aa477 ("bpf: selftest: Adapt sock_fields test to use skel and global variables"). BPF_NOEXIST flag cannot be used to update BPF_MAP_TYPE_ARRAY. The operation always fails with -EEXIST, which in turn means the error never gets recorded, and the checks for errors always pass. Revert the change in update flags. 
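For reference, array map elements are preallocated, so every key "exists" from the moment the map is created; a minimal userspace sketch of the failure mode (array_fd is assumed to be a valid BPF_MAP_TYPE_ARRAY fd):

	#include <linux/bpf.h>
	#include <bpf/bpf.h>

	/* Sketch: BPF_NOEXIST can never succeed on an array map; only a
	 * plain overwrite (BPF_ANY) can update an existing slot.
	 */
	static int array_update_flags_demo(int array_fd)
	{
		__u32 key = 0, val = 1;
		int err;

		err = bpf_map_update_elem(array_fd, &key, &val, BPF_NOEXIST);
		/* err is expected to indicate EEXIST here */
		err = bpf_map_update_elem(array_fd, &key, &val, BPF_ANY);
		/* the overwrite succeeds */
		return err;
	}
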
Fixes: b18c1f0aa477 ("bpf: selftest: Adapt sock_fields test to use skel and global variables") Signed-off-by: Jakub Sitnicki Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220317113920.1068535-2-jakub@cloudflare.com --- tools/testing/selftests/bpf/progs/test_sock_fields.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c index 246f1f001813..3e2e3ee51cc9 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c @@ -114,7 +114,7 @@ static void tpcpy(struct bpf_tcp_sock *dst, #define RET_LOG() ({ \ linum = __LINE__; \ - bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_NOEXIST); \ + bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_ANY); \ return CG_OK; \ }) From 2d2202ba858c112b03f84d546e260c61425831a1 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Thu, 17 Mar 2022 12:39:18 +0100 Subject: [PATCH 106/137] selftests/bpf: Check dst_port only on the client socket cgroup_skb/egress programs which sock_fields test installs process packets flying in both directions, from the client to the server, and in reverse direction. Recently added dst_port check relies on the fact that destination port (remote peer port) of the socket which sends the packet is known ahead of time. This holds true only for the client socket, which connects to the known server port. Filter out any traffic that is not egressing from the client socket in the BPF program that tests reading the dst_port. Fixes: 8f50f16ff39d ("selftests/bpf: Extend verifier and bpf_sock tests for dst_port loads") Signed-off-by: Jakub Sitnicki Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220317113920.1068535-3-jakub@cloudflare.com --- tools/testing/selftests/bpf/progs/test_sock_fields.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c index 3e2e3ee51cc9..43b31aa1fcf7 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c @@ -281,6 +281,10 @@ int read_sk_dst_port(struct __sk_buff *skb) if (!sk) RET_LOG(); + /* Ignore everything but the SYN from the client socket */ + if (sk->state != BPF_TCP_SYN_SENT) + return CG_OK; + if (!sk_dst_port__load_word(sk)) RET_LOG(); if (!sk_dst_port__load_half(sk)) From e06b5bbcf3f107fb5e148714efe2decfb3b4e0ac Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Thu, 17 Mar 2022 12:39:19 +0100 Subject: [PATCH 107/137] selftests/bpf: Use constants for socket states in sock_fields test Replace magic numbers in BPF code with constants from bpf.h, so that they don't require an explanation in the comments. 
Signed-off-by: Jakub Sitnicki Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220317113920.1068535-4-jakub@cloudflare.com --- tools/testing/selftests/bpf/progs/test_sock_fields.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c index 43b31aa1fcf7..43a17fdef226 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c @@ -134,11 +134,11 @@ int egress_read_sock_fields(struct __sk_buff *skb) if (!sk) RET_LOG(); - /* Not the testing egress traffic or - * TCP_LISTEN (10) socket will be copied at the ingress side. + /* Not testing the egress traffic or the listening socket, + * which are covered by the cgroup_skb/ingress test program. */ if (sk->family != AF_INET6 || !is_loopback6(sk->src_ip6) || - sk->state == 10) + sk->state == BPF_TCP_LISTEN) return CG_OK; if (sk->src_port == bpf_ntohs(srv_sa6.sin6_port)) { @@ -232,8 +232,8 @@ int ingress_read_sock_fields(struct __sk_buff *skb) sk->src_port != bpf_ntohs(srv_sa6.sin6_port)) return CG_OK; - /* Only interested in TCP_LISTEN */ - if (sk->state != 10) + /* Only interested in the listening socket */ + if (sk->state != BPF_TCP_LISTEN) return CG_OK; /* It must be a fullsock for cgroup_skb/ingress prog */ From deb59400464468352b4d7827420e04f1add94726 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Thu, 17 Mar 2022 12:39:20 +0100 Subject: [PATCH 108/137] selftests/bpf: Fix test for 4-byte load from dst_port on big-endian The check for 4-byte load from dst_port offset into bpf_sock is failing on big-endian architecture - s390. The bpf access converter rewrites the 4-byte load to a 2-byte load from sock_common at skc_dport offset, as shown below. * s390 / llvm-objdump -S --no-show-raw-insn 00000000000002a0 : 84: r1 = *(u32 *)(r1 + 48) 85: w0 = 1 86: if w1 == 51966 goto +1 87: w0 = 0 00000000000002c0 : 88: exit * s390 / bpftool prog dump xlated _Bool sk_dst_port__load_word(struct bpf_sock * sk): 35: (69) r1 = *(u16 *)(r1 +12) 36: (bc) w1 = w1 37: (b4) w0 = 1 38: (16) if w1 == 0xcafe goto pc+1 39: (b4) w0 = 0 40: (95) exit * x86_64 / llvm-objdump -S --no-show-raw-insn 00000000000002a0 : 84: r1 = *(u32 *)(r1 + 48) 85: w0 = 1 86: if w1 == 65226 goto +1 87: w0 = 0 00000000000002c0 : 88: exit * x86_64 / bpftool prog dump xlated _Bool sk_dst_port__load_word(struct bpf_sock * sk): 33: (69) r1 = *(u16 *)(r1 +12) 34: (b4) w0 = 1 35: (16) if w1 == 0xfeca goto pc+1 36: (b4) w0 = 0 37: (95) exit This leads to surprises if we treat the destination register contents as a 32-bit value, ignoring the fact that in reality it contains a 16-bit value. On little-endian the register contents reflect the bpf_sock struct definition, where the lower 16-bits contain the port number: struct bpf_sock { ... __be16 dst_port; /* offset 48 */ __u16 :16; ... }; However, on big-endian the register contents suggest that field the layout of bpf_sock struct is as so: struct bpf_sock { ... __u16 :16; /* offset 48 */ __be16 dst_port; ... }; Account for this quirky access conversion in the test case exercising the 4-byte load by treating the result as 16-bit wide. 
Signed-off-by: Jakub Sitnicki Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220317113920.1068535-5-jakub@cloudflare.com --- tools/testing/selftests/bpf/progs/test_sock_fields.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c index 43a17fdef226..9f4b8f9f1181 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c @@ -251,10 +251,16 @@ int ingress_read_sock_fields(struct __sk_buff *skb) return CG_OK; } +/* + * NOTE: 4-byte load from bpf_sock at dst_port offset is quirky. It + * gets rewritten by the access converter to a 2-byte load for + * backward compatibility. Treating the load result as a be16 value + * makes the code portable across little- and big-endian platforms. + */ static __noinline bool sk_dst_port__load_word(struct bpf_sock *sk) { __u32 *word = (__u32 *)&sk->dst_port; - return word[0] == bpf_htonl(0xcafe0000); + return word[0] == bpf_htons(0xcafe); } static __noinline bool sk_dst_port__load_half(struct bpf_sock *sk) From 08063b4bc1581bbdcae99da4a54f546a50045fc0 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 18 Mar 2022 08:01:06 -0700 Subject: [PATCH 109/137] bpftool: Add BPF_TRACE_KPROBE_MULTI to attach type names table BPF_TRACE_KPROBE_MULTI is a new attach type name, add it to bpftool's table. This fixes a currently failing CI bpftool check. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220318150106.2933343-1-andrii@kernel.org --- tools/bpf/bpftool/common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 606743c6db41..0c1e06cf50b9 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -56,7 +56,6 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = { [BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6", [BPF_CGROUP_GETSOCKOPT] = "getsockopt", [BPF_CGROUP_SETSOCKOPT] = "setsockopt", - [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser", [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict", [BPF_SK_SKB_VERDICT] = "sk_skb_verdict", @@ -76,6 +75,7 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = { [BPF_SK_REUSEPORT_SELECT] = "sk_skb_reuseport_select", [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_skb_reuseport_select_or_migrate", [BPF_PERF_EVENT] = "perf_event", + [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi", }; void p_err(const char *fmt, ...) From edc3ec09ab706c45e955f7a52f0904b4ed649ca9 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Thu, 17 Mar 2022 17:29:43 +0530 Subject: [PATCH 110/137] bpf: Factor out fd returning from bpf_btf_find_by_name_kind In next few patches, we need a helper that searches all kernel BTFs (vmlinux and module BTFs), and finds the type denoted by 'name' and 'kind'. Turns out bpf_btf_find_by_name_kind already does the same thing, but it instead returns a BTF ID and optionally fd (if module BTF). This is used for relocating ksyms in BPF loader code (bpftool gen skel -L). We extract the core code out into a new helper bpf_find_btf_id, which returns the BTF ID in the return value, and BTF pointer in an out parameter. The reference for the returned BTF pointer is always raised, hence user must either transfer it (e.g. to a fd), or release it after use. 
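A hedged sketch of the resulting calling convention on the kernel side (the looked-up type name is arbitrary):

	/* Sketch: on success ret is a positive BTF ID and *btf holds a
	 * raised reference that the caller must drop with btf_put() or
	 * transfer, e.g. by wrapping it into an fd.
	 */
	static int find_and_use_btf_id(void)
	{
		struct btf *btf = NULL;
		s32 ret;

		ret = bpf_find_btf_id("task_struct", BTF_KIND_STRUCT, &btf);
		if (ret < 0)
			return ret;
		/* ... use btf and ret (the type ID) ... */
		btf_put(btf);
		return 0;
	}
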
Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220317115957.3193097-2-memxor@gmail.com --- kernel/bpf/btf.c | 90 ++++++++++++++++++++++++++++-------------------- 1 file changed, 53 insertions(+), 37 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 8b34563a832e..17b9adcd88d3 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -525,6 +525,48 @@ s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind) return -ENOENT; } +static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p) +{ + struct btf *btf; + s32 ret; + int id; + + btf = bpf_get_btf_vmlinux(); + if (IS_ERR(btf)) + return PTR_ERR(btf); + + ret = btf_find_by_name_kind(btf, name, kind); + /* ret is never zero, since btf_find_by_name_kind returns + * positive btf_id or negative error. + */ + if (ret > 0) { + btf_get(btf); + *btf_p = btf; + return ret; + } + + /* If name is not found in vmlinux's BTF then search in module's BTFs */ + spin_lock_bh(&btf_idr_lock); + idr_for_each_entry(&btf_idr, btf, id) { + if (!btf_is_module(btf)) + continue; + /* linear search could be slow hence unlock/lock + * the IDR to avoid holding it for too long + */ + btf_get(btf); + spin_unlock_bh(&btf_idr_lock); + ret = btf_find_by_name_kind(btf, name, kind); + if (ret > 0) { + *btf_p = btf; + return ret; + } + spin_lock_bh(&btf_idr_lock); + btf_put(btf); + } + spin_unlock_bh(&btf_idr_lock); + return ret; +} + const struct btf_type *btf_type_skip_modifiers(const struct btf *btf, u32 id, u32 *res_id) { @@ -6562,7 +6604,8 @@ static struct btf *btf_get_module_btf(const struct module *module) BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags) { - struct btf *btf = NULL; + int btf_obj_fd = 0; long ret; if (flags) @@ -6571,44 +6614,17 @@ BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int if (name_sz <= 1 || name[name_sz - 1]) return -EINVAL; - btf = bpf_get_btf_vmlinux(); - if (IS_ERR(btf)) - return PTR_ERR(btf); - - ret = btf_find_by_name_kind(btf, name, kind); - /* ret is never zero, since btf_find_by_name_kind returns - * positive btf_id or negative error. 
- */ - if (ret < 0) { - struct btf *mod_btf; - int id; - - /* If name is not found in vmlinux's BTF then search in module's BTFs */ - spin_lock_bh(&btf_idr_lock); - idr_for_each_entry(&btf_idr, mod_btf, id) { - if (!btf_is_module(mod_btf)) - continue; - /* linear search could be slow hence unlock/lock - * the IDR to avoiding holding it for too long - */ - btf_get(mod_btf); - spin_unlock_bh(&btf_idr_lock); - ret = btf_find_by_name_kind(mod_btf, name, kind); - if (ret > 0) { - int btf_obj_fd; - - btf_obj_fd = __btf_new_fd(mod_btf); - if (btf_obj_fd < 0) { - btf_put(mod_btf); - return btf_obj_fd; - } - return ret | (((u64)btf_obj_fd) << 32); - } - spin_lock_bh(&btf_idr_lock); - btf_put(mod_btf); + ret = bpf_find_btf_id(name, kind, &btf); + if (ret > 0 && btf_is_module(btf)) { + btf_obj_fd = __btf_new_fd(btf); + if (btf_obj_fd < 0) { + btf_put(btf); + return btf_obj_fd; } - spin_unlock_bh(&btf_idr_lock); + return ret | (((u64)btf_obj_fd) << 32); } + if (ret > 0) + btf_put(btf); return ret; } From 9492450fd28736262dea9143ebb3afc2c131ace1 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Thu, 17 Mar 2022 17:29:51 +0530 Subject: [PATCH 111/137] bpf: Always raise reference in btf_get_module_btf Align it with helpers like bpf_find_btf_id, so all functions returning BTF in an out parameter follow the same rule of raising the reference consistently, regardless of module or vmlinux BTF. Adjust existing callers to handle the change accordingly. Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220317115957.3193097-10-memxor@gmail.com --- kernel/bpf/btf.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 17b9adcd88d3..6d9e711cb5d4 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6572,20 +6572,23 @@ struct module *btf_try_get_module(const struct btf *btf) return res; } -/* Returns struct btf corresponding to the struct module - * - * This function can return NULL or ERR_PTR. Note that caller must - * release reference for struct btf iff btf_is_module is true. +/* Returns struct btf corresponding to the struct module. + * This function can return NULL or ERR_PTR. */ static struct btf *btf_get_module_btf(const struct module *module) { #ifdef CONFIG_DEBUG_INFO_BTF_MODULES struct btf_module *btf_mod, *tmp; #endif + struct btf *btf = NULL; + + if (!module) { + btf = bpf_get_btf_vmlinux(); + if (!IS_ERR(btf)) + btf_get(btf); + return btf; + } - if (!module) - return bpf_get_btf_vmlinux(); #ifdef CONFIG_DEBUG_INFO_BTF_MODULES mutex_lock(&btf_module_mutex); list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { @@ -6823,9 +6826,7 @@ int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, hook = bpf_prog_type_to_kfunc_hook(prog_type); ret = btf_populate_kfunc_set(btf, hook, kset); - /* reference is only taken for module BTF */ - if (btf_is_module(btf)) - btf_put(btf); + btf_put(btf); return ret; } EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set); From a8fee96202e279441d0e52d83eb100bd4a6d6272 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sat, 19 Mar 2022 17:19:11 -0700 Subject: [PATCH 112/137] libbpf: Avoid NULL deref when initializing map BTF info If a BPF object doesn't have BTF info, don't attempt to search for BTF types describing BPF map key or value layout. 
Fixes: 262cfb74ffda ("libbpf: Init btf_{key,value}_type_id on internal map open") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220320001911.3640917-1-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 7526419e59e0..2efe9431c1ba 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4197,6 +4197,9 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) __u32 key_type_id = 0, value_type_id = 0; int ret; + if (!obj->btf) + return -ENOENT; + /* if it's BTF-defined map, we don't need to search for type IDs. * For struct_ops map, it does not need btf_key_type_id and * btf_value_type_id. From b00fa38a9c1cba044a32a601b49a55a18ed719d1 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 17 Mar 2022 21:55:52 -0700 Subject: [PATCH 113/137] bpf: Enable non-atomic allocations in local storage Currently, local storage memory can only be allocated atomically (GFP_ATOMIC). This restriction is too strict for sleepable bpf programs. In this patch, the verifier detects whether the program is sleepable, and passes the corresponding GFP_KERNEL or GFP_ATOMIC flag as a 5th argument to bpf_task/sk/inode_storage_get. This flag will propagate down to the local storage functions that allocate memory. Please note that bpf_task/sk/inode_storage_update_elem functions are invoked by userspace applications through syscalls. Preemption is disabled before bpf_task/sk/inode_storage_update_elem is called, which means they will always have to allocate memory atomically. Signed-off-by: Joanne Koong Signed-off-by: Alexei Starovoitov Acked-by: KP Singh Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220318045553.3091807-2-joannekoong@fb.com --- include/linux/bpf_local_storage.h | 7 ++-- kernel/bpf/bpf_inode_storage.c | 9 ++--- kernel/bpf/bpf_local_storage.c | 58 ++++++++++++++++++++----------- kernel/bpf/bpf_task_storage.c | 10 +++--- kernel/bpf/verifier.c | 20 +++++++++++ net/core/bpf_sk_storage.c | 21 ++++++----- 6 files changed, 84 insertions(+), 41 deletions(-) diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index 37b3906af8b1..493e63258497 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -154,16 +154,17 @@ void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem); struct bpf_local_storage_elem * bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value, - bool charge_mem); + bool charge_mem, gfp_t gfp_flags); int bpf_local_storage_alloc(void *owner, struct bpf_local_storage_map *smap, - struct bpf_local_storage_elem *first_selem); + struct bpf_local_storage_elem *first_selem, + gfp_t gfp_flags); struct bpf_local_storage_data * bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, - void *value, u64 map_flags); + void *value, u64 map_flags, gfp_t gfp_flags); void bpf_local_storage_free_rcu(struct rcu_head *rcu); diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c index e29d9e3d853e..96be8d518885 100644 --- a/kernel/bpf/bpf_inode_storage.c +++ b/kernel/bpf/bpf_inode_storage.c @@ -136,7 +136,7 @@ static int bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key, sdata = bpf_local_storage_update(f->f_inode, (struct bpf_local_storage_map *)map, - value, map_flags); + value, map_flags, GFP_ATOMIC); fput(f); return PTR_ERR_OR_ZERO(sdata); } @@ 
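To make the effect concrete, here is a hedged sketch of a sleepable BPF-side user (map and struct names are hypothetical; the helper, section name, and flags are real):

	struct storage {
		long value;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_INODE_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC);
		__type(key, int);
		__type(value, struct storage);
	} inode_map SEC(".maps");

	SEC("lsm.s/file_open") /* the ".s" suffix marks the program sleepable */
	int BPF_PROG(file_open, struct file *file)
	{
		struct storage *s;

		/* the verifier appends the hidden 5th gfp_t argument:
		 * GFP_KERNEL here because the program is sleepable,
		 * GFP_ATOMIC for non-sleepable programs
		 */
		s = bpf_inode_storage_get(&inode_map, file->f_inode, 0,
					  BPF_LOCAL_STORAGE_GET_F_CREATE);
		return 0;
	}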
-169,8 +169,9 @@ static int bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key) return err; } -BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode, - void *, value, u64, flags) +/* *gfp_flags* is a hidden argument provided by the verifier */ +BPF_CALL_5(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode, + void *, value, u64, flags, gfp_t, gfp_flags) { struct bpf_local_storage_data *sdata; @@ -196,7 +197,7 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode, if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) { sdata = bpf_local_storage_update( inode, (struct bpf_local_storage_map *)map, value, - BPF_NOEXIST); + BPF_NOEXIST, gfp_flags); return IS_ERR(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data; } diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c index 092a1ac772d7..01aa2b51ec4d 100644 --- a/kernel/bpf/bpf_local_storage.c +++ b/kernel/bpf/bpf_local_storage.c @@ -63,7 +63,7 @@ static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem) struct bpf_local_storage_elem * bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, - void *value, bool charge_mem) + void *value, bool charge_mem, gfp_t gfp_flags) { struct bpf_local_storage_elem *selem; @@ -71,7 +71,7 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, return NULL; selem = bpf_map_kzalloc(&smap->map, smap->elem_size, - GFP_ATOMIC | __GFP_NOWARN); + gfp_flags | __GFP_NOWARN); if (selem) { if (value) memcpy(SDATA(selem)->data, value, smap->map.value_size); @@ -282,7 +282,8 @@ static int check_flags(const struct bpf_local_storage_data *old_sdata, int bpf_local_storage_alloc(void *owner, struct bpf_local_storage_map *smap, - struct bpf_local_storage_elem *first_selem) + struct bpf_local_storage_elem *first_selem, + gfp_t gfp_flags) { struct bpf_local_storage *prev_storage, *storage; struct bpf_local_storage **owner_storage_ptr; @@ -293,7 +294,7 @@ int bpf_local_storage_alloc(void *owner, return err; storage = bpf_map_kzalloc(&smap->map, sizeof(*storage), - GFP_ATOMIC | __GFP_NOWARN); + gfp_flags | __GFP_NOWARN); if (!storage) { err = -ENOMEM; goto uncharge; @@ -350,10 +351,10 @@ uncharge: */ struct bpf_local_storage_data * bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, - void *value, u64 map_flags) + void *value, u64 map_flags, gfp_t gfp_flags) { struct bpf_local_storage_data *old_sdata = NULL; - struct bpf_local_storage_elem *selem; + struct bpf_local_storage_elem *selem = NULL; struct bpf_local_storage *local_storage; unsigned long flags; int err; @@ -365,6 +366,9 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, !map_value_has_spin_lock(&smap->map))) return ERR_PTR(-EINVAL); + if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST) + return ERR_PTR(-EINVAL); + local_storage = rcu_dereference_check(*owner_storage(smap, owner), bpf_rcu_lock_held()); if (!local_storage || hlist_empty(&local_storage->list)) { @@ -373,11 +377,11 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, if (err) return ERR_PTR(err); - selem = bpf_selem_alloc(smap, owner, value, true); + selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags); if (!selem) return ERR_PTR(-ENOMEM); - err = bpf_local_storage_alloc(owner, smap, selem); + err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags); if (err) { kfree(selem); mem_uncharge(smap, owner, smap->elem_size); @@ -404,6 +408,12 @@ 
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, } } + if (gfp_flags == GFP_KERNEL) { + selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags); + if (!selem) + return ERR_PTR(-ENOMEM); + } + raw_spin_lock_irqsave(&local_storage->lock, flags); /* Recheck local_storage->list under local_storage->lock */ @@ -429,19 +439,21 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, goto unlock; } - /* local_storage->lock is held. Hence, we are sure - * we can unlink and uncharge the old_sdata successfully - * later. Hence, instead of charging the new selem now - * and then uncharge the old selem later (which may cause - * a potential but unnecessary charge failure), avoid taking - * a charge at all here (the "!old_sdata" check) and the - * old_sdata will not be uncharged later during - * bpf_selem_unlink_storage_nolock(). - */ - selem = bpf_selem_alloc(smap, owner, value, !old_sdata); - if (!selem) { - err = -ENOMEM; - goto unlock_err; + if (gfp_flags != GFP_KERNEL) { + /* local_storage->lock is held. Hence, we are sure + * we can unlink and uncharge the old_sdata successfully + * later. Hence, instead of charging the new selem now + * and then uncharge the old selem later (which may cause + * a potential but unnecessary charge failure), avoid taking + * a charge at all here (the "!old_sdata" check) and the + * old_sdata will not be uncharged later during + * bpf_selem_unlink_storage_nolock(). + */ + selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags); + if (!selem) { + err = -ENOMEM; + goto unlock_err; + } } /* First, link the new selem to the map */ @@ -463,6 +475,10 @@ unlock: unlock_err: raw_spin_unlock_irqrestore(&local_storage->lock, flags); + if (selem) { + mem_uncharge(smap, owner, smap->elem_size); + kfree(selem); + } return ERR_PTR(err); } diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c index 5da7bed0f5f6..6638a0ecc3d2 100644 --- a/kernel/bpf/bpf_task_storage.c +++ b/kernel/bpf/bpf_task_storage.c @@ -174,7 +174,8 @@ static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key, bpf_task_storage_lock(); sdata = bpf_local_storage_update( - task, (struct bpf_local_storage_map *)map, value, map_flags); + task, (struct bpf_local_storage_map *)map, value, map_flags, + GFP_ATOMIC); bpf_task_storage_unlock(); err = PTR_ERR_OR_ZERO(sdata); @@ -226,8 +227,9 @@ out: return err; } -BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *, - task, void *, value, u64, flags) +/* *gfp_flags* is a hidden argument provided by the verifier */ +BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *, + task, void *, value, u64, flags, gfp_t, gfp_flags) { struct bpf_local_storage_data *sdata; @@ -250,7 +252,7 @@ BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *, (flags & BPF_LOCAL_STORAGE_GET_F_CREATE)) sdata = bpf_local_storage_update( task, (struct bpf_local_storage_map *)map, value, - BPF_NOEXIST); + BPF_NOEXIST, gfp_flags); unlock: bpf_task_storage_unlock(); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 0287176bfe9a..6347dcdee1fd 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -13492,6 +13492,26 @@ static int do_misc_fixups(struct bpf_verifier_env *env) goto patch_call_imm; } + if (insn->imm == BPF_FUNC_task_storage_get || + insn->imm == BPF_FUNC_sk_storage_get || + insn->imm == BPF_FUNC_inode_storage_get) { + if (env->prog->aux->sleepable) + insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, 
(__s32)GFP_KERNEL); + else + insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__s32)GFP_ATOMIC); + insn_buf[1] = *insn; + cnt = 2; + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); + if (!new_prog) + return -ENOMEM; + + delta += cnt - 1; + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + goto patch_call_imm; + } + /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * and other inlining handlers are currently limited to 64 bit * only. diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index d9c37fd10809..7aff1206a851 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -141,7 +141,7 @@ static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key, if (sock) { sdata = bpf_local_storage_update( sock->sk, (struct bpf_local_storage_map *)map, value, - map_flags); + map_flags, GFP_ATOMIC); sockfd_put(sock); return PTR_ERR_OR_ZERO(sdata); } @@ -172,7 +172,7 @@ bpf_sk_storage_clone_elem(struct sock *newsk, { struct bpf_local_storage_elem *copy_selem; - copy_selem = bpf_selem_alloc(smap, newsk, NULL, true); + copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, GFP_ATOMIC); if (!copy_selem) return NULL; @@ -230,7 +230,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk) bpf_selem_link_map(smap, copy_selem); bpf_selem_link_storage_nolock(new_sk_storage, copy_selem); } else { - ret = bpf_local_storage_alloc(newsk, smap, copy_selem); + ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC); if (ret) { kfree(copy_selem); atomic_sub(smap->elem_size, @@ -255,8 +255,9 @@ out: return ret; } -BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, - void *, value, u64, flags) +/* *gfp_flags* is a hidden argument provided by the verifier */ +BPF_CALL_5(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, + void *, value, u64, flags, gfp_t, gfp_flags) { struct bpf_local_storage_data *sdata; @@ -277,7 +278,7 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, refcount_inc_not_zero(&sk->sk_refcnt)) { sdata = bpf_local_storage_update( sk, (struct bpf_local_storage_map *)map, value, - BPF_NOEXIST); + BPF_NOEXIST, gfp_flags); /* sk must be a fullsock (guaranteed by verifier), * so sock_gen_put() is unnecessary. */ @@ -417,14 +418,16 @@ static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog) return false; } -BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk, - void *, value, u64, flags) +/* *gfp_flags* is a hidden argument provided by the verifier */ +BPF_CALL_5(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk, + void *, value, u64, flags, gfp_t, gfp_flags) { WARN_ON_ONCE(!bpf_rcu_lock_held()); if (in_hardirq() || in_nmi()) return (unsigned long)NULL; - return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags); + return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags, + gfp_flags); } BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map, From 0e790cbb1af97473d3ea53616f8584a71f80fc3b Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 17 Mar 2022 21:55:53 -0700 Subject: [PATCH 114/137] selftests/bpf: Test for associating multiple elements with the local storage This patch adds a few calls to the existing local storage selftest to test that we can associate multiple elements with the local storage. The sleepable program's call to bpf_sk_storage_get with sk_storage_map2 will lead to an allocation of a new selem under the GFP_KERNEL flag. 
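Put differently (a hedged sketch; the real code is in the selftest diff below), two storage maps can now hold independent elements for the same socket:

	struct local_storage *a, *b;

	a = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
			       BPF_LOCAL_STORAGE_GET_F_CREATE);
	b = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0,
			       BPF_LOCAL_STORAGE_GET_F_CREATE);
	/* a and b are distinct selems; bpf_sk_storage_delete() on one
	 * map leaves the element in the other map intact
	 */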
Signed-off-by: Joanne Koong Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220318045553.3091807-3-joannekoong@fb.com --- .../selftests/bpf/progs/local_storage.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c index 9b1f9b75d5c2..19423ed862e3 100644 --- a/tools/testing/selftests/bpf/progs/local_storage.c +++ b/tools/testing/selftests/bpf/progs/local_storage.c @@ -36,6 +36,13 @@ struct { __type(value, struct local_storage); } sk_storage_map SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE); + __type(key, int); + __type(value, struct local_storage); +} sk_storage_map2 SEC(".maps"); + struct { __uint(type, BPF_MAP_TYPE_TASK_STORAGE); __uint(map_flags, BPF_F_NO_PREALLOC); @@ -115,7 +122,19 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address, if (storage->value != DUMMY_STORAGE_VALUE) sk_storage_result = -1; + /* This tests that we can associate multiple elements + * with the local storage. + */ + storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!storage) + return 0; + err = bpf_sk_storage_delete(&sk_storage_map, sock->sk); + if (err) + return 0; + + err = bpf_sk_storage_delete(&sk_storage_map2, sock->sk); if (!err) sk_storage_result = err; From 058ec4a7d9cf77238c73ad9f1e1a3ed9a29afcab Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Sat, 19 Mar 2022 19:33:54 +0100 Subject: [PATCH 115/137] bpf: Treat bpf_sk_lookup remote_port as a 2-byte field In commit 9a69e2b385f4 ("bpf: Make remote_port field in struct bpf_sk_lookup 16-bit wide") the remote_port field has been split up and re-declared from u32 to be16. However, the accompanying changes to the context access converter have not been well thought through when it comes to big-endian platforms. Today 2-byte wide loads from offsetof(struct bpf_sk_lookup, remote_port) are handled as narrow loads from a 4-byte wide field. This by itself is not enough to create a problem, but when we combine 1. 32-bit wide access to ->remote_port backed by a 16-bit wide load, with 2. the inherent difference between little- and big-endian in how narrow loads have to be handled (see bpf_ctx_narrow_access_offset), we get inconsistent results for 2-byte loads from &ctx->remote_port on LE and BE architectures. This in turn makes BPF C code for the common case of a 2-byte load from ctx->remote_port not portable. To rectify it, inform the context access converter that remote_port is a 2-byte wide field, and only 1-byte loads need to be treated as narrow loads. At the same time, special-case the 4-byte load from &ctx->remote_port to continue handling it the same way as we do today, in order to keep existing BPF programs working.
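After this change, the common 2-byte access pattern is portable across endianness (a hedged BPF C sketch; the port value is illustrative):

	__be16 port = ctx->remote_port;	/* 2-byte load, same result on LE and BE */

	if (port == bpf_htons(4321))
		return SK_PASS;

	/* the legacy 4-byte load still verifies; it is rewritten to a 2-byte load */
	__u32 wide = *(__u32 *)&ctx->remote_port;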
Fixes: 9a69e2b385f4 ("bpf: Make remote_port field in struct bpf_sk_lookup 16-bit wide") Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220319183356.233666-2-jakub@cloudflare.com --- net/core/filter.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 03655f2074ae..a7044e98765e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -10989,13 +10989,24 @@ static bool sk_lookup_is_valid_access(int off, int size, case bpf_ctx_range(struct bpf_sk_lookup, local_ip4): case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]): case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]): - case offsetof(struct bpf_sk_lookup, remote_port) ... - offsetof(struct bpf_sk_lookup, local_ip4) - 1: case bpf_ctx_range(struct bpf_sk_lookup, local_port): case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex): bpf_ctx_record_field_size(info, sizeof(__u32)); return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32)); + case bpf_ctx_range(struct bpf_sk_lookup, remote_port): + /* Allow 4-byte access to 2-byte field for backward compatibility */ + if (size == sizeof(__u32)) + return true; + bpf_ctx_record_field_size(info, sizeof(__be16)); + return bpf_ctx_narrow_access_ok(off, size, sizeof(__be16)); + + case offsetofend(struct bpf_sk_lookup, remote_port) ... + offsetof(struct bpf_sk_lookup, local_ip4) - 1: + /* Allow access to zero padding for backward compatibility */ + bpf_ctx_record_field_size(info, sizeof(__u16)); + return bpf_ctx_narrow_access_ok(off, size, sizeof(__u16)); + default: return false; } @@ -11077,6 +11088,11 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type, sport, 2, target_size)); break; + case offsetofend(struct bpf_sk_lookup, remote_port): + *target_size = 2; + *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); + break; + case offsetof(struct bpf_sk_lookup, local_port): *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct bpf_sk_lookup_kern, From 3c69611b8926f8e74fcf76bd97ae0e5dafbeb26a Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Sat, 19 Mar 2022 19:33:55 +0100 Subject: [PATCH 116/137] selftests/bpf: Fix u8 narrow load checks for bpf_sk_lookup remote_port In commit 9a69e2b385f4 ("bpf: Make remote_port field in struct bpf_sk_lookup 16-bit wide") ->remote_port field changed from __u32 to __be16. However, narrow load tests which exercise 1-byte sized loads from offsetof(struct bpf_sk_lookup, remote_port) were not adapted to reflect the change. As a result, on little-endian we continue testing loads from addresses: - (__u8 *)&ctx->remote_port + 3 - (__u8 *)&ctx->remote_port + 4 which map to the zero padding following the remote_port field, and don't break the tests because there is no observable change. While on big-endian, we observe breakage because tests expect to see zeros for values loaded from: - (__u8 *)&ctx->remote_port - 1 - (__u8 *)&ctx->remote_port - 2 Above addresses map to ->remote_ip6 field, which precedes ->remote_port, and are populated during the bpf_sk_lookup IPv6 tests. Unsurprisingly, on s390x we observe: #136/38 sk_lookup/narrow access to ctx v4:OK #136/39 sk_lookup/narrow access to ctx v6:FAIL Fix it by removing the checks for 1-byte loads from offsets outside of the ->remote_port field.
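For reference, the LSB() helper used by these checks indexes single bytes of a field, roughly (a hedged reconstruction of the selftest macro):

	#define LSB(value, index) (((__u8 *)&(value))[index] & 0xff)

With a 2-byte remote_port, only indices 0 and 1 stay inside the field; indices 2 and 3 now land in the padding or neighboring fields, which is why those checks are dropped.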
Fixes: 9a69e2b385f4 ("bpf: Make remote_port field in struct bpf_sk_lookup 16-bit wide") Suggested-by: Ilya Leoshkevich Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220319183356.233666-3-jakub@cloudflare.com --- tools/testing/selftests/bpf/progs/test_sk_lookup.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c index bf5b7caefdd0..38b7a1fe67b6 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c @@ -413,8 +413,7 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx) /* Narrow loads from remote_port field. Expect SRC_PORT. */ if (LSB(ctx->remote_port, 0) != ((SRC_PORT >> 0) & 0xff) || - LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff) || - LSB(ctx->remote_port, 2) != 0 || LSB(ctx->remote_port, 3) != 0) + LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff)) return SK_DROP; if (LSW(ctx->remote_port, 0) != SRC_PORT) return SK_DROP; From ce5236800116d18ac2c06c58f73930406e3ea4be Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Sat, 19 Mar 2022 19:33:56 +0100 Subject: [PATCH 117/137] selftests/bpf: Fix test for 4-byte load from remote_port on big-endian The context access converter rewrites the 4-byte load from bpf_sk_lookup->remote_port to a 2-byte load from bpf_sk_lookup_kern structure. It means that we cannot treat the destination register contents as a 32-bit value, or the code will not be portable across big- and little-endian architectures. This is exactly the same case as with 4-byte loads from bpf_sock->dst_port so follow the approach outlined in [1] and treat the register contents as a 16-bit value in the test. [1]: https://lore.kernel.org/bpf/20220317113920.1068535-5-jakub@cloudflare.com/ Fixes: 2ed0dc5937d3 ("selftests/bpf: Cover 4-byte load from remote_port in bpf_sk_lookup") Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220319183356.233666-4-jakub@cloudflare.com --- tools/testing/selftests/bpf/progs/test_sk_lookup.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c index 38b7a1fe67b6..6058dcb11b36 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c @@ -418,9 +418,15 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx) if (LSW(ctx->remote_port, 0) != SRC_PORT) return SK_DROP; - /* Load from remote_port field with zero padding (backward compatibility) */ + /* + * NOTE: 4-byte load from bpf_sk_lookup at remote_port offset + * is quirky. It gets rewritten by the access converter to a + * 2-byte load for backward compatibility. Treating the load + * result as a be16 value makes the code portable across + * little- and big-endian platforms. + */ val_u32 = *(__u32 *)&ctx->remote_port; - if (val_u32 != bpf_htonl(bpf_ntohs(SRC_PORT) << 16)) + if (val_u32 != SRC_PORT) return SK_DROP; /* Narrow loads from local_port field. Expect DST_PORT. */ From ef078600eec20f20eb7833cf597d4a5edf2953c1 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 11 Mar 2022 12:11:35 -0800 Subject: [PATCH 118/137] bpf: Select proper size for bpf_prog_pack Using HPAGE_PMD_SIZE as the size for bpf_prog_pack is not ideal in some cases. 
Specifically, for NUMA systems, __vmalloc_node_range requires PMD_SIZE * num_online_nodes() to allocate huge pages. Also, if the system does not support huge pages (i.e., with cmdline option nohugevmalloc), it is better to use PAGE_SIZE packs. Add logic to select proper size for bpf_prog_pack. This solution is not ideal, as it makes assumption about the behavior of module_alloc and __vmalloc_node_range. However, it appears to be the easiest solution as it doesn't require changes in module_alloc and vmalloc code. Fixes: 57631054fae6 ("bpf: Introduce bpf_prog_pack allocator") Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220311201135.3573610-1-song@kernel.org --- kernel/bpf/core.c | 70 +++++++++++++++++++++++++++++++---------------- 1 file changed, 47 insertions(+), 23 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 1324f9523e7c..9d661e07e77c 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -815,15 +816,9 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86) * to host BPF programs. */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -#define BPF_PROG_PACK_SIZE HPAGE_PMD_SIZE -#else -#define BPF_PROG_PACK_SIZE PAGE_SIZE -#endif #define BPF_PROG_CHUNK_SHIFT 6 #define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT) #define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1)) -#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE) struct bpf_prog_pack { struct list_head list; @@ -831,30 +826,56 @@ struct bpf_prog_pack { unsigned long bitmap[]; }; -#define BPF_PROG_MAX_PACK_PROG_SIZE BPF_PROG_PACK_SIZE #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE) +static size_t bpf_prog_pack_size = -1; + +static int bpf_prog_chunk_count(void) +{ + WARN_ON_ONCE(bpf_prog_pack_size == -1); + return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE; +} + static DEFINE_MUTEX(pack_mutex); static LIST_HEAD(pack_list); +static size_t select_bpf_prog_pack_size(void) +{ + size_t size; + void *ptr; + + size = PMD_SIZE * num_online_nodes(); + ptr = module_alloc(size); + + /* Test whether we can get huge pages. If not just use PAGE_SIZE + * packs. 
+ */ + if (!ptr || !is_vm_area_hugepages(ptr)) + size = PAGE_SIZE; + + vfree(ptr); + return size; +} + static struct bpf_prog_pack *alloc_new_pack(void) { struct bpf_prog_pack *pack; - pack = kzalloc(sizeof(*pack) + BITS_TO_BYTES(BPF_PROG_CHUNK_COUNT), GFP_KERNEL); + pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(bpf_prog_chunk_count())), + GFP_KERNEL); if (!pack) return NULL; - pack->ptr = module_alloc(BPF_PROG_PACK_SIZE); + pack->ptr = module_alloc(bpf_prog_pack_size); if (!pack->ptr) { kfree(pack); return NULL; } - bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE); + bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE); list_add_tail(&pack->list, &pack_list); set_vm_flush_reset_perms(pack->ptr); - set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE); - set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE); + set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE); + set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE); return pack; } @@ -865,7 +886,11 @@ static void *bpf_prog_pack_alloc(u32 size) unsigned long pos; void *ptr = NULL; - if (size > BPF_PROG_MAX_PACK_PROG_SIZE) { + mutex_lock(&pack_mutex); + if (bpf_prog_pack_size == -1) + bpf_prog_pack_size = select_bpf_prog_pack_size(); + + if (size > bpf_prog_pack_size) { size = round_up(size, PAGE_SIZE); ptr = module_alloc(size); if (ptr) { @@ -873,13 +898,12 @@ static void *bpf_prog_pack_alloc(u32 size) set_memory_ro((unsigned long)ptr, size / PAGE_SIZE); set_memory_x((unsigned long)ptr, size / PAGE_SIZE); } - return ptr; + goto out; } - mutex_lock(&pack_mutex); list_for_each_entry(pack, &pack_list, list) { - pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0, + pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0, nbits, 0); - if (pos < BPF_PROG_CHUNK_COUNT) + if (pos < bpf_prog_chunk_count()) goto found_free_area; } @@ -905,13 +929,13 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr) unsigned long pos; void *pack_ptr; - if (hdr->size > BPF_PROG_MAX_PACK_PROG_SIZE) { + mutex_lock(&pack_mutex); + if (hdr->size > bpf_prog_pack_size) { module_memfree(hdr); - return; + goto out; } - pack_ptr = (void *)((unsigned long)hdr & ~(BPF_PROG_PACK_SIZE - 1)); - mutex_lock(&pack_mutex); + pack_ptr = (void *)((unsigned long)hdr & ~(bpf_prog_pack_size - 1)); list_for_each_entry(tmp, &pack_list, list) { if (tmp->ptr == pack_ptr) { @@ -927,8 +951,8 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr) pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT; bitmap_clear(pack->bitmap, pos, nbits); - if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0, - BPF_PROG_CHUNK_COUNT, 0) == 0) { + if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0, + bpf_prog_chunk_count(), 0) == 0) { list_del(&pack->list); module_memfree(pack->ptr); kfree(pack); From ee2a098851bfbe8bcdd964c0121f4246f00ff41e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 14 Mar 2022 11:20:41 -0700 Subject: [PATCH 119/137] bpf: Adjust BPF stack helper functions to accommodate skip > 0 Let's say that the caller has storage for num_elem stack frames. Then, the BPF stack helper functions walk the stack for only num_elem frames. This means that if skip > 0, one keeps only 'num_elem - skip' frames. This is because it sets init_nr in the perf_callchain_entry to the end of the buffer to save num_elem entries only. 
I believe it was because the perf callchain code unwound the stack frames until it reached the global max size (sysctl_perf_event_max_stack). However it now has perf_callchain_entry_ctx.max_stack to limit the iteration locally. This simplifies the code to handle init_nr in the BPF callstack entries and removes the confusion with the perf_event's __PERF_SAMPLE_CALLCHAIN_EARLY which sets init_nr to 0. Also change the comment on bpf_get_stack() in the header file to be more explicit what the return value means. Fixes: c195651e565a ("bpf: add bpf_get_stack helper") Signed-off-by: Namhyung Kim Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/30a7b5d5-6726-1cc2-eaee-8da2828a9a9c@oracle.com Link: https://lore.kernel.org/bpf/20220314182042.71025-1-namhyung@kernel.org Based-on-patch-by: Eugene Loh --- include/uapi/linux/bpf.h | 8 +++--- kernel/bpf/stackmap.c | 56 +++++++++++++++++----------------------- 2 files changed, 28 insertions(+), 36 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 7604e7d5438f..d14b10b85e51 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3009,8 +3009,8 @@ union bpf_attr { * * # sysctl kernel.perf_event_max_stack= * Return - * A non-negative value equal to or less than *size* on success, - * or a negative error in case of failure. + * The non-negative copied *buf* length equal to or less than + * *size* on success, or a negative error in case of failure. * * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description @@ -4316,8 +4316,8 @@ union bpf_attr { * * # sysctl kernel.perf_event_max_stack= * Return - * A non-negative value equal to or less than *size* on success, - * or a negative error in case of failure. + * The non-negative copied *buf* length equal to or less than + * *size* on success, or a negative error in case of failure. * * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags) * Description diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 38bdfcd06f55..34725bfa1e97 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -176,7 +176,7 @@ build_id_valid: } static struct perf_callchain_entry * -get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) +get_callchain_entry_for_task(struct task_struct *task, u32 max_depth) { #ifdef CONFIG_STACKTRACE struct perf_callchain_entry *entry; @@ -187,9 +187,8 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) if (!entry) return NULL; - entry->nr = init_nr + - stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr), - sysctl_perf_event_max_stack - init_nr, 0); + entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip, + max_depth, 0); /* stack_trace_save_tsk() works on unsigned long array, while * perf_callchain_entry uses u64 array. 
For 32-bit systems, it is @@ -201,7 +200,7 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) int i; /* copy data from the end to avoid using extra buffer */ - for (i = entry->nr - 1; i >= (int)init_nr; i--) + for (i = entry->nr - 1; i >= 0; i--) to[i] = (u64)(from[i]); } @@ -218,27 +217,19 @@ static long __bpf_get_stackid(struct bpf_map *map, { struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); struct stack_map_bucket *bucket, *new_bucket, *old_bucket; - u32 max_depth = map->value_size / stack_map_data_size(map); - /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */ - u32 init_nr = sysctl_perf_event_max_stack - max_depth; u32 skip = flags & BPF_F_SKIP_FIELD_MASK; u32 hash, id, trace_nr, trace_len; bool user = flags & BPF_F_USER_STACK; u64 *ips; bool hash_matches; - /* get_perf_callchain() guarantees that trace->nr >= init_nr - * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth - */ - trace_nr = trace->nr - init_nr; - - if (trace_nr <= skip) + if (trace->nr <= skip) /* skipping more than usable stack trace */ return -EFAULT; - trace_nr -= skip; + trace_nr = trace->nr - skip; trace_len = trace_nr * sizeof(u64); - ips = trace->ip + skip + init_nr; + ips = trace->ip + skip; hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0); id = hash & (smap->n_buckets - 1); bucket = READ_ONCE(smap->buckets[id]); @@ -295,8 +286,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, u64, flags) { u32 max_depth = map->value_size / stack_map_data_size(map); - /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */ - u32 init_nr = sysctl_perf_event_max_stack - max_depth; + u32 skip = flags & BPF_F_SKIP_FIELD_MASK; bool user = flags & BPF_F_USER_STACK; struct perf_callchain_entry *trace; bool kernel = !user; @@ -305,8 +295,12 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))) return -EINVAL; - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, false, false); + max_depth += skip; + if (max_depth > sysctl_perf_event_max_stack) + max_depth = sysctl_perf_event_max_stack; + + trace = get_perf_callchain(regs, 0, kernel, user, max_depth, + false, false); if (unlikely(!trace)) /* couldn't fetch the stack trace */ @@ -397,7 +391,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, struct perf_callchain_entry *trace_in, void *buf, u32 size, u64 flags) { - u32 init_nr, trace_nr, copy_len, elem_size, num_elem; + u32 trace_nr, copy_len, elem_size, num_elem, max_depth; bool user_build_id = flags & BPF_F_USER_BUILD_ID; u32 skip = flags & BPF_F_SKIP_FIELD_MASK; bool user = flags & BPF_F_USER_STACK; @@ -422,30 +416,28 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, goto err_fault; num_elem = size / elem_size; - if (sysctl_perf_event_max_stack < num_elem) - init_nr = 0; - else - init_nr = sysctl_perf_event_max_stack - num_elem; + max_depth = num_elem + skip; + if (sysctl_perf_event_max_stack < max_depth) + max_depth = sysctl_perf_event_max_stack; if (trace_in) trace = trace_in; else if (kernel && task) - trace = get_callchain_entry_for_task(task, init_nr); + trace = get_callchain_entry_for_task(task, max_depth); else - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, + trace = get_perf_callchain(regs, 0, kernel, user, max_depth, false, false); if (unlikely(!trace)) goto err_fault; - trace_nr 
= trace->nr - init_nr; - if (trace_nr < skip) + if (trace->nr < skip) goto err_fault; - trace_nr -= skip; + trace_nr = trace->nr - skip; trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem; copy_len = trace_nr * elem_size; - ips = trace->ip + skip + init_nr; + + ips = trace->ip + skip; if (user && user_build_id) stack_map_get_build_id_offset(buf, ips, trace_nr, user); else From e1cc1f39981b06ba22a0c92e800e9fd8ba59d2d3 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 14 Mar 2022 11:20:42 -0700 Subject: [PATCH 120/137] selftests/bpf: Test skipping stacktrace Add a test case for stacktrace with skip > 0 using a small sized buffer. It didn't support skipping entries greater than or equal to the size of buffer and filled the skipped part with 0. Signed-off-by: Namhyung Kim Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220314182042.71025-2-namhyung@kernel.org --- .../bpf/prog_tests/stacktrace_map_skip.c | 63 +++++++++++++++++ .../selftests/bpf/progs/stacktrace_map_skip.c | 68 +++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c create mode 100644 tools/testing/selftests/bpf/progs/stacktrace_map_skip.c diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c new file mode 100644 index 000000000000..1932b1e0685c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "stacktrace_map_skip.skel.h" + +#define TEST_STACK_DEPTH 2 + +void test_stacktrace_map_skip(void) +{ + struct stacktrace_map_skip *skel; + int stackid_hmap_fd, stackmap_fd, stack_amap_fd; + int err, stack_trace_len; + + skel = stacktrace_map_skip__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + /* find map fds */ + stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap); + if (!ASSERT_GE(stackid_hmap_fd, 0, "stackid_hmap fd")) + goto out; + + stackmap_fd = bpf_map__fd(skel->maps.stackmap); + if (!ASSERT_GE(stackmap_fd, 0, "stackmap fd")) + goto out; + + stack_amap_fd = bpf_map__fd(skel->maps.stack_amap); + if (!ASSERT_GE(stack_amap_fd, 0, "stack_amap fd")) + goto out; + + skel->bss->pid = getpid(); + + err = stacktrace_map_skip__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + /* give some time for bpf program run */ + sleep(1); + + /* disable stack trace collection */ + skel->bss->control = 1; + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vise versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (!ASSERT_OK(err, "compare_map_keys stackid_hmap vs. stackmap")) + goto out; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (!ASSERT_OK(err, "compare_map_keys stackmap vs. stackid_hmap")) + goto out; + + stack_trace_len = TEST_STACK_DEPTH * sizeof(__u64); + err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); + if (!ASSERT_OK(err, "compare_stack_ips stackmap vs. 
stack_amap")) + goto out; + + if (!ASSERT_EQ(skel->bss->failed, 0, "skip_failed")) + goto out; + +out: + stacktrace_map_skip__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c new file mode 100644 index 000000000000..2eb297df3dd6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#define TEST_STACK_DEPTH 2 +#define TEST_MAX_ENTRIES 16384 + +typedef __u64 stack_trace_t[TEST_STACK_DEPTH]; + +struct { + __uint(type, BPF_MAP_TYPE_STACK_TRACE); + __uint(max_entries, TEST_MAX_ENTRIES); + __type(key, __u32); + __type(value, stack_trace_t); +} stackmap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, TEST_MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} stackid_hmap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, TEST_MAX_ENTRIES); + __type(key, __u32); + __type(value, stack_trace_t); +} stack_amap SEC(".maps"); + +int pid = 0; +int control = 0; +int failed = 0; + +SEC("tracepoint/sched/sched_switch") +int oncpu(struct trace_event_raw_sched_switch *ctx) +{ + __u32 max_len = TEST_STACK_DEPTH * sizeof(__u64); + __u32 key = 0, val = 0; + __u64 *stack_p; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + if (control) + return 0; + + /* it should allow skipping whole buffer size entries */ + key = bpf_get_stackid(ctx, &stackmap, TEST_STACK_DEPTH); + if ((int)key >= 0) { + /* The size of stackmap and stack_amap should be the same */ + bpf_map_update_elem(&stackid_hmap, &key, &val, 0); + stack_p = bpf_map_lookup_elem(&stack_amap, &key); + if (stack_p) { + bpf_get_stack(ctx, stack_p, max_len, TEST_STACK_DEPTH); + /* it wrongly skipped all the entries and filled zero */ + if (stack_p[0] == 0) + failed = 1; + } + } else { + /* old kernel doesn't support skipping that many entries */ + failed = 2; + } + + return 0; +} + +char _license[] SEC("license") = "GPL"; From 7ada3787e91c89b0aa7abf47682e8e587b855c13 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sun, 20 Mar 2022 20:00:03 +0530 Subject: [PATCH 121/137] bpf: Check for NULL return from bpf_get_btf_vmlinux When CONFIG_DEBUG_INFO_BTF is disabled, bpf_get_btf_vmlinux can return a NULL pointer. Check for it in btf_get_module_btf to prevent a NULL pointer dereference. While kernel test robot only complained about this specific case, let's also check for NULL in other call sites of bpf_get_btf_vmlinux. 
Fixes: 9492450fd287 ("bpf: Always raise reference in btf_get_module_btf") Reported-by: kernel test robot Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220320143003.589540-1-memxor@gmail.com --- kernel/bpf/btf.c | 6 +++++- net/core/bpf_sk_storage.c | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 6d9e711cb5d4..ce212bf39b2b 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -534,6 +534,8 @@ static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p) btf = bpf_get_btf_vmlinux(); if (IS_ERR(btf)) return PTR_ERR(btf); + if (!btf) + return -EINVAL; ret = btf_find_by_name_kind(btf, name, kind); /* ret is never zero, since btf_find_by_name_kind returns @@ -6584,7 +6586,7 @@ static struct btf *btf_get_module_btf(const struct module *module) if (!module) { btf = bpf_get_btf_vmlinux(); - if (!IS_ERR(btf)) + if (!IS_ERR_OR_NULL(btf)) btf_get(btf); return btf; } @@ -7180,6 +7182,8 @@ bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id) main_btf = bpf_get_btf_vmlinux(); if (IS_ERR(main_btf)) return ERR_CAST(main_btf); + if (!main_btf) + return ERR_PTR(-EINVAL); local_type = btf_type_by_id(local_btf, local_type_id); if (!local_type) diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index 7aff1206a851..e3ac36380520 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -406,6 +406,8 @@ static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog) case BPF_TRACE_FENTRY: case BPF_TRACE_FEXIT: btf_vmlinux = bpf_get_btf_vmlinux(); + if (IS_ERR_OR_NULL(btf_vmlinux)) + return false; btf_id = prog->aux->attach_btf_id; t = btf_type_by_id(btf_vmlinux, btf_id); tname = btf_name_by_offset(btf_vmlinux, t->name_off); From ec80906b0fbd7be11e3e960813b977b1ffe5f8fe Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Mon, 21 Mar 2022 10:41:49 +0800 Subject: [PATCH 122/137] selftests/bpf/test_lirc_mode2.sh: Exit with proper code When the test_lirc_mode2_user exec fails, the test reports failure but still exits with 0. Fix it by exiting with an error code. Another issue is the LIRCDEV check: with bash's -n test, we need to quote the variable, or it will always be true. So if test_lirc_mode2_user was not run, just exit with the skip code. Fixes: 6bdd533cee9a ("bpf: add selftest for lirc_mode2 type program") Signed-off-by: Hangbin Liu Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220321024149.157861-1-liuhangbin@gmail.com --- tools/testing/selftests/bpf/test_lirc_mode2.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh index ec4e15948e40..5252b91f48a1 100755 --- a/tools/testing/selftests/bpf/test_lirc_mode2.sh +++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh @@ -3,6 +3,7 @@ # Kselftest framework requirement - SKIP code is 4.
ksft_skip=4 +ret=$ksft_skip msg="skip all tests:" if [ $UID != 0 ]; then @@ -25,7 +26,7 @@ do fi done -if [ -n $LIRCDEV ]; +if [ -n "$LIRCDEV" ]; then TYPE=lirc_mode2 ./test_lirc_mode2_user $LIRCDEV $INPUTDEV @@ -36,3 +37,5 @@ then echo -e ${GREEN}"PASS: $TYPE"${NC} fi fi + +exit $ret From 583669ab3aed29994e50bde6c66b52d44e1bdb73 Mon Sep 17 00:00:00 2001 From: Yuntao Wang Date: Sun, 20 Mar 2022 15:52:40 +0800 Subject: [PATCH 123/137] bpf: Simplify check in btf_parse_hdr() Replace offsetof(hdr_len) + sizeof(hdr_len) with offsetofend(hdr_len) to simplify the check for correctness of btf_data_size in btf_parse_hdr() Signed-off-by: Yuntao Wang Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220320075240.1001728-1-ytcoode@gmail.com --- kernel/bpf/btf.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index ce212bf39b2b..24788ce564a0 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -4482,8 +4482,7 @@ static int btf_parse_hdr(struct btf_verifier_env *env) btf = env->btf; btf_data_size = btf->data_size; - if (btf_data_size < - offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) { + if (btf_data_size < offsetofend(struct btf_header, hdr_len)) { btf_verifier_log(env, "hdr_len not found"); return -EINVAL; } From f705ec764b34323412f14b9bd95412e9bcb8770b Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 21 Mar 2022 08:01:12 +0100 Subject: [PATCH 124/137] Revert "bpf: Add support to inline bpf_get_func_ip helper on x86" This reverts commit 97ee4d20ee67eb462581a7af01442de6586e390b. Following change is adding more complexity to bpf_get_func_ip helper for kprobe_multi programs, which can't be inlined easily. Signed-off-by: Jiri Olsa Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220321070113.1449167-2-jolsa@kernel.org --- kernel/bpf/verifier.c | 21 +-------------------- kernel/trace/bpf_trace.c | 1 - 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6347dcdee1fd..571ccd7f04eb 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -13698,7 +13698,7 @@ patch_map_ops_generic: continue; } - /* Implement tracing bpf_get_func_ip inline. */ + /* Implement bpf_get_func_ip inline. */ if (prog_type == BPF_PROG_TYPE_TRACING && insn->imm == BPF_FUNC_get_func_ip) { /* Load IP address from ctx - 16 */ @@ -13713,25 +13713,6 @@ patch_map_ops_generic: continue; } -#ifdef CONFIG_X86 - /* Implement kprobe_multi bpf_get_func_ip inline. */ - if (prog_type == BPF_PROG_TYPE_KPROBE && - eatype == BPF_TRACE_KPROBE_MULTI && - insn->imm == BPF_FUNC_get_func_ip) { - /* Load IP address from ctx (struct pt_regs) ip */ - insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, - offsetof(struct pt_regs, ip)); - - new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); - if (!new_prog) - return -ENOMEM; - - env->prog = prog = new_prog; - insn = new_prog->insnsi + i + delta; - continue; - } -#endif - patch_call_imm: fn = env->ops->get_func_proto(insn->imm, env->prog); /* all functions that have prototype and verifier allowed diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 9a7b6be655e4..52c2998e1dc3 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1042,7 +1042,6 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = { BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs) { - /* This helper call is inlined by verifier on x86. 
*/ return instruction_pointer(regs); } From f70986902c86f88612ed45a96aa7cf4caa65f7c1 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 21 Mar 2022 08:01:13 +0100 Subject: [PATCH 125/137] bpf: Fix kprobe_multi return probe backtrace Andrii reported that backtraces from kprobe_multi program attached as return probes are not complete and showing just initial entry [1]. It's caused by changing registers to have original function ip address as instruction pointer even for return probe, which will screw backtrace from return probe. This change keeps registers intact and store original entry ip and link address on the stack in bpf_kprobe_multi_run_ctx struct, where bpf_get_func_ip and bpf_get_attach_cookie helpers for kprobe_multi programs can find it. [1] https://lore.kernel.org/bpf/CAEf4BzZDDqK24rSKwXNp7XL3ErGD4bZa1M6c_c4EvDSt3jrZcg@mail.gmail.com/T/#m8d1301c0ea0892ddf9dc6fba57a57b8cf11b8c51 Fixes: ca74823c6e16 ("bpf: Add cookie support to programs attached with kprobe multi link") Reported-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220321070113.1449167-3-jolsa@kernel.org --- kernel/trace/bpf_trace.c | 67 ++++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 30 deletions(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 52c2998e1dc3..172ef545730d 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -80,7 +80,8 @@ u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags, const struct btf **btf, s32 *btf_id); -static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx, u64 ip); +static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx); +static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx); /** * trace_call_bpf - invoke BPF program @@ -1042,7 +1043,7 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = { BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs) { - return instruction_pointer(regs); + return bpf_kprobe_multi_entry_ip(current->bpf_ctx); } static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = { @@ -1054,7 +1055,7 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = { BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs) { - return bpf_kprobe_multi_cookie(current->bpf_ctx, instruction_pointer(regs)); + return bpf_kprobe_multi_cookie(current->bpf_ctx); } static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = { @@ -2219,15 +2220,16 @@ struct bpf_kprobe_multi_link { struct bpf_link link; struct fprobe fp; unsigned long *addrs; - /* - * The run_ctx here is used to get struct bpf_kprobe_multi_link in - * get_attach_cookie helper, so it can't be used to store data. 
- */ - struct bpf_run_ctx run_ctx; u64 *cookies; u32 cnt; }; +struct bpf_kprobe_multi_run_ctx { + struct bpf_run_ctx run_ctx; + struct bpf_kprobe_multi_link *link; + unsigned long entry_ip; +}; + static void bpf_kprobe_multi_link_release(struct bpf_link *link) { struct bpf_kprobe_multi_link *kmulti_link; @@ -2281,18 +2283,21 @@ static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void return __bpf_kprobe_multi_cookie_cmp(a, b); } -static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx, u64 ip) +static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) { + struct bpf_kprobe_multi_run_ctx *run_ctx; struct bpf_kprobe_multi_link *link; + u64 *cookie, entry_ip; unsigned long *addr; - u64 *cookie; if (WARN_ON_ONCE(!ctx)) return 0; - link = container_of(ctx, struct bpf_kprobe_multi_link, run_ctx); + run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); + link = run_ctx->link; if (!link->cookies) return 0; - addr = bsearch(&ip, link->addrs, link->cnt, sizeof(ip), + entry_ip = run_ctx->entry_ip; + addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip), __bpf_kprobe_multi_cookie_cmp); if (!addr) return 0; @@ -2300,10 +2305,22 @@ static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx, u64 ip) return *cookie; } +static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) +{ + struct bpf_kprobe_multi_run_ctx *run_ctx; + + run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); + return run_ctx->entry_ip; +} + static int kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, - struct pt_regs *regs) + unsigned long entry_ip, struct pt_regs *regs) { + struct bpf_kprobe_multi_run_ctx run_ctx = { + .link = link, + .entry_ip = entry_ip, + }; struct bpf_run_ctx *old_run_ctx; int err; @@ -2314,7 +2331,7 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, migrate_disable(); rcu_read_lock(); - old_run_ctx = bpf_set_run_ctx(&link->run_ctx); + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); err = bpf_prog_run(link->link.prog, regs); bpf_reset_run_ctx(old_run_ctx); rcu_read_unlock(); @@ -2329,24 +2346,10 @@ static void kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs) { - unsigned long saved_ip = instruction_pointer(regs); struct bpf_kprobe_multi_link *link; - /* - * Because fprobe's regs->ip is set to the next instruction of - * dynamic-ftrace instruction, correct entry ip must be set, so - * that the bpf program can access entry address via regs as same - * as kprobes. - * - * Both kprobe and kretprobe see the entry ip of traced function - * as instruction pointer. 
- */ - instruction_pointer_set(regs, entry_ip); - link = container_of(fp, struct bpf_kprobe_multi_link, fp); - kprobe_multi_link_prog_run(link, regs); - - instruction_pointer_set(regs, saved_ip); + kprobe_multi_link_prog_run(link, entry_ip, regs); } static int @@ -2513,7 +2516,11 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr { return -EOPNOTSUPP; } -static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx, u64 ip) +static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) +{ + return 0; +} +static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) { return 0; } From 1824d8ea75f275a5e69e5f6bc0ffe122ea9b938c Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Sun, 20 Mar 2022 06:08:14 +0000 Subject: [PATCH 126/137] bpftool: Fix print error when show bpf map If there is no btf_id or frozen, it will not show the pids, but the pids don't depend on any one of them. Below is the result after this change: $ ./bpftool map show 2: lpm_trie flags 0x1 key 8B value 8B max_entries 1 memlock 4096B pids systemd(1) 3: lpm_trie flags 0x1 key 20B value 8B max_entries 1 memlock 4096B pids systemd(1) While before this change, the 'pids systemd(1)' can't be displayed. Fixes: 9330986c0300 ("bpf: Add bloom filter map implementation") Signed-off-by: Yafang Shao Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220320060815.7716-1-laoar.shao@gmail.com --- tools/bpf/bpftool/map.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index f91d9bf9054e..c26378f20831 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -620,17 +620,14 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) u32_as_hash_field(info->id)) printf("\n\tpinned %s", (char *)entry->value); } - printf("\n"); if (frozen_str) { frozen = atoi(frozen_str); free(frozen_str); } - if (!info->btf_id && !frozen) - return 0; - - printf("\t"); + if (info->btf_id || frozen) + printf("\n\t"); if (info->btf_id) printf("btf_id %d", info->btf_id); From d0f325c34c2fbe15f6774f2b628224280b571ae9 Mon Sep 17 00:00:00 2001 From: Hengqi Chen Date: Sat, 19 Mar 2022 11:05:33 +0800 Subject: [PATCH 127/137] libbpf: Close fd in bpf_object__reuse_map pin_fd is dup-ed and assigned in bpf_map__reuse_fd. Close it in bpf_object__reuse_map after reuse. Signed-off-by: Hengqi Chen Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220319030533.3132250-1-hengqi.chen@gmail.com --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 2efe9431c1ba..809fe209cdcc 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4823,8 +4823,8 @@ bpf_object__reuse_map(struct bpf_map *map) } err = bpf_map__reuse_fd(map, pin_fd); + close(pin_fd); if (err) { - close(pin_fd); return err; } map->pinned = true; From d8dc09a4db45e49cc1ee5170632833ec11fafa7e Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 18 Mar 2022 11:37:04 +0100 Subject: [PATCH 128/137] bpf, arm: Fix various typos in comments Various spelling mistakes in comments. Detected with the help of Coccinelle. 
Signed-off-by: Julia Lawall Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220318103729.157574-9-Julia.Lawall@inria.fr --- arch/arm/net/bpf_jit_32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 10ceebb7530b..9e457156ad4d 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1864,7 +1864,7 @@ static int build_body(struct jit_ctx *ctx) if (ctx->target == NULL) ctx->offsets[i] = ctx->idx; - /* If unsuccesfull, return with error code */ + /* If unsuccessful, return with error code */ if (ret) return ret; } @@ -1973,7 +1973,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) * for jit, although it can decrease the size of the image. * * As each arm instruction is of length 32bit, we are translating - * number of JITed intructions into the size required to store these + * number of JITed instructions into the size required to store these * JITed code. */ image_size = sizeof(u32) * ctx.idx; From d56c9fe6a06820d5ef8188d96bf4345c7bdba249 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 21 Mar 2022 11:58:02 -0700 Subject: [PATCH 129/137] bpf: Fix warning for cast from restricted gfp_t in verifier This fixes the sparse warning reported by the kernel test robot: kernel/bpf/verifier.c:13499:47: sparse: warning: cast from restricted gfp_t kernel/bpf/verifier.c:13501:47: sparse: warning: cast from restricted gfp_t This fix can be verified locally by running: 1) wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O make.cross 2) chmod +x ~/bin/make.cross 3) COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 ./make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' Fixes: b00fa38a9c1c ("bpf: Enable non-atomic allocations in local storage") Reported-by: kernel test robot Signed-off-by: Joanne Koong Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220321185802.824223-1-joannekoong@fb.com --- kernel/bpf/verifier.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 571ccd7f04eb..d175b70067b3 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -13496,9 +13496,9 @@ static int do_misc_fixups(struct bpf_verifier_env *env) insn->imm == BPF_FUNC_sk_storage_get || insn->imm == BPF_FUNC_inode_storage_get) { if (env->prog->aux->sleepable) - insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__s32)GFP_KERNEL); + insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL); else - insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__s32)GFP_ATOMIC); + insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC); insn_buf[1] = *insn; cnt = 2; From 96805674e5624b3c79780a2b41c7a3d6bc38dc76 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 21 Mar 2022 11:00:08 -0700 Subject: [PATCH 130/137] bpf: Fix bpf_prog_pack for multi-node setup module_alloc requires num_online_nodes * PMD_SIZE to allocate huge pages. bpf_prog_pack uses packs of size num_online_nodes * PMD_SIZE. OTOH, module_alloc returns addresses that are PMD_SIZE aligned (instead of num_online_nodes * PMD_SIZE aligned). Therefore, PMD_MASK should be used to calculate pack_ptr in bpf_prog_pack_free().
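To see why the mask matters, here is a minimal user-space sketch of the failure mode (the constants, addresses, and two-node setup are illustrative assumptions, not actual kernel code): masking with ~(pack_size - 1) presumes the pack is aligned to its full size, but module_alloc only guarantees PMD_SIZE alignment, so only PMD_MASK reliably recovers the pack base.

#include <stdio.h>

#define PMD_SIZE (2UL << 20)		/* assume 2MB huge pages */
#define PMD_MASK (~(PMD_SIZE - 1))

int main(void)
{
	unsigned long pack_size = 2 * PMD_SIZE;	/* two online nodes -> 4MB pack */
	unsigned long pack_base = 3 * PMD_SIZE;	/* PMD-aligned, not pack_size-aligned */
	unsigned long hdr = pack_base + 0x1000;	/* a program header inside the pack */

	/* Old calculation: assumes pack_size alignment, lands below the pack. */
	printf("size-based mask: %#lx\n", hdr & ~(pack_size - 1));	/* 0x400000, wrong */

	/* Fixed calculation: mask by the alignment module_alloc guarantees. */
	printf("PMD_MASK:        %#lx\n", hdr & PMD_MASK);		/* 0x600000, correct */
	return 0;
}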
Fixes: ef078600eec2 ("bpf: Select proper size for bpf_prog_pack") Reported-by: syzbot+c946805b5ce6ab87df0b@syzkaller.appspotmail.com Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220321180009.1944482-2-song@kernel.org --- kernel/bpf/core.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 9d661e07e77c..f6b20fcbeb24 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -829,6 +829,7 @@ struct bpf_prog_pack { #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE) static size_t bpf_prog_pack_size = -1; +static size_t bpf_prog_pack_mask = -1; static int bpf_prog_chunk_count(void) { @@ -850,8 +851,12 @@ static size_t select_bpf_prog_pack_size(void) /* Test whether we can get huge pages. If not just use PAGE_SIZE * packs. */ - if (!ptr || !is_vm_area_hugepages(ptr)) + if (!ptr || !is_vm_area_hugepages(ptr)) { size = PAGE_SIZE; + bpf_prog_pack_mask = PAGE_MASK; + } else { + bpf_prog_pack_mask = PMD_MASK; + } vfree(ptr); return size; @@ -935,7 +940,7 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr) goto out; } - pack_ptr = (void *)((unsigned long)hdr & ~(bpf_prog_pack_size - 1)); + pack_ptr = (void *)((unsigned long)hdr & bpf_prog_pack_mask); list_for_each_entry(tmp, &pack_list, list) { if (tmp->ptr == pack_ptr) { From e581094167beb674c8a3bc2c27362f50dc5dd617 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 21 Mar 2022 11:00:09 -0700 Subject: [PATCH 131/137] bpf: Fix bpf_prog_pack when PMD_SIZE is not defined PMD_SIZE is not available in some special config, e.g. ARCH=arm with CONFIG_MMU=n. Use a bpf_prog_pack of PAGE_SIZE in these cases. Fixes: ef078600eec2 ("bpf: Select proper size for bpf_prog_pack") Reported-by: kernel test robot Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220321180009.1944482-3-song@kernel.org --- kernel/bpf/core.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index f6b20fcbeb24..13e9dbeeedf3 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -840,12 +840,23 @@ static int bpf_prog_chunk_count(void) static DEFINE_MUTEX(pack_mutex); static LIST_HEAD(pack_list); +/* PMD_SIZE is not available in some special config, e.g. ARCH=arm with + * CONFIG_MMU=n. Use PAGE_SIZE in these cases. + */ +#ifdef PMD_SIZE +#define BPF_HPAGE_SIZE PMD_SIZE +#define BPF_HPAGE_MASK PMD_MASK +#else +#define BPF_HPAGE_SIZE PAGE_SIZE +#define BPF_HPAGE_MASK PAGE_MASK +#endif + static size_t select_bpf_prog_pack_size(void) { size_t size; void *ptr; - size = PMD_SIZE * num_online_nodes(); + size = BPF_HPAGE_SIZE * num_online_nodes(); ptr = module_alloc(size); /* Test whether we can get huge pages.
If not just use PAGE_SIZE @@ -855,7 +866,7 @@ static size_t select_bpf_prog_pack_size(void) size = PAGE_SIZE; bpf_prog_pack_mask = PAGE_MASK; } else { - bpf_prog_pack_mask = PMD_MASK; + bpf_prog_pack_mask = BPF_HPAGE_MASK; } vfree(ptr); From f97b8b9bd630fb76c0e9e11cbf390e3d64a144d7 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Sat, 19 Mar 2022 20:20:09 -0700 Subject: [PATCH 132/137] bpftool: Fix a bug in subskeleton code generation Compiling with clang by adding LLVM=1 to both the kernel and selftests/bpf builds, I hit the following compilation error: In file included from /.../tools/testing/selftests/bpf/prog_tests/subskeleton.c:6: ./test_subskeleton_lib.subskel.h:168:6: error: variable 'err' is used uninitialized whenever 'if' condition is true [-Werror,-Wsometimes-uninitialized] if (!s->progs) ^~~~~~~~~ ./test_subskeleton_lib.subskel.h:181:11: note: uninitialized use occurs here errno = -err; ^~~ ./test_subskeleton_lib.subskel.h:168:2: note: remove the 'if' if its condition is always false if (!s->progs) ^~~~~~~~~~~~~~ The compilation error is triggered by the following code ... int err; obj = (struct test_subskeleton_lib *)calloc(1, sizeof(*obj)); if (!obj) { errno = ENOMEM; goto err; } ... err: test_subskeleton_lib__destroy(obj); errno = -err; ... in test_subskeleton_lib__open(). The 'err' is not initialized, yet it is used in 'errno = -err' later. The fix is to remove 'errno = -err' since errno has been set properly in all incoming branches. Fixes: 00389c58ffe9 ("bpftool: Add support for subskeletons") Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220320032009.3106133-1-yhs@fb.com --- tools/bpf/bpftool/gen.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 96bd2b33ccf6..7ba7ff55d2ea 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -1538,7 +1538,6 @@ static int do_subskeleton(int argc, char **argv) return obj; \n\ err: \n\ %1$s__destroy(obj); \n\ - errno = -err; \n\ return NULL; \n\ } \n\ \n\ From ecaed3b9deea9ff3e7d98b9ee8722c7d78de5d97 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 21 Mar 2022 21:40:45 -0700 Subject: [PATCH 133/137] Revert "ARM: rethook: Add rethook arm implementation" This reverts commit 515a49173b80a4aabcbad9a4fa2a247042378ea1.
Signed-off-by: Alexei Starovoitov --- arch/arm/Kconfig | 1 - arch/arm/include/asm/stacktrace.h | 4 +- arch/arm/kernel/stacktrace.c | 6 -- arch/arm/probes/Makefile | 1 - arch/arm/probes/rethook.c | 103 ------------------------------ 5 files changed, 2 insertions(+), 113 deletions(-) delete mode 100644 arch/arm/probes/rethook.c diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 440f69ee8af5..4c97cb40eebb 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -107,7 +107,6 @@ config ARM select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI select HAVE_OPTPROBES if !THUMB2_KERNEL - select HAVE_RETHOOK select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h index babed1707ca8..8f54f9ad8a9b 100644 --- a/arch/arm/include/asm/stacktrace.h +++ b/arch/arm/include/asm/stacktrace.h @@ -14,7 +14,7 @@ struct stackframe { unsigned long sp; unsigned long lr; unsigned long pc; -#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) +#ifdef CONFIG_KRETPROBES struct llist_node *kr_cur; struct task_struct *tsk; #endif @@ -27,7 +27,7 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame) frame->sp = regs->ARM_sp; frame->lr = regs->ARM_lr; frame->pc = regs->ARM_pc; -#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) +#ifdef CONFIG_KRETPROBES frame->kr_cur = NULL; frame->tsk = current; #endif diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index f509c6be4f57..75e905508f27 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only #include #include -#include #include #include #include @@ -67,11 +66,6 @@ int notrace unwind_frame(struct stackframe *frame) frame->sp = *(unsigned long *)(fp - 8); frame->pc = *(unsigned long *)(fp - 4); #endif -#ifdef CONFIG_RETHOOK - if (is_rethook_trampoline(frame->pc)) - frame->pc = rethook_find_ret_addr(frame->tsk, frame->fp, - &frame->kr_cur); -#endif #ifdef CONFIG_KRETPROBES if (is_kretprobe_trampoline(frame->pc)) frame->pc = kretprobe_find_ret_addr(frame->tsk, diff --git a/arch/arm/probes/Makefile b/arch/arm/probes/Makefile index 10c083a22223..8b0ea5ace100 100644 --- a/arch/arm/probes/Makefile +++ b/arch/arm/probes/Makefile @@ -6,4 +6,3 @@ obj-$(CONFIG_KPROBES) += decode-thumb.o else obj-$(CONFIG_KPROBES) += decode-arm.o endif -obj-$(CONFIG_RETHOOK) += rethook.o diff --git a/arch/arm/probes/rethook.c b/arch/arm/probes/rethook.c deleted file mode 100644 index 1c1357a86365..000000000000 --- a/arch/arm/probes/rethook.c +++ /dev/null @@ -1,103 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * arm implementation of rethook. Mostly copied from arch/arm/probes/kprobes/core.c - */ - -#include -#include - -/* Called from arch_rethook_trampoline */ -static __used unsigned long arch_rethook_trampoline_callback(struct pt_regs *regs) -{ - return rethook_trampoline_handler(regs, regs->ARM_fp); -} -NOKPROBE_SYMBOL(arch_rethook_trampoline_callback); - -/* - * When a rethook'ed function returns, it returns to arch_rethook_trampoline - * which calls rethook callback. We construct a struct pt_regs to - * give a view of registers r0-r11, sp, lr, and pc to the user - * return-handler. This is not a complete pt_regs structure, but that - * should be enough for stacktrace from the return handler with or - * without pt_regs. 
- */ -asm( - ".text\n" - ".global arch_rethook_trampoline\n" - ".type arch_rethook_trampoline, %function\n" - "arch_rethook_trampoline:\n" -#ifdef CONFIG_FRAME_POINTER - "ldr lr, =arch_rethook_trampoline \n\t" - /* this makes a framepointer on pt_regs. */ -#ifdef CONFIG_CC_IS_CLANG - "stmdb sp, {sp, lr, pc} \n\t" - "sub sp, sp, #12 \n\t" - /* In clang case, pt_regs->ip = lr. */ - "stmdb sp!, {r0 - r11, lr} \n\t" - /* fp points regs->r11 (fp) */ - "add fp, sp, #44 \n\t" -#else /* !CONFIG_CC_IS_CLANG */ - /* In gcc case, pt_regs->ip = fp. */ - "stmdb sp, {fp, sp, lr, pc} \n\t" - "sub sp, sp, #16 \n\t" - "stmdb sp!, {r0 - r11} \n\t" - /* fp points regs->r15 (pc) */ - "add fp, sp, #60 \n\t" -#endif /* CONFIG_CC_IS_CLANG */ -#else /* !CONFIG_FRAME_POINTER */ - "sub sp, sp, #16 \n\t" - "stmdb sp!, {r0 - r11} \n\t" -#endif /* CONFIG_FRAME_POINTER */ - "mov r0, sp \n\t" - "bl arch_rethook_trampoline_callback \n\t" - "mov lr, r0 \n\t" - "ldmia sp!, {r0 - r11} \n\t" - "add sp, sp, #16 \n\t" -#ifdef CONFIG_THUMB2_KERNEL - "bx lr \n\t" -#else - "mov pc, lr \n\t" -#endif - ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n" -); -NOKPROBE_SYMBOL(arch_rethook_trampoline); - -/* - * At the entry of function with mcount. The stack and registers are prepared - * for the mcount function as below. - * - * mov ip, sp - * push {fp, ip, lr, pc} - * sub fp, ip, #4 ; FP[0] = PC, FP[-4] = LR, and FP[-12] = call-site FP. - * push {lr} - * bl <__gnu_mcount_nc> ; call ftrace - * - * And when returning from the function, call-site FP, SP and PC are restored - * from stack as below; - * - * ldm sp, {fp, sp, pc} - * - * Thus, if the arch_rethook_prepare() is called from real function entry, - * it must change the LR and save FP in pt_regs. But if it is called via - * mcount context (ftrace), it must change the LR on stack, which is next - * to the PC (= FP[-4]), and save the FP value at FP[-12]. - */ -void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount) -{ - unsigned long *ret_addr, *frame; - - if (mcount) { - ret_addr = (unsigned long *)(regs->ARM_fp - 4); - frame = (unsigned long *)(regs->ARM_fp - 12); - } else { - ret_addr = ®s->ARM_lr; - frame = ®s->ARM_fp; - } - - rh->ret_addr = *ret_addr; - rh->frame = *frame; - - /* Replace the return addr with trampoline addr. */ - *ret_addr = (unsigned long)arch_rethook_trampoline; -} -NOKPROBE_SYMBOL(arch_rethook_prepare); From 35df0155e68a1fb20646f34247f19131170693bd Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 21 Mar 2022 21:40:49 -0700 Subject: [PATCH 134/137] Revert "powerpc: Add rethook support" This reverts commit 02752bd99dc2daae05c12f7063bf0632e22b4c1c. 
Signed-off-by: Alexei Starovoitov --- arch/powerpc/Kconfig | 1 - arch/powerpc/kernel/Makefile | 1 - arch/powerpc/kernel/rethook.c | 72 ----------------------------------- 3 files changed, 74 deletions(-) delete mode 100644 arch/powerpc/kernel/rethook.c diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 5feaa241fb56..b779603978e1 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -229,7 +229,6 @@ config PPC select HAVE_PERF_EVENTS_NMI if PPC64 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP - select HAVE_RETHOOK if KPROBES select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE select HAVE_RSEQ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index feb24ea83ca6..4d7829399570 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -115,7 +115,6 @@ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_OPTPROBES) += optprobes.o optprobes_head.o obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o -obj-$(CONFIG_RETHOOK) += rethook.o obj-$(CONFIG_UPROBES) += uprobes.o obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o diff --git a/arch/powerpc/kernel/rethook.c b/arch/powerpc/kernel/rethook.c deleted file mode 100644 index a8a128748efa..000000000000 --- a/arch/powerpc/kernel/rethook.c +++ /dev/null @@ -1,72 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * PowerPC implementation of rethook. This depends on kprobes. - */ - -#include -#include - -/* - * Function return trampoline: - * - init_kprobes() establishes a probepoint here - * - When the probed function returns, this probe - * causes the handlers to fire - */ -asm(".global arch_rethook_trampoline\n" - ".type arch_rethook_trampoline, @function\n" - "arch_rethook_trampoline:\n" - "nop\n" - "blr\n" - ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n"); - -/* - * Called when the probe at kretprobe trampoline is hit - */ -static int trampoline_rethook_handler(struct kprobe *p, struct pt_regs *regs) -{ - unsigned long orig_ret_address; - - orig_ret_address = rethook_trampoline_handler(regs, 0); - /* - * We get here through one of two paths: - * 1. by taking a trap -> kprobe_handler() -> here - * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here - * - * When going back through (1), we need regs->nip to be setup properly - * as it is used to determine the return address from the trap. - * For (2), since nip is not honoured with optprobes, we instead setup - * the link register properly so that the subsequent 'blr' in - * __kretprobe_trampoline jumps back to the right instruction. - * - * For nip, we should set the address to the previous instruction since - * we end up emulating it in kprobe_handler(), which increments the nip - * again. 
- */ - regs_set_return_ip(regs, orig_ret_address - 4); - regs->link = orig_ret_address; - - return 0; -} -NOKPROBE_SYMBOL(trampoline_rethook_handler); - -void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount) -{ - rh->ret_addr = regs->link; - rh->frame = 0; - - /* Replace the return addr with trampoline addr */ - regs->link = (unsigned long)arch_rethook_trampoline; -} -NOKPROBE_SYMBOL(arch_prepare_kretprobe); - -static struct kprobe trampoline_p = { - .addr = (kprobe_opcode_t *) &arch_rethook_trampoline, - .pre_handler = trampoline_rethook_handler -}; - -static int init_arch_rethook(void) -{ - return register_kprobe(&trampoline_p); -} - -core_initcall(init_arch_rethook); From 0f8f8030038ae37c32a233186caab2c381396784 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 21 Mar 2022 21:40:51 -0700 Subject: [PATCH 135/137] Revert "arm64: rethook: Add arm64 rethook implementation" This reverts commit 83acdce6894908337ca82973149d9709d28204d7. Signed-off-by: Alexei Starovoitov --- arch/arm64/Kconfig | 1 - arch/arm64/include/asm/stacktrace.h | 2 +- arch/arm64/kernel/probes/Makefile | 1 - arch/arm64/kernel/probes/rethook.c | 25 ------ arch/arm64/kernel/probes/rethook_trampoline.S | 87 ------------------- arch/arm64/kernel/stacktrace.c | 7 +- 6 files changed, 2 insertions(+), 121 deletions(-) delete mode 100644 arch/arm64/kernel/probes/rethook.c delete mode 100644 arch/arm64/kernel/probes/rethook_trampoline.S diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 62cff9d0a5bb..09b885cc4db5 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -201,7 +201,6 @@ config ARM64 select HAVE_SYSCALL_TRACEPOINTS select HAVE_KPROBES select HAVE_KRETPROBES - select HAVE_RETHOOK select HAVE_GENERIC_VDSO select IOMMU_DMA if IOMMU_SUPPORT select IRQ_DOMAIN diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index bf04107da97c..e77cdef9ca29 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -58,7 +58,7 @@ struct stackframe { DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES); unsigned long prev_fp; enum stack_type prev_type; -#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) +#ifdef CONFIG_KRETPROBES struct llist_node *kr_cur; #endif }; diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile index 24e689f44c32..8e4be92e25b1 100644 --- a/arch/arm64/kernel/probes/Makefile +++ b/arch/arm64/kernel/probes/Makefile @@ -4,4 +4,3 @@ obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \ simulate-insn.o obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o \ simulate-insn.o -obj-$(CONFIG_RETHOOK) += rethook.o rethook_trampoline.o diff --git a/arch/arm64/kernel/probes/rethook.c b/arch/arm64/kernel/probes/rethook.c deleted file mode 100644 index edc6b804ad6a..000000000000 --- a/arch/arm64/kernel/probes/rethook.c +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Generic return hook for arm64. 
- * Most of the code is copied from arch/arm64/kernel/probes/kprobes.c - */ - -#include -#include - -/* This is called from arch_rethook_trampoline() */ -unsigned long __used arch_rethook_trampoline_callback(struct pt_regs *regs) -{ - return rethook_trampoline_handler(regs, regs->regs[29]); -} -NOKPROBE_SYMBOL(arch_rethook_trampoline_callback); - -void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount) -{ - rhn->ret_addr = regs->regs[30]; - rhn->frame = regs->regs[29]; - - /* replace return addr (x30) with trampoline */ - regs->regs[30] = (u64)arch_rethook_trampoline; -} -NOKPROBE_SYMBOL(arch_rethook_prepare); diff --git a/arch/arm64/kernel/probes/rethook_trampoline.S b/arch/arm64/kernel/probes/rethook_trampoline.S deleted file mode 100644 index 610f520ee72b..000000000000 --- a/arch/arm64/kernel/probes/rethook_trampoline.S +++ /dev/null @@ -1,87 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * trampoline entry and return code for rethook. - * Copied from arch/arm64/kernel/probes/kprobes_trampoline.S - */ - -#include -#include -#include - - .text - - .macro save_all_base_regs - stp x0, x1, [sp, #S_X0] - stp x2, x3, [sp, #S_X2] - stp x4, x5, [sp, #S_X4] - stp x6, x7, [sp, #S_X6] - stp x8, x9, [sp, #S_X8] - stp x10, x11, [sp, #S_X10] - stp x12, x13, [sp, #S_X12] - stp x14, x15, [sp, #S_X14] - stp x16, x17, [sp, #S_X16] - stp x18, x19, [sp, #S_X18] - stp x20, x21, [sp, #S_X20] - stp x22, x23, [sp, #S_X22] - stp x24, x25, [sp, #S_X24] - stp x26, x27, [sp, #S_X26] - stp x28, x29, [sp, #S_X28] - add x0, sp, #PT_REGS_SIZE - stp lr, x0, [sp, #S_LR] - /* - * Construct a useful saved PSTATE - */ - mrs x0, nzcv - mrs x1, daif - orr x0, x0, x1 - mrs x1, CurrentEL - orr x0, x0, x1 - mrs x1, SPSel - orr x0, x0, x1 - stp xzr, x0, [sp, #S_PC] - .endm - - .macro restore_all_base_regs - ldr x0, [sp, #S_PSTATE] - and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT) - msr nzcv, x0 - ldp x0, x1, [sp, #S_X0] - ldp x2, x3, [sp, #S_X2] - ldp x4, x5, [sp, #S_X4] - ldp x6, x7, [sp, #S_X6] - ldp x8, x9, [sp, #S_X8] - ldp x10, x11, [sp, #S_X10] - ldp x12, x13, [sp, #S_X12] - ldp x14, x15, [sp, #S_X14] - ldp x16, x17, [sp, #S_X16] - ldp x18, x19, [sp, #S_X18] - ldp x20, x21, [sp, #S_X20] - ldp x22, x23, [sp, #S_X22] - ldp x24, x25, [sp, #S_X24] - ldp x26, x27, [sp, #S_X26] - ldp x28, x29, [sp, #S_X28] - .endm - -SYM_CODE_START(arch_rethook_trampoline) - sub sp, sp, #PT_REGS_SIZE - - save_all_base_regs - - /* Setup a frame pointer. */ - add x29, sp, #S_FP - - mov x0, sp - bl arch_rethook_trampoline_callback - /* - * Replace trampoline address in lr with actual orig_ret_addr return - * address. - */ - mov lr, x0 - - /* The frame pointer (x29) is restored with other registers. 
*/ - restore_all_base_regs - - add sp, sp, #PT_REGS_SIZE - ret - -SYM_CODE_END(arch_rethook_trampoline) diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index efb5b85b57c2..e4103e085681 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include @@ -39,7 +38,7 @@ static notrace void start_backtrace(struct stackframe *frame, unsigned long fp, { frame->fp = fp; frame->pc = pc; -#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) +#ifdef CONFIG_KRETPROBES frame->kr_cur = NULL; #endif @@ -139,10 +138,6 @@ static int notrace unwind_frame(struct task_struct *tsk, if (is_kretprobe_trampoline(frame->pc)) frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur); #endif -#ifdef CONFIG_RETHOOK - if (is_rethook_trampoline(frame->pc)) - frame->pc = rethook_find_ret_addr(tsk, frame->fp, &frame->kr_cur); -#endif return 0; } From 4e8ca13440b4b84873da44871f8824a12381d16b Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 21 Mar 2022 21:40:53 -0700 Subject: [PATCH 136/137] Revert "rethook: x86: Add rethook x86 implementation" This reverts commit 75caf33eda242e2f34f61e475d666359749ae5ff. Signed-off-by: Alexei Starovoitov --- arch/x86/Kconfig | 1 - arch/x86/include/asm/unwind.h | 8 +-- arch/x86/kernel/Makefile | 1 - arch/x86/kernel/kprobes/common.h | 1 - arch/x86/kernel/rethook.c | 119 ------------------------------- 5 files changed, 1 insertion(+), 129 deletions(-) delete mode 100644 arch/x86/kernel/rethook.c diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1270b65a5546..9b356da6f46b 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -221,7 +221,6 @@ config X86 select HAVE_KPROBES_ON_FTRACE select HAVE_FUNCTION_ERROR_INJECTION select HAVE_KRETPROBES - select HAVE_RETHOOK select HAVE_KVM select HAVE_LIVEPATCH if X86_64 select HAVE_MIXED_BREAKPOINTS_REGS diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 192df5b2094d..2a1f8734416d 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include @@ -17,7 +16,7 @@ struct unwind_state { unsigned long stack_mask; struct task_struct *task; int graph_idx; -#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK) +#ifdef CONFIG_KRETPROBES struct llist_node *kr_cur; #endif bool error; @@ -108,11 +107,6 @@ static inline unsigned long unwind_recover_kretprobe(struct unwind_state *state, unsigned long addr, unsigned long *addr_p) { -#ifdef CONFIG_RETHOOK - if (is_rethook_trampoline(addr)) - return rethook_find_ret_addr(state->task, (unsigned long)addr_p, - &state->kr_cur); -#endif #ifdef CONFIG_KRETPROBES return is_kretprobe_trampoline(addr) ? 
kretprobe_find_ret_addr(state->task, addr_p, &state->kr_cur) : diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 792a893a5cc5..6aef9ee28a39 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -106,7 +106,6 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_X86_TSC) += trace_clock.o obj-$(CONFIG_TRACING) += trace.o -obj-$(CONFIG_RETHOOK) += rethook.o obj-$(CONFIG_CRASH_CORE) += crash_core_$(BITS).o obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h index c993521d4933..7d3a2e2daf01 100644 --- a/arch/x86/kernel/kprobes/common.h +++ b/arch/x86/kernel/kprobes/common.h @@ -6,7 +6,6 @@ #include #include -#include #ifdef CONFIG_X86_64 diff --git a/arch/x86/kernel/rethook.c b/arch/x86/kernel/rethook.c deleted file mode 100644 index f0f2f0608282..000000000000 --- a/arch/x86/kernel/rethook.c +++ /dev/null @@ -1,119 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * x86 implementation of rethook. Mostly copied from arch/x86/kernel/kprobes/core.c. - */ -#include -#include -#include - -#include "kprobes/common.h" - -__visible void arch_rethook_trampoline_callback(struct pt_regs *regs); - -/* - * When a target function returns, this code saves registers and calls - * arch_rethook_trampoline_callback(), which calls the rethook handler. - */ -asm( - ".text\n" - ".global arch_rethook_trampoline\n" - ".type arch_rethook_trampoline, @function\n" - "arch_rethook_trampoline:\n" -#ifdef CONFIG_X86_64 - /* Push a fake return address to tell the unwinder it's a kretprobe. */ - " pushq $arch_rethook_trampoline\n" - UNWIND_HINT_FUNC - /* Save the 'sp - 8', this will be fixed later. */ - " pushq %rsp\n" - " pushfq\n" - SAVE_REGS_STRING - " movq %rsp, %rdi\n" - " call arch_rethook_trampoline_callback\n" - RESTORE_REGS_STRING - /* In the callback function, 'regs->flags' is copied to 'regs->sp'. */ - " addq $8, %rsp\n" - " popfq\n" -#else - /* Push a fake return address to tell the unwinder it's a kretprobe. */ - " pushl $arch_rethook_trampoline\n" - UNWIND_HINT_FUNC - /* Save the 'sp - 4', this will be fixed later. */ - " pushl %esp\n" - " pushfl\n" - SAVE_REGS_STRING - " movl %esp, %eax\n" - " call arch_rethook_trampoline_callback\n" - RESTORE_REGS_STRING - /* In the callback function, 'regs->flags' is copied to 'regs->sp'. */ - " addl $4, %esp\n" - " popfl\n" -#endif - " ret\n" - ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n" -); -NOKPROBE_SYMBOL(arch_rethook_trampoline); - -/* - * Called from arch_rethook_trampoline - */ -__used __visible void arch_rethook_trampoline_callback(struct pt_regs *regs) -{ - unsigned long *frame_pointer; - - /* fixup registers */ - regs->cs = __KERNEL_CS; -#ifdef CONFIG_X86_32 - regs->gs = 0; -#endif - regs->ip = (unsigned long)&arch_rethook_trampoline; - regs->orig_ax = ~0UL; - regs->sp += sizeof(long); - frame_pointer = ®s->sp + 1; - - /* - * The return address at 'frame_pointer' is recovered by the - * arch_rethook_fixup_return() which called from this - * rethook_trampoline_handler(). - */ - rethook_trampoline_handler(regs, (unsigned long)frame_pointer); - - /* - * Copy FLAGS to 'pt_regs::sp' so that arch_rethook_trapmoline() - * can do RET right after POPF. - */ - regs->sp = regs->flags; -} -NOKPROBE_SYMBOL(arch_rethook_trampoline_callback); - -/* - * arch_rethook_trampoline() skips updating frame pointer. 
The frame pointer - * saved in arch_rethook_trampoline_callback() points to the real caller - * function's frame pointer. Thus the arch_rethook_trampoline() doesn't have - * a standard stack frame with CONFIG_FRAME_POINTER=y. - * Let's mark it non-standard function. Anyway, FP unwinder can correctly - * unwind without the hint. - */ -STACK_FRAME_NON_STANDARD_FP(arch_rethook_trampoline); - -/* This is called from rethook_trampoline_handler(). */ -void arch_rethook_fixup_return(struct pt_regs *regs, - unsigned long correct_ret_addr) -{ - unsigned long *frame_pointer = ®s->sp + 1; - - /* Replace fake return address with real one. */ - *frame_pointer = correct_ret_addr; -} -NOKPROBE_SYMBOL(arch_rethook_fixup_return); - -void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount) -{ - unsigned long *stack = (unsigned long *)regs->sp; - - rh->ret_addr = stack[0]; - rh->frame = regs->sp; - - /* Replace the return addr with trampoline addr */ - stack[0] = (unsigned long) arch_rethook_trampoline; -} -NOKPROBE_SYMBOL(arch_rethook_prepare); From 7f0059b58f0257d895fafd2f2e3afe3bbdf21e64 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 21 Mar 2022 21:54:19 -0700 Subject: [PATCH 137/137] selftests/bpf: Fix kprobe_multi test. When compiler emits endbr insn the function address could be different than what bpf_get_func_ip() reports. This is a short term workaround. bpf_get_func_ip() will be fixed later. Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/kprobe_multi.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c index af27d2c6fce8..600be50800f8 100644 --- a/tools/testing/selftests/bpf/progs/kprobe_multi.c +++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c @@ -36,13 +36,15 @@ __u64 kretprobe_test6_result = 0; __u64 kretprobe_test7_result = 0; __u64 kretprobe_test8_result = 0; +extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak; + static void kprobe_multi_check(void *ctx, bool is_return) { if (bpf_get_current_pid_tgid() >> 32 != pid) return; __u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0; - __u64 addr = bpf_get_func_ip(ctx); + __u64 addr = bpf_get_func_ip(ctx) - (CONFIG_X86_KERNEL_IBT ? 4 : 0); #define SET(__var, __addr, __cookie) ({ \ if (((const void *) addr == __addr) && \