Merge branch 'Attach a cookie to a tracing program.'
Kui-Feng Lee says:

====================

Allow users to attach a 64-bit cookie to a bpf_link of fentry, fexit,
or fmod_ret.

This patchset includes several major changes.

 - Define struct bpf_tramp_links to replace bpf_tramp_prog.
   struct bpf_tramp_links collects bpf_links of a trampoline.
 - Generate a trampoline to call bpf_progs of given bpf_links.
 - Trampolines always set/reset bpf_run_ctx before/after
   calling/leaving a tracing program.
 - Attach a cookie to a bpf_link of fentry/fexit/fmod_ret/lsm. The
   value will be available when running the associated bpf_prog.

The major differences from v6:

 - bpf_link_create() can create links of BPF_LSM_MAC attach type.
 - Add a test for lsm.
 - Add function proto of bpf_get_attach_cookie() for lsm.
 - Check BPF_LSM_MAC in bpf_prog_has_trampoline().
 - Adapt to the changes of LINK_CREATE made by Andrii.

The major differences from v7:

 - Change stack_size instead of pushing/popping run_ctx.
 - Move cookie to bpf_tramp_link from bpf_tracing_link.

v1: https://lore.kernel.org/all/20220126214809.3868787-1-kuifeng@fb.com/
v2: https://lore.kernel.org/bpf/20220316004231.1103318-1-kuifeng@fb.com/
v3: https://lore.kernel.org/bpf/20220407192552.2343076-1-kuifeng@fb.com/
v4: https://lore.kernel.org/bpf/20220411173429.4139609-1-kuifeng@fb.com/
v5: https://lore.kernel.org/bpf/20220412165555.4146407-1-kuifeng@fb.com/
v6: https://lore.kernel.org/bpf/20220416042940.656344-1-kuifeng@fb.com/
v7: https://lore.kernel.org/bpf/20220508032117.2783209-1-kuifeng@fb.com/
====================

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
commit 93dafa92e1
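Taken together, the series gives tracing links a user-visible cookie: user
space supplies a u64 at attach time, and the program reads it back with the
bpf_get_attach_cookie() helper. A minimal sketch of both halves, assuming a
libbpf skeleton named "skel"; the hook, program name "on_fentry", and the
value 0x1234 are illustrative, not part of this series:

	/* BPF side (sketch): read the cookie attached to this link. */
	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	SEC("fentry/bpf_fentry_test1")
	int BPF_PROG(on_fentry, int a)
	{
		/* returns the u64 value supplied at link creation time */
		bpf_printk("cookie: %llu", bpf_get_attach_cookie(ctx));
		return 0;
	}

	char _license[] SEC("license") = "GPL";

	/* user-space side (sketch): attach with a cookie */
	LIBBPF_OPTS(bpf_trace_opts, opts, .cookie = 0x1234);
	struct bpf_link *link;

	link = bpf_program__attach_trace_opts(skel->progs.on_fentry, &opts);

The raw equivalent is bpf_link_create() with link_create.tracing.cookie set,
which is what the selftests in this series exercise.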
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1762,13 +1762,32 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
 }
 
 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
-			   struct bpf_prog *p, int stack_size, bool save_ret)
+			   struct bpf_tramp_link *l, int stack_size,
+			   int run_ctx_off, bool save_ret)
 {
 	u8 *prog = *pprog;
 	u8 *jmp_insn;
+	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
+	struct bpf_prog *p = l->link.prog;
+	u64 cookie = l->cookie;
+
+	/* mov rdi, cookie */
+	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
+
+	/* Prepare struct bpf_tramp_run_ctx.
+	 *
+	 * bpf_tramp_run_ctx is already preserved by
+	 * arch_prepare_bpf_trampoline().
+	 *
+	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
+	 */
+	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
 
 	/* arg1: mov rdi, progs[i] */
 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
+	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
+	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
+
 	if (emit_call(&prog,
 		      p->aux->sleepable ? __bpf_prog_enter_sleepable :
 		      __bpf_prog_enter, prog))
@@ -1814,6 +1833,8 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
 	/* arg2: mov rsi, rbx <- start time in nsec */
 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
+	/* arg3: lea rdx, [rbp - run_ctx_off] */
+	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
 	if (emit_call(&prog,
 		      p->aux->sleepable ? __bpf_prog_exit_sleepable :
 		      __bpf_prog_exit, prog))
@@ -1850,15 +1871,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
 }
 
 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
-		      struct bpf_tramp_progs *tp, int stack_size,
-		      bool save_ret)
+		      struct bpf_tramp_links *tl, int stack_size,
+		      int run_ctx_off, bool save_ret)
 {
 	int i;
 	u8 *prog = *pprog;
 
-	for (i = 0; i < tp->nr_progs; i++) {
-		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
-				    save_ret))
+	for (i = 0; i < tl->nr_links; i++) {
+		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
+				    run_ctx_off, save_ret))
 			return -EINVAL;
 	}
 	*pprog = prog;
@@ -1866,8 +1887,8 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
 }
 
 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
-			      struct bpf_tramp_progs *tp, int stack_size,
-			      u8 **branches)
+			      struct bpf_tramp_links *tl, int stack_size,
+			      int run_ctx_off, u8 **branches)
 {
 	u8 *prog = *pprog;
 	int i;
@@ -1877,8 +1898,8 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 	 */
 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
-	for (i = 0; i < tp->nr_progs; i++) {
-		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
+	for (i = 0; i < tl->nr_links; i++) {
+		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
 			return -EINVAL;
 
 		/* mod_ret prog stored return value into [rbp - 8]. Emit:
@@ -1980,14 +2001,14 @@ static bool is_valid_bpf_tramp_flags(unsigned int flags)
  */
 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
 				const struct btf_func_model *m, u32 flags,
-				struct bpf_tramp_progs *tprogs,
+				struct bpf_tramp_links *tlinks,
 				void *orig_call)
 {
 	int ret, i, nr_args = m->nr_args;
-	int regs_off, ip_off, args_off, stack_size = nr_args * 8;
-	struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
-	struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
-	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
+	int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
+	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
+	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
+	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
 	u8 **branches = NULL;
 	u8 *prog;
 	bool save_ret;
@@ -2014,6 +2035,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	 * RBP - args_off [ args count ] always
 	 *
 	 * RBP - ip_off   [ traced function ] BPF_TRAMP_F_IP_ARG flag
+	 *
+	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
 	 */
 
 	/* room for return value of orig_call or fentry prog */
@@ -2032,6 +2055,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 
 	ip_off = stack_size;
 
+	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
+	run_ctx_off = stack_size;
+
 	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
 		/* skip patched call instruction and point orig_call to actual
 		 * body of the kernel function.
@@ -2078,19 +2104,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 		}
 	}
 
-	if (fentry->nr_progs)
-		if (invoke_bpf(m, &prog, fentry, regs_off,
+	if (fentry->nr_links)
+		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
 			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
 			return -EINVAL;
 
-	if (fmod_ret->nr_progs) {
-		branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
+	if (fmod_ret->nr_links) {
+		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
 				   GFP_KERNEL);
 		if (!branches)
 			return -ENOMEM;
 
 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
-				       branches)) {
+				       run_ctx_off, branches)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -2111,7 +2137,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 		prog += X86_PATCH_SIZE;
 	}
 
-	if (fmod_ret->nr_progs) {
+	if (fmod_ret->nr_links) {
 		/* From Intel 64 and IA-32 Architectures Optimization
 		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
 		 * Coding Rule 11: All branch targets should be 16-byte
@@ -2121,13 +2147,13 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 		/* Update the branches saved in invoke_bpf_mod_ret with the
 		 * aligned address of do_fexit.
 		 */
-		for (i = 0; i < fmod_ret->nr_progs; i++)
+		for (i = 0; i < fmod_ret->nr_links; i++)
 			emit_cond_near_jump(&branches[i], prog, branches[i],
 					    X86_JNE);
 	}
 
-	if (fexit->nr_progs)
-		if (invoke_bpf(m, &prog, fexit, regs_off, false)) {
+	if (fexit->nr_links)
+		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -723,13 +723,15 @@ struct btf_func_model {
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
  */
-#define BPF_MAX_TRAMP_PROGS 38
+#define BPF_MAX_TRAMP_LINKS 38
 
-struct bpf_tramp_progs {
-	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
-	int nr_progs;
+struct bpf_tramp_links {
+	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
+	int nr_links;
 };
 
+struct bpf_tramp_run_ctx;
+
 /* Different use cases for BPF trampoline:
  * 1. replace nop at the function entry (kprobe equivalent)
  *    flags = BPF_TRAMP_F_RESTORE_REGS
@@ -753,13 +755,14 @@ struct bpf_tramp_progs {
 struct bpf_tramp_image;
 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
 				const struct btf_func_model *m, u32 flags,
-				struct bpf_tramp_progs *tprogs,
+				struct bpf_tramp_links *tlinks,
 				void *orig_call);
 /* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
+u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx);
+u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
+				       struct bpf_tramp_run_ctx *run_ctx);
 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
 void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
 
@@ -852,9 +855,10 @@ static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
 {
 	return bpf_func(ctx, insnsi);
 }
 
 #ifdef CONFIG_BPF_JIT
-int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
-int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
+int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
 struct bpf_trampoline *bpf_trampoline_get(u64 key,
 					  struct bpf_attach_target_info *tgt_info);
 void bpf_trampoline_put(struct bpf_trampoline *tr);
@@ -905,12 +909,12 @@ int bpf_jit_charge_modmem(u32 size);
 void bpf_jit_uncharge_modmem(u32 size);
 bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
 #else
-static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
+static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
 					   struct bpf_trampoline *tr)
 {
 	return -ENOTSUPP;
 }
-static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
+static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
 					     struct bpf_trampoline *tr)
 {
 	return -ENOTSUPP;
@@ -1009,7 +1013,6 @@ struct bpf_prog_aux {
 	bool tail_call_reachable;
 	bool xdp_has_frags;
 	bool use_bpf_prog_pack;
-	struct hlist_node tramp_hlist;
 	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
 	const struct btf_type *attach_func_proto;
 	/* function name for valid attach_btf_id */
@@ -1096,6 +1099,19 @@ struct bpf_link_ops {
 			    struct bpf_link_info *info);
 };
 
+struct bpf_tramp_link {
+	struct bpf_link link;
+	struct hlist_node tramp_hlist;
+	u64 cookie;
+};
+
+struct bpf_tracing_link {
+	struct bpf_tramp_link link;
+	enum bpf_attach_type attach_type;
+	struct bpf_trampoline *trampoline;
+	struct bpf_prog *tgt_prog;
+};
+
 struct bpf_link_primer {
 	struct bpf_link *link;
 	struct file *file;
@@ -1133,8 +1149,8 @@ bool bpf_struct_ops_get(const void *kdata);
 void bpf_struct_ops_put(const void *kdata);
 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
 				       void *value);
-int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
-				      struct bpf_prog *prog,
+int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
+				      struct bpf_tramp_link *link,
 				      const struct btf_func_model *model,
 				      void *image, void *image_end);
 static inline bool bpf_try_module_get(const void *data, struct module *owner)
@@ -1339,6 +1355,12 @@ struct bpf_trace_run_ctx {
 	u64 bpf_cookie;
 };
 
+struct bpf_tramp_run_ctx {
+	struct bpf_run_ctx run_ctx;
+	u64 bpf_cookie;
+	struct bpf_run_ctx *saved_run_ctx;
+};
+
 static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
 {
 	struct bpf_run_ctx *old_ctx = NULL;
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -141,3 +141,4 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
 BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
 #endif
 BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)
+BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops)
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1013,6 +1013,7 @@ enum bpf_link_type {
 	BPF_LINK_TYPE_XDP = 6,
 	BPF_LINK_TYPE_PERF_EVENT = 7,
 	BPF_LINK_TYPE_KPROBE_MULTI = 8,
+	BPF_LINK_TYPE_STRUCT_OPS = 9,
 
 	MAX_BPF_LINK_TYPE,
 };
@@ -1489,6 +1490,15 @@ union bpf_attr {
 				__aligned_u64	addrs;
 				__aligned_u64	cookies;
 			} kprobe_multi;
+			struct {
+				/* this is overlaid with the target_btf_id above. */
+				__u32		target_btf_id;
+				/* black box user-provided value passed through
+				 * to BPF program at the execution time and
+				 * accessible through bpf_get_attach_cookie() BPF helper
+				 */
+				__u64		cookie;
+			} tracing;
 		};
 	} link_create;
 
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -117,6 +117,21 @@ static const struct bpf_func_proto bpf_ima_file_hash_proto = {
 	.allowed = bpf_ima_inode_hash_allowed,
 };
 
+BPF_CALL_1(bpf_get_attach_cookie, void *, ctx)
+{
+	struct bpf_trace_run_ctx *run_ctx;
+
+	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
+	return run_ctx->bpf_cookie;
+}
+
+static const struct bpf_func_proto bpf_get_attach_cookie_proto = {
+	.func = bpf_get_attach_cookie,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_PTR_TO_CTX,
+};
+
 static const struct bpf_func_proto *
 bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -141,6 +156,8 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return prog->aux->sleepable ? &bpf_ima_inode_hash_proto : NULL;
 	case BPF_FUNC_ima_file_hash:
 		return prog->aux->sleepable ? &bpf_ima_file_hash_proto : NULL;
+	case BPF_FUNC_get_attach_cookie:
+		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto : NULL;
 	default:
 		return tracing_prog_func_proto(func_id, prog);
 	}
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -33,15 +33,15 @@ struct bpf_struct_ops_map {
 	const struct bpf_struct_ops *st_ops;
 	/* protect map_update */
 	struct mutex lock;
-	/* progs has all the bpf_prog that is populated
+	/* link has all the bpf_links that is populated
 	 * to the func ptr of the kernel's struct
 	 * (in kvalue.data).
 	 */
-	struct bpf_prog **progs;
+	struct bpf_link **links;
 	/* image is a page that has all the trampolines
 	 * that stores the func args before calling the bpf_prog.
 	 * A PAGE_SIZE "image" is enough to store all trampoline for
-	 * "progs[]".
+	 * "links[]".
 	 */
 	void *image;
 	/* uvalue->data stores the kernel struct
@@ -283,9 +283,9 @@ static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
 	u32 i;
 
 	for (i = 0; i < btf_type_vlen(t); i++) {
-		if (st_map->progs[i]) {
-			bpf_prog_put(st_map->progs[i]);
-			st_map->progs[i] = NULL;
+		if (st_map->links[i]) {
+			bpf_link_put(st_map->links[i]);
+			st_map->links[i] = NULL;
 		}
 	}
 }
@@ -316,18 +316,34 @@ static int check_zero_holes(const struct btf_type *t, void *data)
 	return 0;
 }
 
-int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
-				      struct bpf_prog *prog,
+static void bpf_struct_ops_link_release(struct bpf_link *link)
+{
+}
+
+static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
+{
+	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);
+
+	kfree(tlink);
+}
+
+const struct bpf_link_ops bpf_struct_ops_link_lops = {
+	.release = bpf_struct_ops_link_release,
+	.dealloc = bpf_struct_ops_link_dealloc,
+};
+
+int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
+				      struct bpf_tramp_link *link,
 				      const struct btf_func_model *model,
 				      void *image, void *image_end)
 {
 	u32 flags;
 
-	tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
-	tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
+	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
+	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;
 	flags = model->ret_size > 0 ? BPF_TRAMP_F_RET_FENTRY_RET : 0;
 	return arch_prepare_bpf_trampoline(NULL, image, image_end,
-					   model, flags, tprogs, NULL);
+					   model, flags, tlinks, NULL);
 }
 
 static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
@@ -338,7 +354,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 	struct bpf_struct_ops_value *uvalue, *kvalue;
 	const struct btf_member *member;
 	const struct btf_type *t = st_ops->type;
-	struct bpf_tramp_progs *tprogs = NULL;
+	struct bpf_tramp_links *tlinks = NULL;
 	void *udata, *kdata;
 	int prog_fd, err = 0;
 	void *image, *image_end;
@@ -362,8 +378,8 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 	if (uvalue->state || refcount_read(&uvalue->refcnt))
 		return -EINVAL;
 
-	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
-	if (!tprogs)
+	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
+	if (!tlinks)
 		return -ENOMEM;
 
 	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
@@ -386,6 +402,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 	for_each_member(i, t, member) {
 		const struct btf_type *mtype, *ptype;
 		struct bpf_prog *prog;
+		struct bpf_tramp_link *link;
 		u32 moff;
 
 		moff = __btf_member_bit_offset(t, member) / 8;
@@ -439,16 +456,26 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 			err = PTR_ERR(prog);
 			goto reset_unlock;
 		}
-		st_map->progs[i] = prog;
 
 		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
 		    prog->aux->attach_btf_id != st_ops->type_id ||
 		    prog->expected_attach_type != i) {
+			bpf_prog_put(prog);
 			err = -EINVAL;
 			goto reset_unlock;
 		}
 
-		err = bpf_struct_ops_prepare_trampoline(tprogs, prog,
+		link = kzalloc(sizeof(*link), GFP_USER);
+		if (!link) {
+			bpf_prog_put(prog);
+			err = -ENOMEM;
+			goto reset_unlock;
+		}
+		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
+			      &bpf_struct_ops_link_lops, prog);
+		st_map->links[i] = &link->link;
+
+		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
 							&st_ops->func_models[i],
 							image, image_end);
 		if (err < 0)
@@ -491,7 +518,7 @@ reset_unlock:
 	memset(uvalue, 0, map->value_size);
 	memset(kvalue, 0, map->value_size);
 unlock:
-	kfree(tprogs);
+	kfree(tlinks);
 	mutex_unlock(&st_map->lock);
 	return err;
 }
@@ -546,9 +573,9 @@ static void bpf_struct_ops_map_free(struct bpf_map *map)
 {
 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
 
-	if (st_map->progs)
+	if (st_map->links)
 		bpf_struct_ops_map_put_progs(st_map);
-	bpf_map_area_free(st_map->progs);
+	bpf_map_area_free(st_map->links);
 	bpf_jit_free_exec(st_map->image);
 	bpf_map_area_free(st_map->uvalue);
 	bpf_map_area_free(st_map);
@@ -597,11 +624,11 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
 	map = &st_map->map;
 
 	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
-	st_map->progs =
-		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_prog *),
+	st_map->links =
+		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_links *),
 				   NUMA_NO_NODE);
 	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
-	if (!st_map->uvalue || !st_map->progs || !st_map->image) {
+	if (!st_map->uvalue || !st_map->links || !st_map->image) {
 		bpf_struct_ops_map_free(map);
 		return ERR_PTR(-ENOMEM);
 	}
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2864,19 +2864,12 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd)
 }
 EXPORT_SYMBOL(bpf_link_get_from_fd);
 
-struct bpf_tracing_link {
-	struct bpf_link link;
-	enum bpf_attach_type attach_type;
-	struct bpf_trampoline *trampoline;
-	struct bpf_prog *tgt_prog;
-};
-
 static void bpf_tracing_link_release(struct bpf_link *link)
 {
 	struct bpf_tracing_link *tr_link =
-		container_of(link, struct bpf_tracing_link, link);
+		container_of(link, struct bpf_tracing_link, link.link);
 
-	WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
+	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
 						tr_link->trampoline));
 
 	bpf_trampoline_put(tr_link->trampoline);
@@ -2889,7 +2882,7 @@ static void bpf_tracing_link_release(struct bpf_link *link)
 static void bpf_tracing_link_dealloc(struct bpf_link *link)
 {
 	struct bpf_tracing_link *tr_link =
-		container_of(link, struct bpf_tracing_link, link);
+		container_of(link, struct bpf_tracing_link, link.link);
 
 	kfree(tr_link);
 }
@@ -2898,7 +2891,7 @@ static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
 					 struct seq_file *seq)
 {
 	struct bpf_tracing_link *tr_link =
-		container_of(link, struct bpf_tracing_link, link);
+		container_of(link, struct bpf_tracing_link, link.link);
 
 	seq_printf(seq,
 		   "attach_type:\t%d\n",
@@ -2909,7 +2902,7 @@ static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
 					   struct bpf_link_info *info)
 {
 	struct bpf_tracing_link *tr_link =
-		container_of(link, struct bpf_tracing_link, link);
+		container_of(link, struct bpf_tracing_link, link.link);
 
 	info->tracing.attach_type = tr_link->attach_type;
 	bpf_trampoline_unpack_key(tr_link->trampoline->key,
@@ -2928,7 +2921,8 @@ static const struct bpf_link_ops bpf_tracing_link_lops = {
 
 static int bpf_tracing_prog_attach(struct bpf_prog *prog,
 				   int tgt_prog_fd,
-				   u32 btf_id)
+				   u32 btf_id,
+				   u64 bpf_cookie)
 {
 	struct bpf_link_primer link_primer;
 	struct bpf_prog *tgt_prog = NULL;
@@ -2990,9 +2984,10 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
 		err = -ENOMEM;
 		goto out_put_prog;
 	}
-	bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
+	bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
 		      &bpf_tracing_link_lops, prog);
 	link->attach_type = prog->expected_attach_type;
+	link->link.cookie = bpf_cookie;
 
 	mutex_lock(&prog->aux->dst_mutex);
 
@@ -3060,11 +3055,11 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
 		tgt_prog = prog->aux->dst_prog;
 	}
 
-	err = bpf_link_prime(&link->link, &link_primer);
+	err = bpf_link_prime(&link->link.link, &link_primer);
 	if (err)
 		goto out_unlock;
 
-	err = bpf_trampoline_link_prog(prog, tr);
+	err = bpf_trampoline_link_prog(&link->link, tr);
 	if (err) {
 		bpf_link_cleanup(&link_primer);
 		link = NULL;
@@ -3278,7 +3273,7 @@ static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
 			tp_name = prog->aux->attach_func_name;
 			break;
 		}
-		return bpf_tracing_prog_attach(prog, 0, 0);
+		return bpf_tracing_prog_attach(prog, 0, 0, 0);
 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
 		if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
@@ -4531,7 +4526,8 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
 	case BPF_PROG_TYPE_EXT:
 		ret = bpf_tracing_prog_attach(prog,
 					      attr->link_create.target_fd,
-					      attr->link_create.target_btf_id);
+					      attr->link_create.target_btf_id,
+					      attr->link_create.tracing.cookie);
 		break;
 	case BPF_PROG_TYPE_LSM:
 	case BPF_PROG_TYPE_TRACING:
@@ -4546,7 +4542,8 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
 		else
 			ret = bpf_tracing_prog_attach(prog,
 						      attr->link_create.target_fd,
-						      attr->link_create.target_btf_id);
+						      attr->link_create.target_btf_id,
+						      attr->link_create.tracing.cookie);
 		break;
 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
 	case BPF_PROG_TYPE_SK_LOOKUP:
@@ -5027,6 +5024,7 @@ static bool syscall_prog_is_valid_access(int off, int size,
 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
 {
 	struct bpf_prog * __maybe_unused prog;
+	struct bpf_tramp_run_ctx __maybe_unused run_ctx;
 
 	switch (cmd) {
 	case BPF_MAP_CREATE:
@@ -5054,13 +5052,15 @@ BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
 			return -EINVAL;
 		}
 
-		if (!__bpf_prog_enter_sleepable(prog)) {
+		run_ctx.bpf_cookie = 0;
+		run_ctx.saved_run_ctx = NULL;
+		if (!__bpf_prog_enter_sleepable(prog, &run_ctx)) {
 			/* recursion detected */
 			bpf_prog_put(prog);
 			return -EBUSY;
 		}
 		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
-		__bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */);
+		__bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */, &run_ctx);
 		bpf_prog_put(prog);
 		return 0;
 #endif
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -30,9 +30,12 @@ static DEFINE_MUTEX(trampoline_mutex);
 bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
 {
 	enum bpf_attach_type eatype = prog->expected_attach_type;
+	enum bpf_prog_type ptype = prog->type;
 
-	return eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
-	       eatype == BPF_MODIFY_RETURN;
+	return (ptype == BPF_PROG_TYPE_TRACING &&
+		(eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
+		 eatype == BPF_MODIFY_RETURN)) ||
+	       (ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
 }
 
 void *bpf_jit_alloc_exec_page(void)
@@ -168,30 +171,30 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
 	return ret;
 }
 
-static struct bpf_tramp_progs *
+static struct bpf_tramp_links *
 bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
 {
-	const struct bpf_prog_aux *aux;
-	struct bpf_tramp_progs *tprogs;
-	struct bpf_prog **progs;
+	struct bpf_tramp_link *link;
+	struct bpf_tramp_links *tlinks;
+	struct bpf_tramp_link **links;
 	int kind;
 
 	*total = 0;
-	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
-	if (!tprogs)
+	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
+	if (!tlinks)
 		return ERR_PTR(-ENOMEM);
 
 	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
-		tprogs[kind].nr_progs = tr->progs_cnt[kind];
+		tlinks[kind].nr_links = tr->progs_cnt[kind];
 		*total += tr->progs_cnt[kind];
-		progs = tprogs[kind].progs;
+		links = tlinks[kind].links;
 
-		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist) {
-			*ip_arg |= aux->prog->call_get_func_ip;
-			*progs++ = aux->prog;
+		hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
+			*ip_arg |= link->link.prog->call_get_func_ip;
+			*links++ = link;
 		}
 	}
-	return tprogs;
+	return tlinks;
 }
 
 static void __bpf_tramp_image_put_deferred(struct work_struct *work)
@@ -330,14 +333,14 @@ out:
 static int bpf_trampoline_update(struct bpf_trampoline *tr)
 {
 	struct bpf_tramp_image *im;
-	struct bpf_tramp_progs *tprogs;
+	struct bpf_tramp_links *tlinks;
 	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
 	bool ip_arg = false;
 	int err, total;
 
-	tprogs = bpf_trampoline_get_progs(tr, &total, &ip_arg);
-	if (IS_ERR(tprogs))
-		return PTR_ERR(tprogs);
+	tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
+	if (IS_ERR(tlinks))
+		return PTR_ERR(tlinks);
 
 	if (total == 0) {
 		err = unregister_fentry(tr, tr->cur_image->image);
@@ -353,15 +356,15 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 		goto out;
 	}
 
-	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
-	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
+	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
+	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links)
 		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 
 	if (ip_arg)
 		flags |= BPF_TRAMP_F_IP_ARG;
 
 	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
-					  &tr->func.model, flags, tprogs,
+					  &tr->func.model, flags, tlinks,
 					  tr->func.addr);
 	if (err < 0)
 		goto out;
@@ -381,7 +384,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 	tr->cur_image = im;
 	tr->selector++;
 out:
-	kfree(tprogs);
+	kfree(tlinks);
 	return err;
 }
 
@@ -407,13 +410,14 @@ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
 	}
 }
 
-int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
+int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
 {
 	enum bpf_tramp_prog_type kind;
+	struct bpf_tramp_link *link_exiting;
 	int err = 0;
 	int cnt;
 
-	kind = bpf_attach_type_to_tramp(prog);
+	kind = bpf_attach_type_to_tramp(link->link.prog);
 	mutex_lock(&tr->mutex);
 	if (tr->extension_prog) {
 		/* cannot attach fentry/fexit if extension prog is attached.
@@ -429,25 +433,33 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
 			err = -EBUSY;
 			goto out;
 		}
-		tr->extension_prog = prog;
+		tr->extension_prog = link->link.prog;
 		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
-					 prog->bpf_func);
+					 link->link.prog->bpf_func);
 		goto out;
 	}
-	if (cnt >= BPF_MAX_TRAMP_PROGS) {
+	if (cnt >= BPF_MAX_TRAMP_LINKS) {
 		err = -E2BIG;
 		goto out;
 	}
-	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
+	if (!hlist_unhashed(&link->tramp_hlist)) {
 		/* prog already linked */
 		err = -EBUSY;
 		goto out;
 	}
-	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
+	hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) {
+		if (link_exiting->link.prog != link->link.prog)
+			continue;
+		/* prog already linked */
+		err = -EBUSY;
+		goto out;
+	}
+
+	hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
 	tr->progs_cnt[kind]++;
 	err = bpf_trampoline_update(tr);
 	if (err) {
-		hlist_del_init(&prog->aux->tramp_hlist);
+		hlist_del_init(&link->tramp_hlist);
 		tr->progs_cnt[kind]--;
 	}
 out:
@@ -456,12 +468,12 @@ out:
 }
 
 /* bpf_trampoline_unlink_prog() should never fail. */
-int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
+int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
 {
 	enum bpf_tramp_prog_type kind;
 	int err;
 
-	kind = bpf_attach_type_to_tramp(prog);
+	kind = bpf_attach_type_to_tramp(link->link.prog);
 	mutex_lock(&tr->mutex);
 	if (kind == BPF_TRAMP_REPLACE) {
 		WARN_ON_ONCE(!tr->extension_prog);
@@ -470,7 +482,7 @@ int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
 		tr->extension_prog = NULL;
 		goto out;
 	}
-	hlist_del_init(&prog->aux->tramp_hlist);
+	hlist_del_init(&link->tramp_hlist);
 	tr->progs_cnt[kind]--;
 	err = bpf_trampoline_update(tr);
 out:
@@ -559,11 +571,14 @@ static void notrace inc_misses_counter(struct bpf_prog *prog)
  * [2..MAX_U64] - execute bpf prog and record execution time.
  *     This is start time.
  */
-u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
+u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
 	__acquires(RCU)
 {
 	rcu_read_lock();
 	migrate_disable();
+
+	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
+
 	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
 		inc_misses_counter(prog);
 		return 0;
@@ -593,29 +608,38 @@ static void notrace update_prog_stats(struct bpf_prog *prog,
 	}
 }
 
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
+void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx)
 	__releases(RCU)
 {
+	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
+
 	update_prog_stats(prog, start);
 	__this_cpu_dec(*(prog->active));
 	migrate_enable();
 	rcu_read_unlock();
 }
 
-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
+u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
 {
 	rcu_read_lock_trace();
 	migrate_disable();
 	might_fault();
+
 	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
 		inc_misses_counter(prog);
 		return 0;
 	}
+
+	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
+
 	return bpf_prog_start_time();
 }
 
-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
+void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
+				       struct bpf_tramp_run_ctx *run_ctx)
 {
+	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
+
 	update_prog_stats(prog, start);
 	__this_cpu_dec(*(prog->active));
 	migrate_enable();
@@ -635,7 +659,7 @@ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
 int __weak
 arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
 			    const struct btf_func_model *m, u32 flags,
-			    struct bpf_tramp_progs *tprogs,
+			    struct bpf_tramp_links *tlinks,
 			    void *orig_call)
 {
 	return -ENOTSUPP;
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1091,6 +1091,21 @@ static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
 	.arg1_type = ARG_PTR_TO_CTX,
 };
 
+BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
+{
+	struct bpf_trace_run_ctx *run_ctx;
+
+	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
+	return run_ctx->bpf_cookie;
+}
+
+static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
+	.func = bpf_get_attach_cookie_tracing,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_PTR_TO_CTX,
+};
+
 BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
 {
 #ifndef CONFIG_X86
@@ -1719,6 +1734,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
 	case BPF_FUNC_get_func_arg_cnt:
 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
+	case BPF_FUNC_get_attach_cookie:
+		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
 	default:
 		fn = raw_tp_prog_func_proto(func_id, prog);
 		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -72,13 +72,16 @@ static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
 			    args->args[3], args->args[4]);
 }
 
+extern const struct bpf_link_ops bpf_struct_ops_link_lops;
+
 int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
 			    union bpf_attr __user *uattr)
 {
 	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
 	const struct btf_type *func_proto;
 	struct bpf_dummy_ops_test_args *args;
-	struct bpf_tramp_progs *tprogs;
+	struct bpf_tramp_links *tlinks;
+	struct bpf_tramp_link *link = NULL;
 	void *image = NULL;
 	unsigned int op_idx;
 	int prog_ret;
@@ -92,8 +95,8 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
 	if (IS_ERR(args))
 		return PTR_ERR(args);
 
-	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
-	if (!tprogs) {
+	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
+	if (!tlinks) {
 		err = -ENOMEM;
 		goto out;
 	}
@@ -105,8 +108,17 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
 	}
 	set_vm_flush_reset_perms(image);
 
+	link = kzalloc(sizeof(*link), GFP_USER);
+	if (!link) {
+		err = -ENOMEM;
+		goto out;
+	}
+	/* prog doesn't take the ownership of the reference from caller */
+	bpf_prog_inc(prog);
+	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog);
+
 	op_idx = prog->expected_attach_type;
-	err = bpf_struct_ops_prepare_trampoline(tprogs, prog,
+	err = bpf_struct_ops_prepare_trampoline(tlinks, link,
 						&st_ops->func_models[op_idx],
 						image, image + PAGE_SIZE);
 	if (err < 0)
@@ -124,7 +136,9 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
 out:
 	kfree(args);
 	bpf_jit_free_exec(image);
-	kfree(tprogs);
+	if (link)
+		bpf_link_put(&link->link);
+	kfree(tlinks);
 	return err;
 }
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -23,6 +23,7 @@ static const char * const link_type_name[] = {
 	[BPF_LINK_TYPE_XDP] = "xdp",
 	[BPF_LINK_TYPE_PERF_EVENT] = "perf_event",
 	[BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi",
+	[BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops",
 };
 
 static struct hashmap *link_table;
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1013,6 +1013,7 @@ enum bpf_link_type {
 	BPF_LINK_TYPE_XDP = 6,
 	BPF_LINK_TYPE_PERF_EVENT = 7,
 	BPF_LINK_TYPE_KPROBE_MULTI = 8,
+	BPF_LINK_TYPE_STRUCT_OPS = 9,
 
 	MAX_BPF_LINK_TYPE,
 };
@@ -1489,6 +1490,15 @@ union bpf_attr {
 				__aligned_u64	addrs;
 				__aligned_u64	cookies;
 			} kprobe_multi;
+			struct {
+				/* this is overlaid with the target_btf_id above. */
+				__u32		target_btf_id;
+				/* black box user-provided value passed through
+				 * to BPF program at the execution time and
+				 * accessible through bpf_get_attach_cookie() BPF helper
+				 */
+				__u64		cookie;
+			} tracing;
 		};
 	} link_create;
 
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -863,6 +863,14 @@ int bpf_link_create(int prog_fd, int target_fd,
 		if (!OPTS_ZEROED(opts, kprobe_multi))
 			return libbpf_err(-EINVAL);
 		break;
+	case BPF_TRACE_FENTRY:
+	case BPF_TRACE_FEXIT:
+	case BPF_MODIFY_RETURN:
+	case BPF_LSM_MAC:
+		attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
+		if (!OPTS_ZEROED(opts, tracing))
+			return libbpf_err(-EINVAL);
+		break;
 	default:
 		if (!OPTS_ZEROED(opts, flags))
 			return libbpf_err(-EINVAL);
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -420,6 +420,9 @@ struct bpf_link_create_opts {
 			const unsigned long *addrs;
 			const __u64 *cookies;
 		} kprobe_multi;
+		struct {
+			__u64 cookie;
+		} tracing;
 	};
 	size_t :0;
 };
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -11568,12 +11568,17 @@ static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf
 }
 
 /* Common logic for all BPF program types that attach to a btf_id */
-static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog)
+static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
+						   const struct bpf_trace_opts *opts)
 {
+	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
 	char errmsg[STRERR_BUFSIZE];
 	struct bpf_link *link;
 	int prog_fd, pfd;
 
+	if (!OPTS_VALID(opts, bpf_trace_opts))
+		return libbpf_err_ptr(-EINVAL);
+
 	prog_fd = bpf_program__fd(prog);
 	if (prog_fd < 0) {
 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
@@ -11586,7 +11591,8 @@ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *pro
 	link->detach = &bpf_link__detach_fd;
 
 	/* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
-	pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), NULL);
+	link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
+	pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
 	if (pfd < 0) {
 		pfd = -errno;
 		free(link);
@@ -11600,12 +11606,18 @@ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *pro
 
 struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
 {
-	return bpf_program__attach_btf_id(prog);
+	return bpf_program__attach_btf_id(prog, NULL);
phantom
}
 
+struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
+						const struct bpf_trace_opts *opts)
+{
+	return bpf_program__attach_btf_id(prog, opts);
+}
+
 struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
 {
-	return bpf_program__attach_btf_id(prog);
+	return bpf_program__attach_btf_id(prog, NULL);
 }
 
 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -603,8 +603,20 @@ bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
 LIBBPF_API struct bpf_link *
 bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
 				   const char *tp_name);
 
+struct bpf_trace_opts {
+	/* size of this struct, for forward/backward compatibility */
+	size_t sz;
+	/* custom user-provided value fetchable through bpf_get_attach_cookie() */
+	__u64 cookie;
+};
+#define bpf_trace_opts__last_field cookie
+
 LIBBPF_API struct bpf_link *
 bpf_program__attach_trace(const struct bpf_program *prog);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_trace_opts(const struct bpf_program *prog, const struct bpf_trace_opts *opts);
+
 LIBBPF_API struct bpf_link *
 bpf_program__attach_lsm(const struct bpf_program *prog);
 LIBBPF_API struct bpf_link *
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -447,6 +447,7 @@ LIBBPF_0.8.0 {
 		bpf_object__destroy_subskeleton;
 		bpf_object__open_subskeleton;
 		bpf_program__attach_kprobe_multi_opts;
+		bpf_program__attach_trace_opts;
 		bpf_program__attach_usdt;
 		libbpf_register_prog_handler;
 		libbpf_unregister_prog_handler;
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -4,8 +4,11 @@
 #include <pthread.h>
 #include <sched.h>
 #include <sys/syscall.h>
+#include <sys/mman.h>
+#include <unistd.h>
 #include <test_progs.h>
+#include <network_helpers.h>
 #include <bpf/btf.h>
 #include "test_bpf_cookie.skel.h"
 #include "kprobe_multi.skel.h"
@@ -410,6 +413,88 @@ cleanup:
 	bpf_link__destroy(link);
 }
 
+static void tracing_subtest(struct test_bpf_cookie *skel)
+{
+	__u64 cookie;
+	int prog_fd;
+	int fentry_fd = -1, fexit_fd = -1, fmod_ret_fd = -1;
+	LIBBPF_OPTS(bpf_test_run_opts, opts);
+	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+
+	skel->bss->fentry_res = 0;
+	skel->bss->fexit_res = 0;
+
+	cookie = 0x10000000000000L;
+	prog_fd = bpf_program__fd(skel->progs.fentry_test1);
+	link_opts.tracing.cookie = cookie;
+	fentry_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, &link_opts);
+	if (!ASSERT_GE(fentry_fd, 0, "fentry.link_create"))
+		goto cleanup;
+
+	cookie = 0x20000000000000L;
+	prog_fd = bpf_program__fd(skel->progs.fexit_test1);
+	link_opts.tracing.cookie = cookie;
+	fexit_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FEXIT, &link_opts);
+	if (!ASSERT_GE(fexit_fd, 0, "fexit.link_create"))
+		goto cleanup;
+
+	cookie = 0x30000000000000L;
+	prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
+	link_opts.tracing.cookie = cookie;
+	fmod_ret_fd = bpf_link_create(prog_fd, 0, BPF_MODIFY_RETURN, &link_opts);
+	if (!ASSERT_GE(fmod_ret_fd, 0, "fmod_ret.link_create"))
+		goto cleanup;
+
+	prog_fd = bpf_program__fd(skel->progs.fentry_test1);
+	bpf_prog_test_run_opts(prog_fd, &opts);
+
+	prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
+	bpf_prog_test_run_opts(prog_fd, &opts);
+
+	ASSERT_EQ(skel->bss->fentry_res, 0x10000000000000L, "fentry_res");
+	ASSERT_EQ(skel->bss->fexit_res, 0x20000000000000L, "fexit_res");
+	ASSERT_EQ(skel->bss->fmod_ret_res, 0x30000000000000L, "fmod_ret_res");
+
+cleanup:
+	if (fentry_fd >= 0)
+		close(fentry_fd);
+	if (fexit_fd >= 0)
+		close(fexit_fd);
+	if (fmod_ret_fd >= 0)
+		close(fmod_ret_fd);
+}
+
+int stack_mprotect(void);
+
+static void lsm_subtest(struct test_bpf_cookie *skel)
+{
+	__u64 cookie;
+	int prog_fd;
+	int lsm_fd = -1;
+	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+
+	skel->bss->lsm_res = 0;
+
+	cookie = 0x90000000000090L;
+	prog_fd = bpf_program__fd(skel->progs.test_int_hook);
+	link_opts.tracing.cookie = cookie;
+	lsm_fd = bpf_link_create(prog_fd, 0, BPF_LSM_MAC, &link_opts);
+	if (!ASSERT_GE(lsm_fd, 0, "lsm.link_create"))
+		goto cleanup;
+
+	stack_mprotect();
+	if (!ASSERT_EQ(errno, EPERM, "stack_mprotect"))
+		goto cleanup;
+
+	usleep(1);
+
+	ASSERT_EQ(skel->bss->lsm_res, 0x90000000000090L, "fentry_res");
+
+cleanup:
+	if (lsm_fd >= 0)
+		close(lsm_fd);
+}
+
 void test_bpf_cookie(void)
 {
 	struct test_bpf_cookie *skel;
@@ -432,6 +517,10 @@ void test_bpf_cookie(void)
 		tp_subtest(skel);
 	if (test__start_subtest("perf_event"))
 		pe_subtest(skel);
+	if (test__start_subtest("trampoline"))
+		tracing_subtest(skel);
+	if (test__start_subtest("lsm"))
+		lsm_subtest(skel);
 
 	test_bpf_cookie__destroy(skel);
 }
--- a/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
@@ -4,18 +4,23 @@
 #include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
+#include <errno.h>
 
 int my_tid;
 
-int kprobe_res;
-int kprobe_multi_res;
-int kretprobe_res;
-int uprobe_res;
-int uretprobe_res;
-int tp_res;
-int pe_res;
+__u64 kprobe_res;
+__u64 kprobe_multi_res;
+__u64 kretprobe_res;
+__u64 uprobe_res;
+__u64 uretprobe_res;
+__u64 tp_res;
+__u64 pe_res;
+__u64 fentry_res;
+__u64 fexit_res;
+__u64 fmod_ret_res;
+__u64 lsm_res;
 
-static void update(void *ctx, int *res)
+static void update(void *ctx, __u64 *res)
 {
 	if (my_tid != (u32)bpf_get_current_pid_tgid())
 		return;
@@ -82,4 +87,35 @@ int handle_pe(struct pt_regs *ctx)
 	return 0;
 }
 
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(fentry_test1, int a)
+{
+	update(ctx, &fentry_res);
+	return 0;
+}
+
+SEC("fexit/bpf_fentry_test1")
+int BPF_PROG(fexit_test1, int a, int ret)
+{
+	update(ctx, &fexit_res);
+	return 0;
+}
+
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(fmod_ret_test, int _a, int *_b, int _ret)
+{
+	update(ctx, &fmod_ret_res);
+	return 1234;
+}
+
+SEC("lsm/file_mprotect")
+int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
+	     unsigned long reqprot, unsigned long prot, int ret)
+{
+	if (my_tid != (u32)bpf_get_current_pid_tgid())
+		return ret;
+	update(ctx, &lsm_res);
+	return -EPERM;
+}
+
 char _license[] SEC("license") = "GPL";