bpf: Extend sys_bpf commands for bpf_syscall programs.

bpf_syscall programs can be used directly by kernel modules
to load programs and create maps via a kernel skeleton.
. Export bpf_sys_bpf syscall wrapper to be used in kernel skeleton.
. Export bpf_map_get to be used in kernel skeleton.
. Allow prog_run cmd for bpf_syscall programs with recursion check.
. Enable link_create and raw_tp_open cmds.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220209232001.27490-2-alexei.starovoitov@gmail.com
This commit is contained in:
Alexei Starovoitov 2022-02-09 15:19:57 -08:00 committed by Daniel Borkmann
parent 4f5e483b8c
commit b1d18a7574

View File

@ -985,6 +985,7 @@ struct bpf_map *bpf_map_get(u32 ufd)
	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
@ -4756,23 +4757,52 @@ static bool syscall_prog_is_valid_access(int off, int size,
	return true;
}
/*
 * bpf_sys_bpf() - helper backing bpf(2) invocations made from
 * BPF_PROG_TYPE_SYSCALL programs; also exported so kernel-module
 * skeletons can load programs and create maps through it.
 *
 * Only an allow-listed subset of bpf(2) commands is permitted here;
 * every other command returns -EINVAL.  Allowed commands are forwarded
 * to __sys_bpf() with a kernel-space attr pointer (KERNEL_BPFPTR).
 */
BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
{
	struct bpf_prog * __maybe_unused prog;

	switch (cmd) {
	case BPF_MAP_CREATE:
	case BPF_MAP_UPDATE_ELEM:
	case BPF_MAP_FREEZE:
	case BPF_PROG_LOAD:
	case BPF_BTF_LOAD:
	case BPF_LINK_CREATE:
	case BPF_RAW_TRACEPOINT_OPEN:
		break;
#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
	case BPF_PROG_TEST_RUN:
		/*
		 * Test-running a syscall program from here supports only
		 * ctx_in/ctx_size_in; any data/ctx_out/duration/repeat/flags
		 * request is rejected up front.
		 */
		if (attr->test.data_in || attr->test.data_out ||
		    attr->test.ctx_out || attr->test.duration ||
		    attr->test.repeat || attr->test.flags)
			return -EINVAL;

		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		/* ctx must cover the prog's max context offset and fit in u16 */
		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
		    attr->test.ctx_size_in > U16_MAX) {
			bpf_prog_put(prog);
			return -EINVAL;
		}

		/*
		 * The enter/exit pair doubles as the recursion check: a
		 * syscall prog that (directly or indirectly) test-runs
		 * itself is detected here and refused with -EBUSY.
		 */
		if (!__bpf_prog_enter_sleepable(prog)) {
			/* recursion detected */
			bpf_prog_put(prog);
			return -EBUSY;
		}
		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
		__bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */);
		bpf_prog_put(prog);
		return 0;
#endif
	default:
		return -EINVAL;
	}
	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}
EXPORT_SYMBOL(bpf_sys_bpf);
static const struct bpf_func_proto bpf_sys_bpf_proto = {
	.func		= bpf_sys_bpf,