Daniel Borkmann says:

====================
bpf 2021-08-10

We've added 5 non-merge commits during the last 2 day(s) which contain
a total of 7 files changed, 27 insertions(+), 15 deletions(-).

1) Fix missing bpf_read_lock_trace() context for BPF loader progs, from Yonghong Song.

2) Fix corner case where BPF prog retrieves wrong local storage, also from Yonghong Song.

3) Restrict availability of BPF write_user helper behind lockdown, from Daniel Borkmann.

4) Fix multiple kernel-doc warnings in BPF core, from Randy Dunlap.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf, core: Fix kernel-doc notation
  bpf: Fix potentially incorrect results with bpf_get_local_storage()
  bpf: Add missing bpf_read_[un]lock_trace() for syscall program
  bpf: Add lockdown check for probe_write_user helper
  bpf: Add _kernel suffix to internal lockdown_bpf_read
====================

Link: https://lore.kernel.org/r/20210810144025.22814-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Commit 2e273b0996 by Jakub Kicinski, 2021-08-10 07:52:09 -07:00
7 files changed, 27 insertions(+), 15 deletions(-)

--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h

@@ -201,8 +201,8 @@ static inline void bpf_cgroup_storage_unset(void)
 {
 	int i;
 
-	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
 			continue;
 
 		this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
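Why the walk is reversed: bpf_cgroup_storage_set() claims the first free slot front to back, so when one BPF program nests inside another on the same task (e.g. a tracing program firing while a cgroup program runs), the innermost program owns the highest-indexed slot matching current. Unset, and likewise the lookup in bpf_get_local_storage() further below, must therefore search back to front so the most recent slot wins; the unlikely() hint flips to likely() because the high slots examined first are now usually empty or foreign. A minimal user-space sketch of the slot discipline, with invented names (slot, set_slot, get_slot) standing in for the kernel's per-CPU bpf_cgroup_storage_info:

#include <assert.h>

#define NEST_MAX 8	/* mirrors BPF_CGROUP_STORAGE_NEST_MAX */

struct slot { const void *task; int storage; };
static struct slot slots[NEST_MAX];

/* set: claim the first free slot, front to back (as bpf_cgroup_storage_set() does) */
static int set_slot(const void *task, int storage)
{
	for (int i = 0; i < NEST_MAX; i++) {
		if (slots[i].task)
			continue;
		slots[i].task = task;
		slots[i].storage = storage;
		return i;
	}
	return -1;	/* all slots busy; the kernel returns -EBUSY */
}

/* get: search back to front so a nested (newer) entry shadows an older one */
static int get_slot(const void *task)
{
	for (int i = NEST_MAX - 1; i >= 0; i--)
		if (slots[i].task == task)
			return slots[i].storage;
	return -1;
}

int main(void)
{
	int task;				/* stand-in for the kernel's 'current' */

	set_slot(&task, 100);			/* outer program */
	set_slot(&task, 200);			/* nested program, same task */
	assert(get_slot(&task) == 200);		/* a forward search would wrongly return 100 */
	return 0;
}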

--- a/include/linux/security.h
+++ b/include/linux/security.h

@@ -120,10 +120,11 @@ enum lockdown_reason {
 	LOCKDOWN_MMIOTRACE,
 	LOCKDOWN_DEBUGFS,
 	LOCKDOWN_XMON_WR,
+	LOCKDOWN_BPF_WRITE_USER,
 	LOCKDOWN_INTEGRITY_MAX,
 	LOCKDOWN_KCORE,
 	LOCKDOWN_KPROBES,
-	LOCKDOWN_BPF_READ,
+	LOCKDOWN_BPF_READ_KERNEL,
 	LOCKDOWN_PERF,
 	LOCKDOWN_TRACEFS,
 	LOCKDOWN_XMON_RW,
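Placement within this enum is semantic, not cosmetic: lockdown reasons are ordered by severity, and everything up to LOCKDOWN_INTEGRITY_MAX is denied in integrity mode while everything up to LOCKDOWN_CONFIDENTIALITY_MAX is denied in confidentiality mode. Writing user memory can tamper with a running system, so LOCKDOWN_BPF_WRITE_USER lands in the integrity range; reading kernel memory only discloses data, so LOCKDOWN_BPF_READ_KERNEL stays in the confidentiality range. A toy model of the comparison (enum and function names are simplified stand-ins for the real check behind security_locked_down()):

#include <assert.h>

/* Order matters: a reason is denied once the lockdown level reaches it. */
enum reason {
	NONE,
	BPF_WRITE_USER,		/* integrity range */
	INTEGRITY_MAX,
	BPF_READ_KERNEL,	/* confidentiality range */
	CONFIDENTIALITY_MAX,
};

static enum reason lockdown_level = INTEGRITY_MAX;	/* booted with lockdown=integrity */

static int locked_down(enum reason what)
{
	return (what != NONE && lockdown_level >= what) ? -1 : 0;	/* -EPERM in the kernel */
}

int main(void)
{
	assert(locked_down(BPF_WRITE_USER) < 0);	/* write_user now refused */
	assert(locked_down(BPF_READ_KERNEL) == 0);	/* kernel reads still allowed */
	return 0;
}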

--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c

@@ -1362,11 +1362,13 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
 }
 
 /**
- *	__bpf_prog_run - run eBPF program on a given context
+ *	___bpf_prog_run - run eBPF program on a given context
  *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
  *	@insn: is the array of eBPF instructions
  *
  * Decode and execute eBPF instructions.
+ *
+ * Return: whatever value is in %BPF_R0 at program exit
  */
 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 {
@@ -1878,6 +1880,9 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
  *
  * Try to JIT eBPF program, if JIT is not available, use interpreter.
  * The BPF program will be executed via BPF_PROG_RUN() macro.
+ *
+ * Return: the &fp argument along with &err set to 0 for success or
+ * a negative errno code on failure
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
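Both hunks address the same two kernel-doc rules: the name in the /** ... */ header must match the function it documents (the stale __bpf_prog_run reference makes scripts/kernel-doc warn that it was expecting a prototype for that name), and return values belong in a dedicated Return: section rather than in free-form prose. For reference, a minimal well-formed kernel-doc comment on a hypothetical function:

/**
 * clamp_mtu - clamp an MTU value to the allowed range
 * @mtu: requested MTU
 *
 * Return: @mtu limited to the inclusive range [68, 9000]
 */
static unsigned int clamp_mtu(unsigned int mtu)
{
	if (mtu < 68)
		return 68;
	return mtu > 9000 ? 9000 : mtu;
}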

--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c

@@ -397,8 +397,8 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
 	void *ptr;
 	int i;
 
-	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
 			continue;
 
 		storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
@@ -1070,12 +1070,12 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_probe_read_user:
 		return &bpf_probe_read_user_proto;
 	case BPF_FUNC_probe_read_kernel:
-		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
 		       NULL : &bpf_probe_read_kernel_proto;
 	case BPF_FUNC_probe_read_user_str:
 		return &bpf_probe_read_user_str_proto;
 	case BPF_FUNC_probe_read_kernel_str:
-		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
 		       NULL : &bpf_probe_read_kernel_str_proto;
 	case BPF_FUNC_snprintf_btf:
 		return &bpf_snprintf_btf_proto;

--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c

@@ -990,28 +990,29 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_numa_node_id_proto;
 	case BPF_FUNC_perf_event_read:
 		return &bpf_perf_event_read_proto;
-	case BPF_FUNC_probe_write_user:
-		return bpf_get_probe_write_proto();
 	case BPF_FUNC_current_task_under_cgroup:
 		return &bpf_current_task_under_cgroup_proto;
 	case BPF_FUNC_get_prandom_u32:
 		return &bpf_get_prandom_u32_proto;
+	case BPF_FUNC_probe_write_user:
+		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
+		       NULL : bpf_get_probe_write_proto();
 	case BPF_FUNC_probe_read_user:
 		return &bpf_probe_read_user_proto;
 	case BPF_FUNC_probe_read_kernel:
-		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
 		       NULL : &bpf_probe_read_kernel_proto;
 	case BPF_FUNC_probe_read_user_str:
 		return &bpf_probe_read_user_str_proto;
 	case BPF_FUNC_probe_read_kernel_str:
-		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
 		       NULL : &bpf_probe_read_kernel_str_proto;
 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	case BPF_FUNC_probe_read:
-		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
 		       NULL : &bpf_probe_read_compat_proto;
 	case BPF_FUNC_probe_read_str:
-		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
 		       NULL : &bpf_probe_read_compat_str_proto;
 #endif
 #ifdef CONFIG_CGROUPS
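The enforcement mechanism is load-time, not run-time: when a func_proto hook returns NULL, the verifier rejects any program that calls the helper (an "unknown func" error), so an integrity-locked-down kernel now simply refuses to load programs using bpf_probe_write_user(). Condensed, the gating pattern looks as follows; my_func_proto() is an illustrative stand-in for the per-program-type hooks shown above:

/* Illustrative only: withhold a helper's proto under lockdown so the
 * verifier fails the program at load time rather than the helper
 * erroring out while the program runs.
 */
static const struct bpf_func_proto *
my_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_probe_write_user:
		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
		       NULL : bpf_get_probe_write_proto();
	default:
		return NULL;
	}
}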

--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c

@@ -7,6 +7,7 @@
 #include <linux/vmalloc.h>
 #include <linux/etherdevice.h>
 #include <linux/filter.h>
+#include <linux/rcupdate_trace.h>
 #include <linux/sched/signal.h>
 #include <net/bpf_sk_storage.h>
 #include <net/sock.h>
@@ -951,7 +952,10 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog,
 			goto out;
 		}
 	}
+
+	rcu_read_lock_trace();
 	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
+	rcu_read_unlock_trace();
 
 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
 		err = -EFAULT;
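BPF_PROG_TYPE_SYSCALL programs are sleepable, and sleepable programs are protected by RCU-tasks-trace rather than classic RCU: the BPF trampoline brackets them with rcu_read_lock_trace()/rcu_read_unlock_trace(), and map code waits for a tasks-trace grace period before freeing objects such programs may still hold. The test-run path invoked the program without that bracket, so memory the program dereferenced could in principle be freed mid-run. The invariant, sketched as a kernel-style wrapper (run_sleepable_prog() is illustrative, not an existing kernel function):

#include <linux/filter.h>
#include <linux/rcupdate_trace.h>

/* Any direct invocation of a sleepable BPF program must hold the
 * RCU-tasks-trace read lock so objects it dereferences survive until
 * a tasks-trace grace period has elapsed.
 */
static u32 run_sleepable_prog(const struct bpf_prog *prog, void *ctx)
{
	u32 ret;

	rcu_read_lock_trace();
	ret = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	return ret;
}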

--- a/security/security.c
+++ b/security/security.c

@@ -58,10 +58,11 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
 	[LOCKDOWN_MMIOTRACE] = "unsafe mmio",
 	[LOCKDOWN_DEBUGFS] = "debugfs access",
 	[LOCKDOWN_XMON_WR] = "xmon write access",
+	[LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
 	[LOCKDOWN_INTEGRITY_MAX] = "integrity",
 	[LOCKDOWN_KCORE] = "/proc/kcore access",
 	[LOCKDOWN_KPROBES] = "use of kprobes",
-	[LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
+	[LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
 	[LOCKDOWN_PERF] = "unsafe use of perf",
 	[LOCKDOWN_TRACEFS] = "use of tracefs",
 	[LOCKDOWN_XMON_RW] = "xmon read and write access",