mirror of https://github.com/torvalds/linux.git (synced 2024-12-04 01:51:34 +00:00)
commit fec56f5890
Introduce the BPF trampoline concept to allow kernel code to call into BPF programs with practically zero overhead. The trampoline generation logic is architecture dependent: it converts the native calling convention into the BPF calling convention. The BPF ISA is 64-bit (even on 32-bit architectures). Registers R1 to R5 are used to pass arguments into BPF functions, and the main BPF program accepts only a single argument "ctx" in R1. The CPU native calling convention is different: x86-64 passes the first 6 arguments in registers and the rest on the stack, x86-32 passes the first 3 arguments in registers, sparc64 passes the first 6 in registers, and so on.

Trampolines between BPF and the kernel already exist. The BPF_CALL_x macros in include/linux/filter.h statically compile trampolines from BPF into kernel helpers. They convert up to five u64 arguments into kernel C pointers and integers. On 64-bit architectures these BPF-to-kernel trampolines are nops; on 32-bit architectures they're meaningful. The opposite job, kernel-to-BPF trampolines, is done by the CAST_TO_U64 macros and the __bpf_trace_##call() shim functions in include/trace/bpf_probe.h. They convert kernel function arguments into an array of u64s that a BPF program consumes via the R1=ctx pointer.

This patch set does the same job as the __bpf_trace_##call() static trampolines, but dynamically, for any kernel function. There are ~22k global kernel functions that are attachable via a nop at function entry. The function arguments and types are described in BTF. The job of the btf_distill_func_proto() function is to extract useful information from BTF into a "function model" that architecture-dependent trampoline generators use to generate assembly code that casts kernel function arguments into an array of u64s. For example, the kernel function eth_type_trans has two pointer arguments. They are cast to u64 and stored onto the stack of the generated trampoline, and the pointer to that stack space is passed into the BPF program in R1. On x86-64 such a generated trampoline consumes 16 bytes of stack and needs two stores, of %rdi and %rsi, into that stack. The verifier makes sure that only those two u64s are accessed, read-only, by the BPF program. The verifier also recognizes the precise type of the pointers being accessed and does not allow typecasting a pointer to a different type within the BPF program.

The tracing use case in the datacenter demonstrated that certain key kernel functions (like tcp_retransmit_skb) have 2 or more kprobes that are always active, and other functions have both a kprobe and a kretprobe. So it is essential to keep both kernel code and BPF programs executing at maximum speed. Hence the generated BPF trampoline is re-generated every time a new program is attached or detached, to maintain maximum performance.

To avoid the high cost of retpoline, the attached BPF programs are called directly. __bpf_prog_enter/exit() are used to support per-program execution stats. In the future this logic will be optimized further by adding support for bpf_stats_enabled_key inside the generated assembly code, and the introduction of preemptible and sleepable BPF programs will completely remove the need to call __bpf_prog_enter/exit().

Detaching a BPF program from the trampoline should not fail. To avoid memory allocation in the detach path, half of the page is used as a reserve and flipped after each attach/detach. 2k bytes is enough to call 40+ BPF programs directly, which is sufficient for BPF tracing use cases. This limit can be increased in the future.
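To make the eth_type_trans() example concrete, here is a conceptual sketch (not code emitted by this patch) of what the generated x86-64 trampoline does, written as C; run_fentry_progs() is a hypothetical stand-in for the direct calls to each attached program that the real trampoline emits inline:

/* Conceptual illustration only: generated x86-64 trampoline for
 * eth_type_trans(struct sk_buff *skb, struct net_device *dev).
 */
void eth_type_trans_trampoline(struct sk_buff *skb, struct net_device *dev)
{
	u64 ctx[2];		/* 16 bytes of trampoline stack */

	ctx[0] = (u64)skb;	/* store of %rdi */
	ctx[1] = (u64)dev;	/* store of %rsi */

	/* R1 = &ctx[0]. The verifier guarantees each program reads only
	 * these two u64s, read-only, with the correct pointer types.
	 */
	run_fentry_progs(ctx);	/* hypothetical: direct calls, no retpoline */

	/* BPF_TRAMP_F_RESTORE_REGS: %rdi/%rsi are restored and execution
	 * continues in eth_type_trans() itself.
	 */
}

When fexit programs are attached, the trampoline additionally calls the original function so that the programs can see its return value (BPF_TRAMP_F_CALL_ORIG in the code below).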
BPF_TRACE_FENTRY programs have access to the raw kernel function arguments, while BPF_TRACE_FEXIT programs have access to the kernel return value as well. Often a kprobe BPF program remembers function arguments in a map while a kretprobe fetches the arguments from the map and analyzes them together with the return value. BPF_TRACE_FEXIT accelerates this typical use case.

Recursion prevention for kprobe BPF programs is done via a per-cpu bpf_prog_active counter. In practice that turned out to be a mistake: it caused programs to randomly skip execution, and the tracing tools missed the results they were looking for. Hence the BPF trampoline doesn't provide built-in recursion prevention. It is the job of the BPF program itself and will be addressed in follow-up patches.

BPF trampoline is intended to be used beyond tracing and fentry/fexit use cases in the future, for example to remove the retpoline cost from XDP programs.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20191114185720.1641606-5-ast@kernel.org
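As a usage illustration (a hypothetical program, not part of this patch; the "fexit/" section naming and header paths follow the libbpf conventions added alongside this series), a single fexit program sees the arguments and the return value together, with no map round-trip between a kprobe and a kretprobe:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

/* ctx is an array of u64s: the kernel arguments first, the return value last. */
SEC("fexit/eth_type_trans")
int trace_eth_type_trans_exit(__u64 *ctx)
{
	__u64 skb = ctx[0];	/* arg1: struct sk_buff * */
	__u64 dev = ctx[1];	/* arg2: struct net_device * */
	__u64 proto = ctx[2];	/* return value of eth_type_trans() */

	bpf_printk("skb %llx dev %llx proto %llu", skb, dev, proto);
	return 0;
}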
254 lines
6.3 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	void *image;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	/* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image) {
		kfree(tr);
		tr = NULL;
		goto out;
	}

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);

	set_vm_flush_reset_perms(image);
	/* Keep image as writeable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	tr->image = image;
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86. Pick a number to fit into PAGE_SIZE / 2.
 */
#define BPF_MAX_TRAMP_PROGS 40
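
/* The trampoline image is a single page used as two halves. tr->selector
 * picks the live half; the other half is kept as a spare, so an update
 * never allocates memory and detaching a program cannot fail.
 */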
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2;
	void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2;
	struct bpf_prog *progs_to_run[BPF_MAX_TRAMP_PROGS];
	int fentry_cnt = tr->progs_cnt[BPF_TRAMP_FENTRY];
	int fexit_cnt = tr->progs_cnt[BPF_TRAMP_FEXIT];
	struct bpf_prog **progs, **fentry, **fexit;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	struct bpf_prog_aux *aux;
	int err;

	if (fentry_cnt + fexit_cnt == 0) {
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL_TO_NOP,
					 old_image, NULL);
		tr->selector = 0;
		goto out;
	}

	/* populate fentry progs */
	fentry = progs = progs_to_run;
	hlist_for_each_entry(aux, &tr->progs_hlist[BPF_TRAMP_FENTRY], tramp_hlist)
		*progs++ = aux->prog;

	/* populate fexit progs */
	fexit = progs;
	hlist_for_each_entry(aux, &tr->progs_hlist[BPF_TRAMP_FEXIT], tramp_hlist)
		*progs++ = aux->prog;

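	/* fexit progs need the return value, so the trampoline must call the
	 * original function itself (CALL_ORIG) and return directly to its
	 * caller (SKIP_FRAME) instead of just restoring registers.
	 */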
	if (fexit_cnt)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	err = arch_prepare_bpf_trampoline(new_image, &tr->func.model, flags,
					  fentry, fentry_cnt,
					  fexit, fexit_cnt,
					  tr->func.addr);
	if (err)
		goto out;

	if (tr->selector)
		/* progs already running at this address */
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL_TO_CALL,
					 old_image, new_image);
	else
		/* first time registering */
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_NOP_TO_CALL,
					 NULL, new_image);
	if (err)
		goto out;
	tr->selector++;
out:
	return err;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(enum bpf_attach_type t)
{
	switch (t) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	default:
		return BPF_TRAMP_FEXIT;
	}
}

int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_trampoline *tr;
	int err = 0;

	tr = prog->aux->trampoline;
	kind = bpf_attach_type_to_tramp(prog->expected_attach_type);
	mutex_lock(&tr->mutex);
	if (tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT]
	    >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(prog->aux->trampoline);
	if (err) {
		hlist_del(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_trampoline *tr;
	int err;

	tr = prog->aux->trampoline;
	kind = bpf_attach_type_to_tramp(prog->expected_attach_type);
	mutex_lock(&tr->mutex);
	hlist_del(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(prog->aux->trampoline);
	mutex_unlock(&tr->mutex);
	return err;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	bpf_jit_free_exec(tr->image);
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

/* The logic is similar to BPF_PROG_RUN, but with the explicit rcu and preempt
 * handling that the trampoline needs. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 */
u64 notrace __bpf_prog_enter(void)
{
	u64 start = 0;

	rcu_read_lock();
	preempt_disable();
	if (static_branch_unlikely(&bpf_stats_enabled_key))
		start = sched_clock();
	return start;
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter
	     * and disabled in __bpf_prog_exit.
	     * And vice versa.
	     * Hence check that 'start' is not zero.
	     */
	    start) {
		stats = this_cpu_ptr(prog->aux->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;
		stats->nsecs += sched_clock() - start;
		u64_stats_update_end(&stats->syncp);
	}
	preempt_enable();
	rcu_read_unlock();
}

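/* Weak stub for architectures without trampoline support; arch JITs that
 * implement trampolines (x86-64 in this series) override it, otherwise
 * attaching fails with -ENOTSUPP.
 */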
int __weak
arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
			    struct bpf_prog **fentry_progs, int fentry_cnt,
			    struct bpf_prog **fexit_progs, int fexit_cnt,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);