Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2021-12-30

The following pull-request contains BPF updates for your *net-next* tree.

We've added 72 non-merge commits during the last 20 day(s), touching a total
of 223 files with 3510 insertions(+) and 1591 deletions(-).

The main changes are:

1) Automatic setrlimit in libbpf when the kernel uses memcg-based memory
   accounting for BPF, from Andrii.

2) Beautify and de-verbose verifier logs, from Christy.

3) Composable verifier types, from Hao.

4) bpf_strncmp helper, from Hou.

5) bpf.h header dependency cleanup, from Jakub.

6) get_func_[arg|ret|arg_cnt] helpers, from Jiri.

7) Sleepable local storage, from KP.

8) Extend kfunc with PTR_TO_CTX, PTR_TO_MEM argument support, from Kumar.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -57,7 +57,7 @@ $(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_
$(LIBBPF_BOOTSTRAP): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_BOOTSTRAP_OUTPUT)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \
DESTDIR=$(LIBBPF_BOOTSTRAP_DESTDIR) prefix= \
ARCH= CC=$(HOSTCC) LD=$(HOSTLD) $@ install_headers
ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) $@ install_headers

$(LIBBPF_BOOTSTRAP_INTERNAL_HDRS): $(LIBBPF_BOOTSTRAP_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_BOOTSTRAP_HDRS_DIR)
$(call QUIET_INSTALL, $@)
@@ -152,6 +152,9 @@ CFLAGS += -DHAVE_LIBBFD_SUPPORT
SRCS += $(BFD_SRCS)
endif

HOST_CFLAGS = $(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),\
$(subst $(CLANG_CROSS_FLAGS),,$(CFLAGS)))

BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool

BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o disasm.o)
@@ -202,7 +205,7 @@ endif
CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS)

$(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD $< -o $@
$(QUIET_CC)$(HOSTCC) $(HOST_CFLAGS) -c -MMD $< -o $@

$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@
@@ -213,15 +216,13 @@ ifneq ($(feature-zlib), 1)
endif

$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
$(QUIET_LINK)$(HOSTCC) $(CFLAGS) $(LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@
$(QUIET_LINK)$(HOSTCC) $(HOST_CFLAGS) $(LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@

$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@

$(BOOTSTRAP_OUTPUT)%.o: %.c $(LIBBPF_BOOTSTRAP_INTERNAL_HDRS) | $(BOOTSTRAP_OUTPUT)
$(QUIET_CC)$(HOSTCC) \
$(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),$(CFLAGS)) \
-c -MMD $< -o $@
$(QUIET_CC)$(HOSTCC) $(HOST_CFLAGS) -c -MMD $< -o $@

$(OUTPUT)%.o: %.c
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@

@@ -642,12 +642,32 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
printf("\n");
}

static void
probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
/*
* Probe for availability of kernel commit (5.3):
*
* c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
*/
static void probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
{
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.prog_ifindex = ifindex,
);
struct bpf_insn insns[BPF_MAXINSNS + 1];
bool res;
int i, fd;

for (i = 0; i < BPF_MAXINSNS; i++)
insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
insns[BPF_MAXINSNS] = BPF_EXIT_INSN();

errno = 0;
fd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, NULL, "GPL",
insns, ARRAY_SIZE(insns), &opts);
res = fd >= 0 || (errno != E2BIG && errno != EINVAL);

if (fd >= 0)
close(fd);

res = bpf_probe_large_insn_limit(ifindex);
print_bool_feature("have_large_insn_limit",
"Large program size limit",
"LARGE_INSN_LIMIT",

@@ -408,6 +408,8 @@ int main(int argc, char **argv)
bool version_requested = false;
int opt, ret;

setlinebuf(stdout);

last_do_help = do_help;
pretty_output = false;
json_output = false;

@@ -19,6 +19,7 @@ CC = $(HOSTCC)
LD = $(HOSTLD)
ARCH = $(HOSTARCH)
RM ?= rm
CROSS_COMPILE =

OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/


@@ -12,7 +12,7 @@ BPFOBJ := $(BPFOBJ_OUTPUT)libbpf.a
BPF_DESTDIR := $(BPFOBJ_OUTPUT)
BPF_INCLUDE := $(BPF_DESTDIR)/include
INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi)
CFLAGS := -g -Wall
CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS)

# Try to detect best kernel BTF source
KERNEL_REL := $(shell uname -r)
@@ -88,4 +88,4 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OU

$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT)
$(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) \
CC=$(HOSTCC) LD=$(HOSTLD)
ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD)

@@ -4983,6 +4983,41 @@ union bpf_attr {
* Return
* The number of loops performed, **-EINVAL** for invalid **flags**,
* **-E2BIG** if **nr_loops** exceeds the maximum number of loops.
*
* long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2)
* Description
* Do strncmp() between **s1** and **s2**. **s1** doesn't need
* to be null-terminated and **s1_sz** is the maximum storage
* size of **s1**. **s2** must be a read-only string.
* Return
* An integer less than, equal to, or greater than zero
* if the first **s1_sz** bytes of **s1** is found to be
* less than, to match, or be greater than **s2**.
*
* long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
* Description
* Get **n**-th argument (zero based) of the traced function (for tracing programs)
* returned in **value**.
*
* Return
* 0 on success.
* **-EINVAL** if n >= arguments count of traced function.
*
* long bpf_get_func_ret(void *ctx, u64 *value)
* Description
* Get return value of the traced function (for tracing programs)
* in **value**.
*
* Return
* 0 on success.
* **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
*
* long bpf_get_func_arg_cnt(void *ctx)
* Description
* Get number of arguments of the traced function (for tracing programs).
*
* Return
* The number of arguments of the traced function.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5167,6 +5202,10 @@ union bpf_attr {
FN(kallsyms_lookup_name), \
FN(find_vma), \
FN(loop), \
FN(strncmp), \
FN(get_func_arg), \
FN(get_func_ret), \
FN(get_func_arg_cnt), \
/* */

/* integer value in 'imm' field of BPF_CALL instruction selects which helper

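The helper documentation above covers the new bpf_strncmp() and bpf_get_func_arg/ret/arg_cnt helpers. Below is a minimal, hypothetical sketch of how they might be combined in a fentry program; the attach point (do_unlinkat), buffer size, and compared string are illustrative and are not part of this diff.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("fentry/do_unlinkat")
int BPF_PROG(trace_unlink, int dfd, struct filename *name)
{
	__u64 arg0 = 0;
	long nr_args;
	char buf[16] = {};

	nr_args = bpf_get_func_arg_cnt(ctx);   /* number of traced-function arguments */
	bpf_get_func_arg(ctx, 0, &arg0);       /* first argument (dfd) as a raw u64 */

	bpf_probe_read_kernel_str(buf, sizeof(buf), name->name);
	/* exact match: s1 is a sized buffer, s2 must be a constant string */
	if (bpf_strncmp(buf, sizeof(buf), "passwd") == 0)
		bpf_printk("passwd unlink: %ld args, arg0=%llu", nr_args, arg0);
	return 0;
}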
@@ -90,6 +90,7 @@ override CFLAGS += -Werror -Wall
override CFLAGS += $(INCLUDES)
override CFLAGS += -fvisibility=hidden
override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
override CFLAGS += $(CLANG_CROSS_FLAGS)

# flags specific for shared library
SHLIB_FLAGS := -DSHARED -fPIC
@@ -162,7 +163,7 @@ $(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)

$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED) $(VERSION_SCRIPT)
$(QUIET_LINK)$(CC) $(LDFLAGS) \
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) \
--shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
-Wl,--version-script=$(VERSION_SCRIPT) $< -lelf -lz -o $@
@ln -sf $(@F) $(OUTPUT)libbpf.so

@@ -28,7 +28,9 @@
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <limits.h>
#include <sys/resource.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
@@ -94,6 +96,77 @@ static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int
return fd;
}

/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
* memcg-based memory accounting for BPF maps and progs. This was done in [0].
* We use the support for bpf_ktime_get_coarse_ns() helper, which was added in
* the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF.
*
* [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
* [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
*/
int probe_memcg_account(void)
{
const size_t prog_load_attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
struct bpf_insn insns[] = {
BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
BPF_EXIT_INSN(),
};
size_t insn_cnt = sizeof(insns) / sizeof(insns[0]);
union bpf_attr attr;
int prog_fd;

/* attempt loading freplace trying to use custom BTF */
memset(&attr, 0, prog_load_attr_sz);
attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
attr.insns = ptr_to_u64(insns);
attr.insn_cnt = insn_cnt;
attr.license = ptr_to_u64("GPL");

prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
if (prog_fd >= 0) {
close(prog_fd);
return 1;
}
return 0;
}

static bool memlock_bumped;
static rlim_t memlock_rlim = RLIM_INFINITY;

int libbpf_set_memlock_rlim(size_t memlock_bytes)
{
if (memlock_bumped)
return libbpf_err(-EBUSY);

memlock_rlim = memlock_bytes;
return 0;
}

int bump_rlimit_memlock(void)
{
struct rlimit rlim;

/* this the default in libbpf 1.0, but for now user has to opt-in explicitly */
if (!(libbpf_mode & LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK))
return 0;

/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
return 0;

memlock_bumped = true;

/* zero memlock_rlim_max disables auto-bumping RLIMIT_MEMLOCK */
if (memlock_rlim == 0)
return 0;

rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
if (setrlimit(RLIMIT_MEMLOCK, &rlim))
return -errno;

return 0;
}

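For the user-space side, here is a minimal, hypothetical sketch of how an application might opt into the automatic RLIMIT_MEMLOCK bump and cap it, using libbpf_set_strict_mode() and the libbpf_set_memlock_rlim() declaration added to bpf.h further down in this diff; the map parameters and the 64 MB cap are illustrative.

#include <bpf/bpf.h>
#include <bpf/libbpf_legacy.h>

int main(void)
{
	int map_fd;

	/* must run before the first prog/map/BTF load operation */
	libbpf_set_strict_mode(LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK);
	/* cap the automatic bump at 64 MB instead of the RLIM_INFINITY default */
	libbpf_set_memlock_rlim(64UL * 1024 * 1024);

	/* on pre-5.11 kernels this triggers the setrlimit() bump; on kernels
	 * with memcg accounting the bump is skipped (probe_memcg_account()) */
	map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo_map", 4, 8, 128, NULL);
	return map_fd < 0 ? 1 : 0;
}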
int bpf_map_create(enum bpf_map_type map_type,
const char *map_name,
__u32 key_size,
@@ -105,6 +178,8 @@ int bpf_map_create(enum bpf_map_type map_type,
union bpf_attr attr;
int fd;

bump_rlimit_memlock();

memset(&attr, 0, attr_sz);

if (!OPTS_VALID(opts, bpf_map_create_opts))
@@ -112,7 +187,7 @@ int bpf_map_create(enum bpf_map_type map_type,

attr.map_type = map_type;
if (map_name)
strncat(attr.map_name, map_name, sizeof(attr.map_name) - 1);
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
attr.key_size = key_size;
attr.value_size = value_size;
attr.max_entries = max_entries;
@@ -251,6 +326,8 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
union bpf_attr attr;
char *log_buf;

bump_rlimit_memlock();

if (!OPTS_VALID(opts, bpf_prog_load_opts))
return libbpf_err(-EINVAL);

@@ -271,7 +348,7 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
attr.kern_version = OPTS_GET(opts, kern_version, 0);

if (prog_name)
strncat(attr.prog_name, prog_name, sizeof(attr.prog_name) - 1);
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
attr.license = ptr_to_u64(license);

if (insn_cnt > UINT_MAX)
@@ -456,6 +533,8 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
union bpf_attr attr;
int fd;

bump_rlimit_memlock();

memset(&attr, 0, sizeof(attr));
attr.prog_type = type;
attr.insn_cnt = (__u32)insns_cnt;
@@ -1056,6 +1135,8 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_loa
__u32 log_level;
int fd;

bump_rlimit_memlock();

memset(&attr, 0, attr_sz);

if (!OPTS_VALID(opts, bpf_btf_load_opts))

@@ -35,6 +35,8 @@
extern "C" {
#endif

int libbpf_set_memlock_rlim(size_t memlock_bytes);

struct bpf_map_create_opts {
size_t sz; /* size of this struct for forward/backward compatibility */

||||
@@ -66,277 +66,204 @@
|
||||
|
||||
#if defined(__KERNEL__) || defined(__VMLINUX_H__)
|
||||
|
||||
#define PT_REGS_PARM1(x) ((x)->di)
|
||||
#define PT_REGS_PARM2(x) ((x)->si)
|
||||
#define PT_REGS_PARM3(x) ((x)->dx)
|
||||
#define PT_REGS_PARM4(x) ((x)->cx)
|
||||
#define PT_REGS_PARM5(x) ((x)->r8)
|
||||
#define PT_REGS_RET(x) ((x)->sp)
|
||||
#define PT_REGS_FP(x) ((x)->bp)
|
||||
#define PT_REGS_RC(x) ((x)->ax)
|
||||
#define PT_REGS_SP(x) ((x)->sp)
|
||||
#define PT_REGS_IP(x) ((x)->ip)
|
||||
|
||||
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), di)
|
||||
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), si)
|
||||
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), dx)
|
||||
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), cx)
|
||||
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8)
|
||||
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), sp)
|
||||
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), bp)
|
||||
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), ax)
|
||||
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp)
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), ip)
|
||||
#define __PT_PARM1_REG di
|
||||
#define __PT_PARM2_REG si
|
||||
#define __PT_PARM3_REG dx
|
||||
#define __PT_PARM4_REG cx
|
||||
#define __PT_PARM5_REG r8
|
||||
#define __PT_RET_REG sp
|
||||
#define __PT_FP_REG bp
|
||||
#define __PT_RC_REG ax
|
||||
#define __PT_SP_REG sp
|
||||
#define __PT_IP_REG ip
|
||||
|
||||
#else
|
||||
|
||||
#ifdef __i386__
|
||||
|
||||
#define __PT_PARM1_REG eax
|
||||
#define __PT_PARM2_REG edx
|
||||
#define __PT_PARM3_REG ecx
|
||||
/* i386 kernel is built with -mregparm=3 */
|
||||
#define PT_REGS_PARM1(x) ((x)->eax)
|
||||
#define PT_REGS_PARM2(x) ((x)->edx)
|
||||
#define PT_REGS_PARM3(x) ((x)->ecx)
|
||||
#define PT_REGS_PARM4(x) 0
|
||||
#define PT_REGS_PARM5(x) 0
|
||||
#define PT_REGS_RET(x) ((x)->esp)
|
||||
#define PT_REGS_FP(x) ((x)->ebp)
|
||||
#define PT_REGS_RC(x) ((x)->eax)
|
||||
#define PT_REGS_SP(x) ((x)->esp)
|
||||
#define PT_REGS_IP(x) ((x)->eip)
|
||||
#define __PT_PARM4_REG __unsupported__
|
||||
#define __PT_PARM5_REG __unsupported__
|
||||
#define __PT_RET_REG esp
|
||||
#define __PT_FP_REG ebp
|
||||
#define __PT_RC_REG eax
|
||||
#define __PT_SP_REG esp
|
||||
#define __PT_IP_REG eip
|
||||
|
||||
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), eax)
|
||||
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), edx)
|
||||
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), ecx)
|
||||
#define PT_REGS_PARM4_CORE(x) 0
|
||||
#define PT_REGS_PARM5_CORE(x) 0
|
||||
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), esp)
|
||||
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), ebp)
|
||||
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), eax)
|
||||
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), esp)
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), eip)
|
||||
#else /* __i386__ */
|
||||
|
||||
#else
|
||||
#define __PT_PARM1_REG rdi
|
||||
#define __PT_PARM2_REG rsi
|
||||
#define __PT_PARM3_REG rdx
|
||||
#define __PT_PARM4_REG rcx
|
||||
#define __PT_PARM5_REG r8
|
||||
#define __PT_RET_REG rsp
|
||||
#define __PT_FP_REG rbp
|
||||
#define __PT_RC_REG rax
|
||||
#define __PT_SP_REG rsp
|
||||
#define __PT_IP_REG rip
|
||||
|
||||
#define PT_REGS_PARM1(x) ((x)->rdi)
|
||||
#define PT_REGS_PARM2(x) ((x)->rsi)
|
||||
#define PT_REGS_PARM3(x) ((x)->rdx)
|
||||
#define PT_REGS_PARM4(x) ((x)->rcx)
|
||||
#define PT_REGS_PARM5(x) ((x)->r8)
|
||||
#define PT_REGS_RET(x) ((x)->rsp)
|
||||
#define PT_REGS_FP(x) ((x)->rbp)
|
||||
#define PT_REGS_RC(x) ((x)->rax)
|
||||
#define PT_REGS_SP(x) ((x)->rsp)
|
||||
#define PT_REGS_IP(x) ((x)->rip)
|
||||
#endif /* __i386__ */
|
||||
|
||||
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), rdi)
|
||||
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), rsi)
|
||||
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), rdx)
|
||||
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), rcx)
|
||||
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8)
|
||||
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), rsp)
|
||||
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), rbp)
|
||||
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), rax)
|
||||
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), rsp)
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), rip)
|
||||
|
||||
#endif
|
||||
#endif
|
||||
#endif /* __KERNEL__ || __VMLINUX_H__ */
|
||||
|
||||
#elif defined(bpf_target_s390)
|
||||
|
||||
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
|
||||
struct pt_regs;
|
||||
#define PT_REGS_S390 const volatile user_pt_regs
|
||||
#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
|
||||
#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
|
||||
#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
|
||||
#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
|
||||
#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
|
||||
#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
|
||||
/* Works only with CONFIG_FRAME_POINTER */
|
||||
#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
|
||||
#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
|
||||
#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
|
||||
#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
|
||||
|
||||
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
|
||||
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[3])
|
||||
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[4])
|
||||
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[5])
|
||||
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[6])
|
||||
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[14])
|
||||
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[11])
|
||||
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
|
||||
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[15])
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), psw.addr)
|
||||
#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
|
||||
#define __PT_PARM1_REG gprs[2]
|
||||
#define __PT_PARM2_REG gprs[3]
|
||||
#define __PT_PARM3_REG gprs[4]
|
||||
#define __PT_PARM4_REG gprs[5]
|
||||
#define __PT_PARM5_REG gprs[6]
|
||||
#define __PT_RET_REG gprs[14]
|
||||
#define __PT_FP_REG gprs[11] /* Works only with CONFIG_FRAME_POINTER */
|
||||
#define __PT_RC_REG gprs[2]
|
||||
#define __PT_SP_REG gprs[15]
|
||||
#define __PT_IP_REG psw.addr
|
||||
|
||||
#elif defined(bpf_target_arm)
|
||||
|
||||
#define PT_REGS_PARM1(x) ((x)->uregs[0])
|
||||
#define PT_REGS_PARM2(x) ((x)->uregs[1])
|
||||
#define PT_REGS_PARM3(x) ((x)->uregs[2])
|
||||
#define PT_REGS_PARM4(x) ((x)->uregs[3])
|
||||
#define PT_REGS_PARM5(x) ((x)->uregs[4])
|
||||
#define PT_REGS_RET(x) ((x)->uregs[14])
|
||||
#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
|
||||
#define PT_REGS_RC(x) ((x)->uregs[0])
|
||||
#define PT_REGS_SP(x) ((x)->uregs[13])
|
||||
#define PT_REGS_IP(x) ((x)->uregs[12])
|
||||
|
||||
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), uregs[0])
|
||||
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), uregs[1])
|
||||
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), uregs[2])
|
||||
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), uregs[3])
|
||||
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), uregs[4])
|
||||
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), uregs[14])
|
||||
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), uregs[11])
|
||||
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), uregs[0])
|
||||
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), uregs[13])
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), uregs[12])
|
||||
#define __PT_PARM1_REG uregs[0]
|
||||
#define __PT_PARM2_REG uregs[1]
|
||||
#define __PT_PARM3_REG uregs[2]
|
||||
#define __PT_PARM4_REG uregs[3]
|
||||
#define __PT_PARM5_REG uregs[4]
|
||||
#define __PT_RET_REG uregs[14]
|
||||
#define __PT_FP_REG uregs[11] /* Works only with CONFIG_FRAME_POINTER */
|
||||
#define __PT_RC_REG uregs[0]
|
||||
#define __PT_SP_REG uregs[13]
|
||||
#define __PT_IP_REG uregs[12]
|
||||
|
||||
#elif defined(bpf_target_arm64)
|
||||
|
||||
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
|
||||
struct pt_regs;
|
||||
#define PT_REGS_ARM64 const volatile struct user_pt_regs
|
||||
#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
|
||||
#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
|
||||
#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
|
||||
#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
|
||||
#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
|
||||
#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
|
||||
/* Works only with CONFIG_FRAME_POINTER */
|
||||
#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
|
||||
#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
|
||||
#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
|
||||
#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
|
||||
|
||||
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0])
|
||||
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[1])
|
||||
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[2])
|
||||
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[3])
|
||||
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[4])
|
||||
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[30])
|
||||
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[29])
|
||||
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0])
|
||||
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), sp)
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), pc)
|
||||
#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
|
||||
#define __PT_PARM1_REG regs[0]
|
||||
#define __PT_PARM2_REG regs[1]
|
||||
#define __PT_PARM3_REG regs[2]
|
||||
#define __PT_PARM4_REG regs[3]
|
||||
#define __PT_PARM5_REG regs[4]
|
||||
#define __PT_RET_REG regs[30]
|
||||
#define __PT_FP_REG regs[29] /* Works only with CONFIG_FRAME_POINTER */
|
||||
#define __PT_RC_REG regs[0]
|
||||
#define __PT_SP_REG sp
|
||||
#define __PT_IP_REG pc
|
||||
|
||||
#elif defined(bpf_target_mips)
|
||||
|
||||
#define PT_REGS_PARM1(x) ((x)->regs[4])
|
||||
#define PT_REGS_PARM2(x) ((x)->regs[5])
|
||||
#define PT_REGS_PARM3(x) ((x)->regs[6])
|
||||
#define PT_REGS_PARM4(x) ((x)->regs[7])
|
||||
#define PT_REGS_PARM5(x) ((x)->regs[8])
|
||||
#define PT_REGS_RET(x) ((x)->regs[31])
|
||||
#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
|
||||
#define PT_REGS_RC(x) ((x)->regs[2])
|
||||
#define PT_REGS_SP(x) ((x)->regs[29])
|
||||
#define PT_REGS_IP(x) ((x)->cp0_epc)
|
||||
|
||||
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), regs[4])
|
||||
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), regs[5])
|
||||
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), regs[6])
|
||||
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), regs[7])
|
||||
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), regs[8])
|
||||
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), regs[31])
|
||||
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), regs[30])
|
||||
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[2])
|
||||
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), regs[29])
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), cp0_epc)
|
||||
#define __PT_PARM1_REG regs[4]
|
||||
#define __PT_PARM2_REG regs[5]
|
||||
#define __PT_PARM3_REG regs[6]
|
||||
#define __PT_PARM4_REG regs[7]
|
||||
#define __PT_PARM5_REG regs[8]
|
||||
#define __PT_RET_REG regs[31]
|
||||
#define __PT_FP_REG regs[30] /* Works only with CONFIG_FRAME_POINTER */
|
||||
#define __PT_RC_REG regs[2]
|
||||
#define __PT_SP_REG regs[29]
|
||||
#define __PT_IP_REG cp0_epc
|
||||
|
||||
#elif defined(bpf_target_powerpc)
|
||||
|
||||
#define PT_REGS_PARM1(x) ((x)->gpr[3])
|
||||
#define PT_REGS_PARM2(x) ((x)->gpr[4])
|
||||
#define PT_REGS_PARM3(x) ((x)->gpr[5])
|
||||
#define PT_REGS_PARM4(x) ((x)->gpr[6])
|
||||
#define PT_REGS_PARM5(x) ((x)->gpr[7])
|
||||
#define PT_REGS_RC(x) ((x)->gpr[3])
|
||||
#define PT_REGS_SP(x) ((x)->sp)
|
||||
#define PT_REGS_IP(x) ((x)->nip)
|
||||
|
||||
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), gpr[3])
|
||||
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), gpr[4])
|
||||
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), gpr[5])
|
||||
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), gpr[6])
|
||||
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), gpr[7])
|
||||
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), gpr[3])
|
||||
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp)
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), nip)
|
||||
#define __PT_PARM1_REG gpr[3]
|
||||
#define __PT_PARM2_REG gpr[4]
|
||||
#define __PT_PARM3_REG gpr[5]
|
||||
#define __PT_PARM4_REG gpr[6]
|
||||
#define __PT_PARM5_REG gpr[7]
|
||||
#define __PT_RET_REG regs[31]
|
||||
#define __PT_FP_REG __unsupported__
|
||||
#define __PT_RC_REG gpr[3]
|
||||
#define __PT_SP_REG sp
|
||||
#define __PT_IP_REG nip
|
||||
|
||||
#elif defined(bpf_target_sparc)
|
||||
|
||||
#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
|
||||
#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
|
||||
#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
|
||||
#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
|
||||
#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
|
||||
#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
|
||||
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
|
||||
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
|
||||
|
||||
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0])
|
||||
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I1])
|
||||
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I2])
|
||||
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I3])
|
||||
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I4])
|
||||
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I7])
|
||||
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0])
|
||||
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), u_regs[UREG_FP])
|
||||
|
||||
#define __PT_PARM1_REG u_regs[UREG_I0]
|
||||
#define __PT_PARM2_REG u_regs[UREG_I1]
|
||||
#define __PT_PARM3_REG u_regs[UREG_I2]
|
||||
#define __PT_PARM4_REG u_regs[UREG_I3]
|
||||
#define __PT_PARM5_REG u_regs[UREG_I4]
|
||||
#define __PT_RET_REG u_regs[UREG_I7]
|
||||
#define __PT_FP_REG __unsupported__
|
||||
#define __PT_RC_REG u_regs[UREG_I0]
|
||||
#define __PT_SP_REG u_regs[UREG_FP]
|
||||
/* Should this also be a bpf_target check for the sparc case? */
|
||||
#if defined(__arch64__)
|
||||
#define PT_REGS_IP(x) ((x)->tpc)
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), tpc)
|
||||
#define __PT_IP_REG tpc
|
||||
#else
|
||||
#define PT_REGS_IP(x) ((x)->pc)
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), pc)
|
||||
#define __PT_IP_REG pc
|
||||
#endif
|
||||
|
||||
#elif defined(bpf_target_riscv)
|
||||
|
||||
struct pt_regs;
|
||||
#define PT_REGS_RV const volatile struct user_regs_struct
|
||||
#define PT_REGS_PARM1(x) (((PT_REGS_RV *)(x))->a0)
|
||||
#define PT_REGS_PARM2(x) (((PT_REGS_RV *)(x))->a1)
|
||||
#define PT_REGS_PARM3(x) (((PT_REGS_RV *)(x))->a2)
|
||||
#define PT_REGS_PARM4(x) (((PT_REGS_RV *)(x))->a3)
|
||||
#define PT_REGS_PARM5(x) (((PT_REGS_RV *)(x))->a4)
|
||||
#define PT_REGS_RET(x) (((PT_REGS_RV *)(x))->ra)
|
||||
#define PT_REGS_FP(x) (((PT_REGS_RV *)(x))->s5)
|
||||
#define PT_REGS_RC(x) (((PT_REGS_RV *)(x))->a5)
|
||||
#define PT_REGS_SP(x) (((PT_REGS_RV *)(x))->sp)
|
||||
#define PT_REGS_IP(x) (((PT_REGS_RV *)(x))->epc)
|
||||
|
||||
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a0)
|
||||
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a1)
|
||||
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a2)
|
||||
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a3)
|
||||
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a4)
|
||||
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), ra)
|
||||
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), fp)
|
||||
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a5)
|
||||
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), sp)
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), epc)
|
||||
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
|
||||
#define __PT_PARM1_REG a0
|
||||
#define __PT_PARM2_REG a1
|
||||
#define __PT_PARM3_REG a2
|
||||
#define __PT_PARM4_REG a3
|
||||
#define __PT_PARM5_REG a4
|
||||
#define __PT_RET_REG ra
|
||||
#define __PT_FP_REG fp
|
||||
#define __PT_RC_REG a5
|
||||
#define __PT_SP_REG sp
|
||||
#define __PT_IP_REG epc
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(bpf_target_defined)
|
||||
|
||||
struct pt_regs;
|
||||
|
||||
/* allow some architectures to override `struct pt_regs` */
|
||||
#ifndef __PT_REGS_CAST
|
||||
#define __PT_REGS_CAST(x) (x)
|
||||
#endif
|
||||
|
||||
#define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG)
|
||||
#define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG)
|
||||
#define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG)
|
||||
#define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG)
|
||||
#define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG)
|
||||
#define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG)
|
||||
#define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG)
|
||||
#define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG)
|
||||
#define PT_REGS_SP(x) (__PT_REGS_CAST(x)->__PT_SP_REG)
|
||||
#define PT_REGS_IP(x) (__PT_REGS_CAST(x)->__PT_IP_REG)
|
||||
|
||||
#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_REG)
|
||||
#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_REG)
|
||||
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG)
|
||||
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG)
|
||||
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG)
|
||||
#define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG)
|
||||
#define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG)
|
||||
#define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG)
|
||||
#define PT_REGS_SP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_SP_REG)
|
||||
#define PT_REGS_IP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_IP_REG)
|
||||
|
||||
#if defined(bpf_target_powerpc)
|
||||
|
||||
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
|
||||
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
|
||||
|
||||
#elif defined(bpf_target_sparc)
|
||||
|
||||
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
|
||||
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
|
||||
#elif defined(bpf_target_defined)
|
||||
|
||||
#else
|
||||
|
||||
#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
|
||||
({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
|
||||
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
|
||||
({ bpf_probe_read_kernel(&(ip), sizeof(ip), \
|
||||
(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
|
||||
({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
|
||||
|
||||
#endif
|
||||
|
||||
#if !defined(bpf_target_defined)
|
||||
#else /* defined(bpf_target_defined) */
|
||||
|
||||
#define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
|
||||
#define PT_REGS_PARM2(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
|
||||
@@ -363,7 +290,7 @@ struct pt_regs;
|
||||
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
|
||||
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
|
||||
|
||||
#endif /* !defined(bpf_target_defined) */
|
||||
#endif /* defined(bpf_target_defined) */
|
||||
|
||||
#ifndef ___bpf_concat
|
||||
#define ___bpf_concat(a, b) a ## b
|
||||
@@ -375,25 +302,23 @@ struct pt_regs;
|
||||
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
|
||||
#endif
|
||||
#ifndef ___bpf_narg
|
||||
#define ___bpf_narg(...) \
|
||||
___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
|
||||
#define ___bpf_narg(...) ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
|
||||
#endif
|
||||
|
||||
#define ___bpf_ctx_cast0() ctx
|
||||
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
|
||||
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
|
||||
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
|
||||
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
|
||||
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
|
||||
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
|
||||
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
|
||||
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
|
||||
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
|
||||
#define ___bpf_ctx_cast0() ctx
|
||||
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
|
||||
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
|
||||
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
|
||||
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
|
||||
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
|
||||
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
|
||||
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
|
||||
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
|
||||
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
|
||||
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
|
||||
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
|
||||
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
|
||||
#define ___bpf_ctx_cast(args...) \
|
||||
___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
|
||||
#define ___bpf_ctx_cast(args...) ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
|
||||
|
||||
/*
|
||||
* BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
|
||||
@@ -426,19 +351,13 @@ ____##name(unsigned long long *ctx, ##args)
|
||||
|
||||
struct pt_regs;
|
||||
|
||||
#define ___bpf_kprobe_args0() ctx
|
||||
#define ___bpf_kprobe_args1(x) \
|
||||
___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
|
||||
#define ___bpf_kprobe_args2(x, args...) \
|
||||
___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
|
||||
#define ___bpf_kprobe_args3(x, args...) \
|
||||
___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
|
||||
#define ___bpf_kprobe_args4(x, args...) \
|
||||
___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
|
||||
#define ___bpf_kprobe_args5(x, args...) \
|
||||
___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
|
||||
#define ___bpf_kprobe_args(args...) \
|
||||
___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
|
||||
#define ___bpf_kprobe_args0() ctx
|
||||
#define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
|
||||
#define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
|
||||
#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
|
||||
#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
|
||||
#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
|
||||
#define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
|
||||
|
||||
/*
|
||||
* BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
|
||||
@@ -464,11 +383,9 @@ typeof(name(0)) name(struct pt_regs *ctx) \
|
||||
static __attribute__((always_inline)) typeof(name(0)) \
|
||||
____##name(struct pt_regs *ctx, ##args)
|
||||
|
||||
#define ___bpf_kretprobe_args0() ctx
|
||||
#define ___bpf_kretprobe_args1(x) \
|
||||
___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
|
||||
#define ___bpf_kretprobe_args(args...) \
|
||||
___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
|
||||
#define ___bpf_kretprobe_args0() ctx
|
||||
#define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
|
||||
#define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
|
||||
|
||||
/*
|
||||
* BPF_KRETPROBE is similar to BPF_KPROBE, except, it only provides optional
|
||||
|
||||
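As a reminder of what these refactored macros ultimately feed, here is a small, hypothetical kprobe/kretprobe sketch built on BPF_KPROBE() and BPF_KRETPROBE(), where the typed arguments come from PT_REGS_PARM*() and the return value from PT_REGS_RC(); the probed function do_unlinkat is illustrative.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_unlinkat")
int BPF_KPROBE(handle_unlink, int dfd, struct filename *name)
{
	/* dfd and name were pulled out of pt_regs via ___bpf_kprobe_args*() */
	bpf_printk("do_unlinkat(dfd=%d)", dfd);
	return 0;
}

SEC("kretprobe/do_unlinkat")
int BPF_KRETPROBE(handle_unlink_exit, long ret)
{
	/* ret comes from PT_REGS_RC(ctx) via ___bpf_kretprobe_args1() */
	bpf_printk("do_unlinkat() = %ld", ret);
	return 0;
}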
@@ -313,12 +313,18 @@ LIBBPF_API struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
|
||||
*
|
||||
* The rest works just like in case of ___libbpf_override() usage with symbol
|
||||
* versioning.
|
||||
*
|
||||
* C++ compilers don't support __builtin_types_compatible_p(), so at least
|
||||
* don't screw up compilation for them and let C++ users pick btf_dump__new
|
||||
* vs btf_dump__new_deprecated explicitly.
|
||||
*/
|
||||
#ifndef __cplusplus
|
||||
#define btf_dump__new(a1, a2, a3, a4) __builtin_choose_expr( \
|
||||
__builtin_types_compatible_p(typeof(a4), btf_dump_printf_fn_t) || \
|
||||
__builtin_types_compatible_p(typeof(a4), void(void *, const char *, va_list)), \
|
||||
btf_dump__new_deprecated((void *)a1, (void *)a2, (void *)a3, (void *)a4), \
|
||||
btf_dump__new((void *)a1, (void *)a2, (void *)a3, (void *)a4))
|
||||
#endif
|
||||
|
||||
LIBBPF_API void btf_dump__free(struct btf_dump *d);
|
||||
|
||||
|
||||
@@ -2321,8 +2321,8 @@ int btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
|
||||
if (!opts->indent_str)
|
||||
d->typed_dump->indent_str[0] = '\t';
|
||||
else
|
||||
strncat(d->typed_dump->indent_str, opts->indent_str,
|
||||
sizeof(d->typed_dump->indent_str) - 1);
|
||||
libbpf_strlcpy(d->typed_dump->indent_str, opts->indent_str,
|
||||
sizeof(d->typed_dump->indent_str));
|
||||
|
||||
d->typed_dump->compact = OPTS_GET(opts, compact, false);
|
||||
d->typed_dump->skip_names = OPTS_GET(opts, skip_names, false);
|
||||
|
||||
@@ -371,8 +371,9 @@ int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (nr_progs != gen->nr_progs || nr_maps != gen->nr_maps) {
|
||||
pr_warn("progs/maps mismatch\n");
|
||||
if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) {
|
||||
pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n",
|
||||
nr_progs, gen->nr_progs, nr_maps, gen->nr_maps);
|
||||
gen->error = -EFAULT;
|
||||
return gen->error;
|
||||
}
|
||||
@@ -462,8 +463,7 @@ void bpf_gen__map_create(struct bpf_gen *gen,
|
||||
attr.map_flags = map_attr->map_flags;
|
||||
attr.map_extra = map_attr->map_extra;
|
||||
if (map_name)
|
||||
memcpy(attr.map_name, map_name,
|
||||
min((unsigned)strlen(map_name), BPF_OBJ_NAME_LEN - 1));
|
||||
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
|
||||
attr.numa_node = map_attr->numa_node;
|
||||
attr.map_ifindex = map_attr->map_ifindex;
|
||||
attr.max_entries = max_entries;
|
||||
@@ -969,8 +969,7 @@ void bpf_gen__prog_load(struct bpf_gen *gen,
|
||||
core_relos = add_data(gen, gen->core_relos,
|
||||
attr.core_relo_cnt * attr.core_relo_rec_size);
|
||||
|
||||
memcpy(attr.prog_name, prog_name,
|
||||
min((unsigned)strlen(prog_name), BPF_OBJ_NAME_LEN - 1));
|
||||
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
|
||||
prog_load_attr = add_data(gen, &attr, attr_size);
|
||||
|
||||
/* populate union bpf_attr with a pointer to license */
|
||||
|
||||
@@ -187,42 +187,6 @@ const char *libbpf_version_string(void)
|
||||
#undef __S
|
||||
}
|
||||
|
||||
enum kern_feature_id {
|
||||
/* v4.14: kernel support for program & map names. */
|
||||
FEAT_PROG_NAME,
|
||||
/* v5.2: kernel support for global data sections. */
|
||||
FEAT_GLOBAL_DATA,
|
||||
/* BTF support */
|
||||
FEAT_BTF,
|
||||
/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
|
||||
FEAT_BTF_FUNC,
|
||||
/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
|
||||
FEAT_BTF_DATASEC,
|
||||
/* BTF_FUNC_GLOBAL is supported */
|
||||
FEAT_BTF_GLOBAL_FUNC,
|
||||
/* BPF_F_MMAPABLE is supported for arrays */
|
||||
FEAT_ARRAY_MMAP,
|
||||
/* kernel support for expected_attach_type in BPF_PROG_LOAD */
|
||||
FEAT_EXP_ATTACH_TYPE,
|
||||
/* bpf_probe_read_{kernel,user}[_str] helpers */
|
||||
FEAT_PROBE_READ_KERN,
|
||||
/* BPF_PROG_BIND_MAP is supported */
|
||||
FEAT_PROG_BIND_MAP,
|
||||
/* Kernel support for module BTFs */
|
||||
FEAT_MODULE_BTF,
|
||||
/* BTF_KIND_FLOAT support */
|
||||
FEAT_BTF_FLOAT,
|
||||
/* BPF perf link support */
|
||||
FEAT_PERF_LINK,
|
||||
/* BTF_KIND_DECL_TAG support */
|
||||
FEAT_BTF_DECL_TAG,
|
||||
/* BTF_KIND_TYPE_TAG support */
|
||||
FEAT_BTF_TYPE_TAG,
|
||||
__FEAT_CNT,
|
||||
};
|
||||
|
||||
static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
|
||||
|
||||
enum reloc_type {
|
||||
RELO_LD64,
|
||||
RELO_CALL,
|
||||
@@ -831,11 +795,36 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __u32 get_kernel_version(void)
|
||||
__u32 get_kernel_version(void)
|
||||
{
|
||||
/* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release,
|
||||
* but Ubuntu provides /proc/version_signature file, as described at
|
||||
* https://ubuntu.com/kernel, with an example contents below, which we
|
||||
* can use to get a proper LINUX_VERSION_CODE.
|
||||
*
|
||||
* Ubuntu 5.4.0-12.15-generic 5.4.8
|
||||
*
|
||||
* In the above, 5.4.8 is what kernel is actually expecting, while
|
||||
* uname() call will return 5.4.0 in info.release.
|
||||
*/
|
||||
const char *ubuntu_kver_file = "/proc/version_signature";
|
||||
__u32 major, minor, patch;
|
||||
struct utsname info;
|
||||
|
||||
if (access(ubuntu_kver_file, R_OK) == 0) {
|
||||
FILE *f;
|
||||
|
||||
f = fopen(ubuntu_kver_file, "r");
|
||||
if (f) {
|
||||
if (fscanf(f, "%*s %*s %d.%d.%d\n", &major, &minor, &patch) == 3) {
|
||||
fclose(f);
|
||||
return KERNEL_VERSION(major, minor, patch);
|
||||
}
|
||||
fclose(f);
|
||||
}
|
||||
/* something went wrong, fall back to uname() approach */
|
||||
}
|
||||
|
||||
uname(&info);
|
||||
if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
|
||||
return 0;
|
||||
@@ -1201,12 +1190,10 @@ static struct bpf_object *bpf_object__new(const char *path,
|
||||
|
||||
strcpy(obj->path, path);
|
||||
if (obj_name) {
|
||||
strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
|
||||
obj->name[sizeof(obj->name) - 1] = 0;
|
||||
libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
|
||||
} else {
|
||||
/* Using basename() GNU version which doesn't modify arg. */
|
||||
strncpy(obj->name, basename((void *)path),
|
||||
sizeof(obj->name) - 1);
|
||||
libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
|
||||
end = strchr(obj->name, '.');
|
||||
if (end)
|
||||
*end = 0;
|
||||
@@ -1358,7 +1345,10 @@ static int bpf_object__check_endianness(struct bpf_object *obj)
|
||||
static int
|
||||
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
|
||||
{
|
||||
memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
|
||||
/* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
|
||||
* go over allowed ELF data section buffer
|
||||
*/
|
||||
libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
|
||||
pr_debug("license of %s is %s\n", obj->path, obj->license);
|
||||
return 0;
|
||||
}
|
||||
@@ -4354,6 +4344,10 @@ bpf_object__probe_loading(struct bpf_object *obj)
|
||||
if (obj->gen_loader)
|
||||
return 0;
|
||||
|
||||
ret = bump_rlimit_memlock();
|
||||
if (ret)
|
||||
pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
|
||||
|
||||
/* make sure basic loading works */
|
||||
ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
|
||||
if (ret < 0)
|
||||
@@ -4720,14 +4714,17 @@ static struct kern_feature_desc {
|
||||
[FEAT_BTF_TYPE_TAG] = {
|
||||
"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
|
||||
},
|
||||
[FEAT_MEMCG_ACCOUNT] = {
|
||||
"memcg-based memory accounting", probe_memcg_account,
|
||||
},
|
||||
};
|
||||
|
||||
static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
|
||||
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
|
||||
{
|
||||
struct kern_feature_desc *feat = &feature_probes[feat_id];
|
||||
int ret;
|
||||
|
||||
if (obj->gen_loader)
|
||||
if (obj && obj->gen_loader)
|
||||
/* To generate loader program assume the latest kernel
|
||||
* to avoid doing extra prog_load, map_create syscalls.
|
||||
*/
|
||||
|
||||
@@ -227,6 +227,7 @@ struct btf;
|
||||
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
|
||||
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__find_program_by_name() instead")
|
||||
LIBBPF_API struct bpf_program *
|
||||
bpf_object__find_program_by_title(const struct bpf_object *obj,
|
||||
const char *title);
|
||||
@@ -339,7 +340,31 @@ LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated"
|
||||
LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog,
|
||||
const char *path,
|
||||
int instance);
|
||||
|
||||
/**
|
||||
* @brief **bpf_program__pin()** pins the BPF program to a file
|
||||
* in the BPF FS specified by a path. This increments the programs
|
||||
* reference count, allowing it to stay loaded after the process
|
||||
* which loaded it has exited.
|
||||
*
|
||||
* @param prog BPF program to pin, must already be loaded
|
||||
* @param path file path in a BPF file system
|
||||
* @return 0, on success; negative error code, otherwise
|
||||
*/
|
||||
LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);
|
||||
|
||||
/**
|
||||
* @brief **bpf_program__unpin()** unpins the BPF program from a file
|
||||
* in the BPFFS specified by a path. This decrements the programs
|
||||
* reference count.
|
||||
*
|
||||
* The file pinning the BPF program can also be unlinked by a different
|
||||
* process in which case this function will return an error.
|
||||
*
|
||||
* @param prog BPF program to unpin
|
||||
* @param path file path to the pin in a BPF file system
|
||||
* @return 0, on success; negative error code, otherwise
|
||||
*/
|
||||
LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path);
|
||||
LIBBPF_API void bpf_program__unload(struct bpf_program *prog);
|
||||
|
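A minimal, hypothetical sketch of the pin/unpin flow documented above; the object file name, program name, and pin path are made up for illustration.

#include <bpf/libbpf.h>

int pin_example(void)
{
	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
	struct bpf_program *prog;
	int err;

	if (libbpf_get_error(obj))
		return -1;
	err = bpf_object__load(obj);
	if (err)
		goto out;

	prog = bpf_object__find_program_by_name(obj, "handle_unlink");
	if (!prog) {
		err = -1;
		goto out;
	}

	/* keeps the program loaded after this process exits */
	err = bpf_program__pin(prog, "/sys/fs/bpf/handle_unlink");
	/* ... later, drop the pin again */
	if (!err)
		err = bpf_program__unpin(prog, "/sys/fs/bpf/handle_unlink");
out:
	bpf_object__close(obj);
	return err;
}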
||||
@@ -1027,13 +1052,57 @@ bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
|
||||
* user, causing subsequent probes to fail. In this case, the caller may want
|
||||
* to adjust that limit with setrlimit().
|
||||
*/
|
||||
LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type,
|
||||
__u32 ifindex);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_prog_type() instead")
|
||||
LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_map_type() instead")
|
||||
LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
|
||||
LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
|
||||
enum bpf_prog_type prog_type, __u32 ifindex);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_helper() instead")
|
||||
LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type, __u32 ifindex);
|
||||
LIBBPF_DEPRECATED_SINCE(0, 8, "implement your own or use bpftool for feature detection")
|
||||
LIBBPF_API bool bpf_probe_large_insn_limit(__u32 ifindex);
|
||||
|
||||
/**
|
||||
* @brief **libbpf_probe_bpf_prog_type()** detects if host kernel supports
|
||||
* BPF programs of a given type.
|
||||
* @param prog_type BPF program type to detect kernel support for
|
||||
* @param opts reserved for future extensibility, should be NULL
|
||||
* @return 1, if given program type is supported; 0, if given program type is
|
||||
* not supported; negative error code if feature detection failed or can't be
|
||||
* performed
|
||||
*
|
||||
* Make sure the process has required set of CAP_* permissions (or runs as
|
||||
* root) when performing feature checking.
|
||||
*/
|
||||
LIBBPF_API int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts);
|
||||
/**
|
||||
* @brief **libbpf_probe_bpf_map_type()** detects if host kernel supports
|
||||
* BPF maps of a given type.
|
||||
* @param map_type BPF map type to detect kernel support for
|
||||
* @param opts reserved for future extensibility, should be NULL
|
||||
* @return 1, if given map type is supported; 0, if given map type is
|
||||
* not supported; negative error code if feature detection failed or can't be
|
||||
* performed
|
||||
*
|
||||
* Make sure the process has required set of CAP_* permissions (or runs as
|
||||
* root) when performing feature checking.
|
||||
*/
|
||||
LIBBPF_API int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts);
|
||||
/**
|
||||
* @brief **libbpf_probe_bpf_helper()** detects if host kernel supports the
|
||||
* use of a given BPF helper from specified BPF program type.
|
||||
* @param prog_type BPF program type used to check the support of BPF helper
|
||||
* @param helper_id BPF helper ID (enum bpf_func_id) to check support for
|
||||
* @param opts reserved for future extensibility, should be NULL
|
||||
* @return 1, if given combination of program type and helper is supported; 0,
|
||||
* if the combination is not supported; negative error code if feature
|
||||
* detection for provided input arguments failed or can't be performed
|
||||
*
|
||||
* Make sure the process has required set of CAP_* permissions (or runs as
|
||||
* root) when performing feature checking.
|
||||
*/
|
||||
LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type,
|
||||
enum bpf_func_id helper_id, const void *opts);
|
||||
|
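A short, hypothetical sketch of the new probe APIs in use, following the return convention documented above (1 = supported, 0 = not supported, negative = probe failed); the program, map, and helper choices are arbitrary.

#include <stdio.h>
#include <bpf/libbpf.h>

static const char *yesno(int ret)
{
	return ret == 1 ? "yes" : ret == 0 ? "no" : "error";
}

int main(void)
{
	/* replaces the deprecated bpf_probe_prog_type()/bpf_probe_map_type()/bpf_probe_helper() */
	printf("BPF_PROG_TYPE_SCHED_CLS: %s\n",
	       yesno(libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_SCHED_CLS, NULL)));
	printf("BPF_MAP_TYPE_RINGBUF: %s\n",
	       yesno(libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL)));
	printf("bpf_strncmp() from kprobes: %s\n",
	       yesno(libbpf_probe_bpf_helper(BPF_PROG_TYPE_KPROBE, BPF_FUNC_strncmp, NULL)));
	return 0;
}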
||||
/*
|
||||
* Get bpf_prog_info in continuous memory
|
||||
*
|
||||
|
||||
@@ -427,4 +427,8 @@ LIBBPF_0.7.0 {
|
||||
bpf_program__log_level;
|
||||
bpf_program__set_log_buf;
|
||||
bpf_program__set_log_level;
|
||||
libbpf_probe_bpf_helper;
|
||||
libbpf_probe_bpf_map_type;
|
||||
libbpf_probe_bpf_prog_type;
|
||||
libbpf_set_memlock_rlim_max;
|
||||
};
|
||||
|
||||
@@ -169,6 +169,27 @@ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
|
||||
return realloc(ptr, total);
|
||||
}
|
||||
|
||||
/* Copy up to sz - 1 bytes from zero-terminated src string and ensure that dst
|
||||
* is zero-terminated string no matter what (unless sz == 0, in which case
|
||||
* it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but differs
|
||||
* in what is returned. Given this is internal helper, it's trivial to extend
|
||||
* this, when necessary. Use this instead of strncpy inside libbpf source code.
|
||||
*/
|
||||
static inline void libbpf_strlcpy(char *dst, const char *src, size_t sz)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
if (sz == 0)
|
||||
return;
|
||||
|
||||
sz--;
|
||||
for (i = 0; i < sz && src[i]; i++)
|
||||
dst[i] = src[i];
|
||||
dst[i] = '\0';
|
||||
}
|
||||
|
||||
__u32 get_kernel_version(void);
|
||||
|
||||
struct btf;
|
||||
struct btf_type;
|
||||
|
||||
@@ -272,6 +293,45 @@ static inline bool libbpf_validate_opts(const char *opts,
|
||||
(opts)->sz - __off); \
|
||||
})
|
||||
|
||||
enum kern_feature_id {
|
||||
/* v4.14: kernel support for program & map names. */
|
||||
FEAT_PROG_NAME,
|
||||
/* v5.2: kernel support for global data sections. */
|
||||
FEAT_GLOBAL_DATA,
|
||||
/* BTF support */
|
||||
FEAT_BTF,
|
||||
/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
|
||||
FEAT_BTF_FUNC,
|
||||
/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
|
||||
FEAT_BTF_DATASEC,
|
||||
/* BTF_FUNC_GLOBAL is supported */
|
||||
FEAT_BTF_GLOBAL_FUNC,
|
||||
/* BPF_F_MMAPABLE is supported for arrays */
|
||||
FEAT_ARRAY_MMAP,
|
||||
/* kernel support for expected_attach_type in BPF_PROG_LOAD */
|
||||
FEAT_EXP_ATTACH_TYPE,
|
||||
/* bpf_probe_read_{kernel,user}[_str] helpers */
|
||||
FEAT_PROBE_READ_KERN,
|
||||
/* BPF_PROG_BIND_MAP is supported */
|
||||
FEAT_PROG_BIND_MAP,
|
||||
/* Kernel support for module BTFs */
|
||||
FEAT_MODULE_BTF,
|
||||
/* BTF_KIND_FLOAT support */
|
||||
FEAT_BTF_FLOAT,
|
||||
/* BPF perf link support */
|
||||
FEAT_PERF_LINK,
|
||||
/* BTF_KIND_DECL_TAG support */
|
||||
FEAT_BTF_DECL_TAG,
|
||||
/* BTF_KIND_TYPE_TAG support */
|
||||
FEAT_BTF_TYPE_TAG,
|
||||
/* memcg-based accounting for BPF maps and progs */
|
||||
FEAT_MEMCG_ACCOUNT,
|
||||
__FEAT_CNT,
|
||||
};
|
||||
|
||||
int probe_memcg_account(void);
|
||||
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
|
||||
int bump_rlimit_memlock(void);
|
||||
|
||||
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
|
||||
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
|
||||
|
||||
@@ -45,7 +45,6 @@ enum libbpf_strict_mode {
|
||||
* (positive) error code.
|
||||
*/
|
||||
LIBBPF_STRICT_DIRECT_ERRS = 0x02,
|
||||
|
||||
/*
|
||||
* Enforce strict BPF program section (SEC()) names.
|
||||
* E.g., while previously SEC("xdp_whatever") or SEC("perf_event_blah") were
|
||||
@@ -63,6 +62,17 @@ enum libbpf_strict_mode {
|
||||
* Clients can maintain it on their own if it is valuable for them.
|
||||
*/
|
||||
LIBBPF_STRICT_NO_OBJECT_LIST = 0x08,
|
||||
/*
|
||||
* Automatically bump RLIMIT_MEMLOCK using setrlimit() before the
|
||||
* first BPF program or map creation operation. This is done only if
|
||||
* kernel is too old to support memcg-based memory accounting for BPF
|
||||
* subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
|
||||
* but it can be overridden with libbpf_set_memlock_rlim_max() API.
|
||||
* Note that libbpf_set_memlock_rlim_max() needs to be called before
|
||||
* the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
|
||||
* operation.
|
||||
*/
|
||||
LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK = 0x10,
|
||||
|
||||
__LIBBPF_STRICT_LAST,
|
||||
};
|
||||
|
||||
@@ -48,28 +48,20 @@ static int get_vendor_id(int ifindex)
|
||||
return strtol(buf, NULL, 0);
|
||||
}
|
||||
|
||||
static int get_kernel_version(void)
|
||||
static int probe_prog_load(enum bpf_prog_type prog_type,
|
||||
const struct bpf_insn *insns, size_t insns_cnt,
|
||||
char *log_buf, size_t log_buf_sz,
|
||||
__u32 ifindex)
|
||||
{
|
||||
int version, subversion, patchlevel;
|
||||
struct utsname utsn;
|
||||
|
||||
/* Return 0 on failure, and attempt to probe with empty kversion */
|
||||
if (uname(&utsn))
|
||||
return 0;
|
||||
|
||||
if (sscanf(utsn.release, "%d.%d.%d",
|
||||
&version, &subversion, &patchlevel) != 3)
|
||||
return 0;
|
||||
|
||||
return (version << 16) + (subversion << 8) + patchlevel;
|
||||
}
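For example, a 5.16.3 release string packs to (5 << 16) + (16 << 8) + 3 = 0x051003, the same KERNEL_VERSION()-style value that kprobe program loads have historically supplied as kern_version; returning 0 on any parse failure simply lets the probe fall back to an empty kversion, as the comment above notes.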
|
||||
|
||||
static void
|
||||
probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
|
||||
size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, opts);
|
||||
int fd;
|
||||
LIBBPF_OPTS(bpf_prog_load_opts, opts,
|
||||
.log_buf = log_buf,
|
||||
.log_size = log_buf_sz,
|
||||
.log_level = log_buf ? 1 : 0,
|
||||
.prog_ifindex = ifindex,
|
||||
);
|
||||
int fd, err, exp_err = 0;
|
||||
const char *exp_msg = NULL;
|
||||
char buf[4096];
|
||||
|
||||
switch (prog_type) {
|
||||
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
|
||||
@@ -84,6 +76,38 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
|
||||
case BPF_PROG_TYPE_KPROBE:
|
||||
opts.kern_version = get_kernel_version();
|
||||
break;
|
||||
case BPF_PROG_TYPE_LIRC_MODE2:
|
||||
opts.expected_attach_type = BPF_LIRC_MODE2;
|
||||
break;
|
||||
case BPF_PROG_TYPE_TRACING:
|
||||
case BPF_PROG_TYPE_LSM:
|
||||
opts.log_buf = buf;
|
||||
opts.log_size = sizeof(buf);
|
||||
opts.log_level = 1;
|
||||
if (prog_type == BPF_PROG_TYPE_TRACING)
|
||||
opts.expected_attach_type = BPF_TRACE_FENTRY;
|
||||
else
|
||||
opts.expected_attach_type = BPF_MODIFY_RETURN;
|
||||
opts.attach_btf_id = 1;
|
||||
|
||||
exp_err = -EINVAL;
|
||||
exp_msg = "attach_btf_id 1 is not a function";
|
||||
break;
|
||||
case BPF_PROG_TYPE_EXT:
|
||||
opts.log_buf = buf;
|
||||
opts.log_size = sizeof(buf);
|
||||
opts.log_level = 1;
|
||||
opts.attach_btf_id = 1;
|
||||
|
||||
exp_err = -EINVAL;
|
||||
exp_msg = "Cannot replace kernel functions";
|
||||
break;
|
||||
case BPF_PROG_TYPE_SYSCALL:
|
||||
opts.prog_flags = BPF_F_SLEEPABLE;
|
||||
break;
|
||||
case BPF_PROG_TYPE_STRUCT_OPS:
|
||||
exp_err = -524; /* -ENOTSUPP */
|
||||
break;
|
||||
case BPF_PROG_TYPE_UNSPEC:
|
||||
case BPF_PROG_TYPE_SOCKET_FILTER:
|
||||
case BPF_PROG_TYPE_SCHED_CLS:
|
||||
@@ -103,25 +127,42 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
|
||||
case BPF_PROG_TYPE_RAW_TRACEPOINT:
|
||||
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
|
||||
case BPF_PROG_TYPE_LWT_SEG6LOCAL:
|
||||
case BPF_PROG_TYPE_LIRC_MODE2:
|
||||
case BPF_PROG_TYPE_SK_REUSEPORT:
|
||||
case BPF_PROG_TYPE_FLOW_DISSECTOR:
|
||||
case BPF_PROG_TYPE_CGROUP_SYSCTL:
|
||||
case BPF_PROG_TYPE_TRACING:
|
||||
case BPF_PROG_TYPE_STRUCT_OPS:
|
||||
case BPF_PROG_TYPE_EXT:
|
||||
case BPF_PROG_TYPE_LSM:
|
||||
default:
|
||||
break;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
opts.prog_ifindex = ifindex;
|
||||
opts.log_buf = buf;
|
||||
opts.log_size = buf_len;
|
||||
|
||||
fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, NULL);
|
||||
fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
|
||||
err = -errno;
|
||||
if (fd >= 0)
|
||||
close(fd);
|
||||
if (exp_err) {
|
||||
if (fd >= 0 || err != exp_err)
|
||||
return 0;
|
||||
if (exp_msg && !strstr(buf, exp_msg))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
return fd >= 0 ? 1 : 0;
|
||||
}
|
||||
|
||||
int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
|
||||
{
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN()
|
||||
};
|
||||
const size_t insn_cnt = ARRAY_SIZE(insns);
|
||||
int ret;
|
||||
|
||||
if (opts)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0, 0);
|
||||
return libbpf_err(ret);
|
||||
}
|
||||
|
||||
bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
|
||||
@@ -131,12 +172,16 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
|
||||
BPF_EXIT_INSN()
|
||||
};
|
||||
|
||||
/* prefer libbpf_probe_bpf_prog_type() unless offload is requested */
|
||||
if (ifindex == 0)
|
||||
return libbpf_probe_bpf_prog_type(prog_type, NULL) == 1;
|
||||
|
||||
if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
|
||||
/* nfp returns -EINVAL on exit(0) with TC offload */
|
||||
insns[0].imm = 2;
|
||||
|
||||
errno = 0;
|
||||
probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);
|
||||
probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);
|
||||
|
||||
return errno != EINVAL && errno != EOPNOTSUPP;
|
||||
}
|
||||
@@ -197,16 +242,18 @@ static int load_local_storage_btf(void)
|
||||
strs, sizeof(strs));
|
||||
}
|
||||
|
||||
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
|
||||
static int probe_map_create(enum bpf_map_type map_type, __u32 ifindex)
|
||||
{
|
||||
int key_size, value_size, max_entries, map_flags;
|
||||
LIBBPF_OPTS(bpf_map_create_opts, opts);
|
||||
int key_size, value_size, max_entries;
|
||||
__u32 btf_key_type_id = 0, btf_value_type_id = 0;
|
||||
int fd = -1, btf_fd = -1, fd_inner;
|
||||
int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err;
|
||||
|
||||
opts.map_ifindex = ifindex;
|
||||
|
||||
key_size = sizeof(__u32);
|
||||
value_size = sizeof(__u32);
|
||||
max_entries = 1;
|
||||
map_flags = 0;
|
||||
|
||||
switch (map_type) {
|
||||
case BPF_MAP_TYPE_STACK_TRACE:
|
||||
@@ -215,7 +262,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
|
||||
case BPF_MAP_TYPE_LPM_TRIE:
|
||||
key_size = sizeof(__u64);
|
||||
value_size = sizeof(__u64);
|
||||
map_flags = BPF_F_NO_PREALLOC;
|
||||
opts.map_flags = BPF_F_NO_PREALLOC;
|
||||
break;
|
||||
case BPF_MAP_TYPE_CGROUP_STORAGE:
|
||||
case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
|
||||
@@ -234,17 +281,25 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
|
||||
btf_value_type_id = 3;
|
||||
value_size = 8;
|
||||
max_entries = 0;
|
||||
map_flags = BPF_F_NO_PREALLOC;
|
||||
opts.map_flags = BPF_F_NO_PREALLOC;
|
||||
btf_fd = load_local_storage_btf();
|
||||
if (btf_fd < 0)
|
||||
return false;
|
||||
return btf_fd;
|
||||
break;
|
||||
case BPF_MAP_TYPE_RINGBUF:
|
||||
key_size = 0;
|
||||
value_size = 0;
|
||||
max_entries = 4096;
|
||||
break;
|
||||
case BPF_MAP_TYPE_UNSPEC:
|
||||
case BPF_MAP_TYPE_STRUCT_OPS:
|
||||
/* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
|
||||
opts.btf_vmlinux_value_type_id = 1;
|
||||
exp_err = -524; /* -ENOTSUPP */
|
||||
break;
|
||||
case BPF_MAP_TYPE_BLOOM_FILTER:
|
||||
key_size = 0;
|
||||
max_entries = 1;
|
||||
break;
|
||||
case BPF_MAP_TYPE_HASH:
|
||||
case BPF_MAP_TYPE_ARRAY:
|
||||
case BPF_MAP_TYPE_PROG_ARRAY:
|
||||
@@ -263,49 +318,114 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
|
||||
case BPF_MAP_TYPE_XSKMAP:
|
||||
case BPF_MAP_TYPE_SOCKHASH:
|
||||
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
|
||||
case BPF_MAP_TYPE_STRUCT_OPS:
|
||||
default:
|
||||
break;
|
||||
case BPF_MAP_TYPE_UNSPEC:
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
|
||||
map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
|
||||
LIBBPF_OPTS(bpf_map_create_opts, opts);
|
||||
|
||||
/* TODO: probe for device, once libbpf has a function to create
|
||||
* map-in-map for offload
|
||||
*/
|
||||
if (ifindex)
|
||||
return false;
|
||||
goto cleanup;
|
||||
|
||||
fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
|
||||
sizeof(__u32), sizeof(__u32), 1, NULL);
|
||||
if (fd_inner < 0)
|
||||
return false;
|
||||
goto cleanup;
|
||||
|
||||
opts.inner_map_fd = fd_inner;
|
||||
fd = bpf_map_create(map_type, NULL, sizeof(__u32), sizeof(__u32), 1, &opts);
|
||||
close(fd_inner);
|
||||
} else {
|
||||
LIBBPF_OPTS(bpf_map_create_opts, opts);
|
||||
|
||||
/* Note: No other restriction on map type probes for offload */
|
||||
opts.map_flags = map_flags;
|
||||
opts.map_ifindex = ifindex;
|
||||
if (btf_fd >= 0) {
|
||||
opts.btf_fd = btf_fd;
|
||||
opts.btf_key_type_id = btf_key_type_id;
|
||||
opts.btf_value_type_id = btf_value_type_id;
|
||||
}
|
||||
|
||||
fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
|
||||
}
|
||||
|
||||
if (btf_fd >= 0) {
|
||||
opts.btf_fd = btf_fd;
|
||||
opts.btf_key_type_id = btf_key_type_id;
|
||||
opts.btf_value_type_id = btf_value_type_id;
|
||||
}
|
||||
|
||||
fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
|
||||
err = -errno;
|
||||
|
||||
cleanup:
|
||||
if (fd >= 0)
|
||||
close(fd);
|
||||
if (fd_inner >= 0)
|
||||
close(fd_inner);
|
||||
if (btf_fd >= 0)
|
||||
close(btf_fd);
|
||||
|
||||
return fd >= 0;
|
||||
if (exp_err)
|
||||
return fd < 0 && err == exp_err ? 1 : 0;
|
||||
else
|
||||
return fd >= 0 ? 1 : 0;
|
||||
}
|
||||
|
||||
int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (opts)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
ret = probe_map_create(map_type, 0);
|
||||
return libbpf_err(ret);
|
||||
}
|
||||
|
||||
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
|
||||
{
|
||||
return probe_map_create(map_type, ifindex) == 1;
|
||||
}
|
||||
|
||||
int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id,
|
||||
const void *opts)
|
||||
{
|
||||
struct bpf_insn insns[] = {
|
||||
BPF_EMIT_CALL((__u32)helper_id),
|
||||
BPF_EXIT_INSN(),
|
||||
};
|
||||
const size_t insn_cnt = ARRAY_SIZE(insns);
|
||||
char buf[4096];
|
||||
int ret;
|
||||
|
||||
if (opts)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
/* we can't successfully load all prog types to check for BPF helper
|
||||
* support, so bail out with -EOPNOTSUPP error
|
||||
*/
|
||||
switch (prog_type) {
|
||||
case BPF_PROG_TYPE_TRACING:
|
||||
case BPF_PROG_TYPE_EXT:
|
||||
case BPF_PROG_TYPE_LSM:
|
||||
case BPF_PROG_TYPE_STRUCT_OPS:
|
||||
return -EOPNOTSUPP;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
buf[0] = '\0';
|
||||
ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf), 0);
|
||||
if (ret < 0)
|
||||
return libbpf_err(ret);
|
||||
|
||||
/* If BPF verifier doesn't recognize BPF helper ID (enum bpf_func_id)
|
||||
* at all, it will emit something like "invalid func unknown#181".
|
||||
* If BPF verifier recognizes BPF helper but it's not supported for
|
||||
* given BPF program type, it will emit "unknown func bpf_sys_bpf#166".
|
||||
* In both cases, provided combination of BPF program type and BPF
|
||||
* helper is not supported by the kernel.
|
||||
* In all other cases, probe_prog_load() above will either succeed (e.g.,
|
||||
* because BPF helper happens to accept no input arguments or it
|
||||
* accepts one input argument and initial PTR_TO_CTX is fine for
|
||||
* that), or we'll get some more specific BPF verifier error about
|
||||
* some unsatisfied conditions.
|
||||
*/
|
||||
if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func ")))
|
||||
return 0;
|
||||
return 1; /* assume supported */
|
||||
}
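The libbpf_probe_bpf_*() functions report a tri-state result: 1 means supported, 0 means the kernel recognizably rejected the feature, and a negative errno-style value means the probe itself could not run. A hypothetical caller (an assumed example, not part of this patch) might combine them like this:

#include <stdbool.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* Assumed example: gate a ringbuf-based raw tracepoint feature on what
 * the running kernel supports, using the tri-state probe results.
 */
static bool kernel_has_ringbuf_raw_tp(void)
{
	int prog, map, helper;

	prog = libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_RAW_TRACEPOINT, NULL);
	map = libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL);
	helper = libbpf_probe_bpf_helper(BPF_PROG_TYPE_RAW_TRACEPOINT,
					 BPF_FUNC_ringbuf_output, NULL);

	if (prog < 0 || map < 0 || helper < 0) {
		fprintf(stderr, "probe failed: %d/%d/%d\n", prog, map, helper);
		return false;
	}

	return prog == 1 && map == 1 && helper == 1;
}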
|
||||
|
||||
bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
|
||||
@@ -318,8 +438,7 @@ bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
|
||||
char buf[4096] = {};
|
||||
bool res;
|
||||
|
||||
probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
|
||||
ifindex);
|
||||
probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf), ifindex);
|
||||
res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
|
||||
|
||||
if (ifindex) {
|
||||
@@ -351,8 +470,8 @@ bool bpf_probe_large_insn_limit(__u32 ifindex)
|
||||
insns[BPF_MAXINSNS] = BPF_EXIT_INSN();
|
||||
|
||||
errno = 0;
|
||||
probe_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
|
||||
ifindex);
|
||||
probe_prog_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
|
||||
ifindex);
|
||||
|
||||
return errno != E2BIG && errno != EINVAL;
|
||||
}
|
||||
|
||||
@@ -709,10 +709,14 @@ static int bpf_core_calc_field_relo(const char *prog_name,
|
||||
|
||||
static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
|
||||
const struct bpf_core_spec *spec,
|
||||
__u32 *val)
|
||||
__u32 *val, bool *validate)
|
||||
{
|
||||
__s64 sz;
|
||||
|
||||
/* by default, always check expected value in bpf_insn */
|
||||
if (validate)
|
||||
*validate = true;
|
||||
|
||||
/* type-based relos return zero when target type is not found */
|
||||
if (!spec) {
|
||||
*val = 0;
|
||||
@@ -722,6 +726,11 @@ static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
|
||||
switch (relo->kind) {
|
||||
case BPF_CORE_TYPE_ID_TARGET:
|
||||
*val = spec->root_type_id;
|
||||
/* type ID, embedded in bpf_insn, might change during linking,
|
||||
* so enforcing it is pointless
|
||||
*/
|
||||
if (validate)
|
||||
*validate = false;
|
||||
break;
|
||||
case BPF_CORE_TYPE_EXISTS:
|
||||
*val = 1;
|
||||
@@ -861,8 +870,8 @@ static int bpf_core_calc_relo(const char *prog_name,
|
||||
res->fail_memsz_adjust = true;
|
||||
}
|
||||
} else if (core_relo_is_type_based(relo->kind)) {
|
||||
err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
|
||||
err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
|
||||
err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate);
|
||||
err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL);
|
||||
} else if (core_relo_is_enumval_based(relo->kind)) {
|
||||
err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
|
||||
err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
|
||||
@@ -1213,7 +1222,9 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn,
|
||||
|
||||
/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
|
||||
if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
|
||||
targ_res.validate = true;
|
||||
/* bpf_insn's imm value could get out of sync during linking */
|
||||
memset(&targ_res, 0, sizeof(targ_res));
|
||||
targ_res.validate = false;
|
||||
targ_res.poison = false;
|
||||
targ_res.orig_val = local_spec->root_type_id;
|
||||
targ_res.new_val = local_spec->root_type_id;
|
||||
@@ -1227,7 +1238,6 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn,
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
|
||||
for (i = 0, j = 0; i < cands->len; i++) {
|
||||
err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
|
||||
cands->cands[i].id, cand_spec);
|
||||
|
||||
@@ -548,8 +548,7 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
|
||||
return -errno;
|
||||
|
||||
ifr.ifr_data = (void *)&channels;
|
||||
memcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ - 1);
|
||||
ifr.ifr_name[IFNAMSIZ - 1] = '\0';
|
||||
libbpf_strlcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ);
|
||||
err = ioctl(fd, SIOCETHTOOL, &ifr);
|
||||
if (err && errno != EOPNOTSUPP) {
|
||||
ret = -errno;
|
||||
@@ -768,8 +767,7 @@ static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk)
|
||||
}
|
||||
|
||||
ctx->ifindex = ifindex;
|
||||
memcpy(ctx->ifname, ifname, IFNAMSIZ -1);
|
||||
ctx->ifname[IFNAMSIZ - 1] = 0;
|
||||
libbpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);
|
||||
|
||||
xsk->ctx = ctx;
|
||||
xsk->ctx->has_bpf_link = xsk_probe_bpf_link();
|
||||
@@ -951,8 +949,7 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
|
||||
ctx->refcount = 1;
|
||||
ctx->umem = umem;
|
||||
ctx->queue_id = queue_id;
|
||||
memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
|
||||
ctx->ifname[IFNAMSIZ - 1] = '\0';
|
||||
libbpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);
|
||||
|
||||
ctx->fill = fill;
|
||||
ctx->comp = comp;
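libbpf_strlcpy() is the internal helper these hunks switch to; unlike the memcpy()-plus-manual-NUL pattern it replaces, it never reads past the end of a shorter source string and always NUL-terminates the destination. Roughly (an approximation of the libbpf_internal.h definition, not a verbatim copy):

/* Copy at most sz - 1 bytes, stop at src's terminating NUL, and always
 * NUL-terminate dst (for sz > 0).
 */
static inline void libbpf_strlcpy(char *dst, const char *src, size_t sz)
{
	size_t i;

	if (sz == 0)
		return;

	sz--;
	for (i = 0; i < sz && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
}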
|
||||
|
||||
@@ -3257,10 +3257,21 @@ static void trace__set_bpf_map_syscalls(struct trace *trace)
|
||||
|
||||
static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
|
||||
{
|
||||
struct bpf_program *pos, *prog = NULL;
|
||||
const char *sec_name;
|
||||
|
||||
if (trace->bpf_obj == NULL)
|
||||
return NULL;
|
||||
|
||||
return bpf_object__find_program_by_title(trace->bpf_obj, name);
|
||||
bpf_object__for_each_program(pos, trace->bpf_obj) {
|
||||
sec_name = bpf_program__section_name(pos);
|
||||
if (sec_name && !strcmp(sec_name, name)) {
|
||||
prog = pos;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return prog;
|
||||
}
|
||||
|
||||
static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
|
||||
|
||||
@@ -87,7 +87,18 @@ LLVM_STRIP ?= llvm-strip
|
||||
|
||||
ifeq ($(CC_NO_CLANG), 1)
|
||||
EXTRA_WARNINGS += -Wstrict-aliasing=3
|
||||
endif
|
||||
|
||||
else ifneq ($(CROSS_COMPILE),)
|
||||
CLANG_CROSS_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
|
||||
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)gcc))
|
||||
ifneq ($(GCC_TOOLCHAIN_DIR),)
|
||||
CLANG_CROSS_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
|
||||
CLANG_CROSS_FLAGS += --sysroot=$(shell $(CROSS_COMPILE)gcc -print-sysroot)
|
||||
CLANG_CROSS_FLAGS += --gcc-toolchain=$(realpath $(GCC_TOOLCHAIN_DIR)/..)
|
||||
endif # GCC_TOOLCHAIN_DIR
|
||||
CFLAGS += $(CLANG_CROSS_FLAGS)
|
||||
AFLAGS += $(CLANG_CROSS_FLAGS)
|
||||
endif # CROSS_COMPILE
|
||||
|
||||
# Hack to avoid type-punned warnings on old systems such as RHEL5:
|
||||
# We should be changing CFLAGS and checking gcc version, but this
|
||||
|
||||
@@ -170,7 +170,7 @@ $(OUTPUT)/%:%.c
|
||||
|
||||
$(OUTPUT)/urandom_read: urandom_read.c
|
||||
$(call msg,BINARY,,$@)
|
||||
$(Q)$(CC) $(LDFLAGS) $< $(LDLIBS) -Wl,--build-id=sha1 -o $@
|
||||
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $< $(LDLIBS) -Wl,--build-id=sha1 -o $@
|
||||
|
||||
$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
|
||||
$(call msg,MOD,,$@)
|
||||
@@ -217,7 +217,7 @@ BPFTOOL ?= $(DEFAULT_BPFTOOL)
|
||||
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
|
||||
$(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool
|
||||
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
|
||||
CC=$(HOSTCC) LD=$(HOSTLD) \
|
||||
ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) \
|
||||
EXTRA_CFLAGS='-g -O0' \
|
||||
OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
|
||||
LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
|
||||
@@ -248,7 +248,7 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
|
||||
$(APIDIR)/linux/bpf.h \
|
||||
| $(HOST_BUILD_DIR)/libbpf
|
||||
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \
|
||||
EXTRA_CFLAGS='-g -O0' \
|
||||
EXTRA_CFLAGS='-g -O0' ARCH= CROSS_COMPILE= \
|
||||
OUTPUT=$(HOST_BUILD_DIR)/libbpf/ CC=$(HOSTCC) LD=$(HOSTLD) \
|
||||
DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
|
||||
endif
|
||||
@@ -537,6 +537,7 @@ $(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \
|
||||
$(OUTPUT)/perfbuf_bench.skel.h
|
||||
$(OUTPUT)/bench_bloom_filter_map.o: $(OUTPUT)/bloom_filter_bench.skel.h
|
||||
$(OUTPUT)/bench_bpf_loop.o: $(OUTPUT)/bpf_loop_bench.skel.h
|
||||
$(OUTPUT)/bench_strncmp.o: $(OUTPUT)/strncmp_bench.skel.h
|
||||
$(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ)
|
||||
$(OUTPUT)/bench: LDLIBS += -lm
|
||||
$(OUTPUT)/bench: $(OUTPUT)/bench.o \
|
||||
@@ -547,9 +548,10 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
|
||||
$(OUTPUT)/bench_trigger.o \
|
||||
$(OUTPUT)/bench_ringbufs.o \
|
||||
$(OUTPUT)/bench_bloom_filter_map.o \
|
||||
$(OUTPUT)/bench_bpf_loop.o
|
||||
$(OUTPUT)/bench_bpf_loop.o \
|
||||
$(OUTPUT)/bench_strncmp.o
|
||||
$(call msg,BINARY,,$@)
|
||||
$(Q)$(CC) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
|
||||
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
|
||||
|
||||
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
|
||||
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
|
||||
|
||||
@@ -29,26 +29,10 @@ static int libbpf_print_fn(enum libbpf_print_level level,
|
||||
return vfprintf(stderr, format, args);
|
||||
}
|
||||
|
||||
static int bump_memlock_rlimit(void)
|
||||
void setup_libbpf(void)
|
||||
{
|
||||
struct rlimit rlim_new = {
|
||||
.rlim_cur = RLIM_INFINITY,
|
||||
.rlim_max = RLIM_INFINITY,
|
||||
};
|
||||
|
||||
return setrlimit(RLIMIT_MEMLOCK, &rlim_new);
|
||||
}
|
||||
|
||||
void setup_libbpf()
|
||||
{
|
||||
int err;
|
||||
|
||||
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
|
||||
libbpf_set_print(libbpf_print_fn);
|
||||
|
||||
err = bump_memlock_rlimit();
|
||||
if (err)
|
||||
fprintf(stderr, "failed to increase RLIMIT_MEMLOCK: %d", err);
|
||||
}
|
||||
|
||||
void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns)
|
||||
@@ -205,11 +189,13 @@ static const struct argp_option opts[] = {
|
||||
extern struct argp bench_ringbufs_argp;
|
||||
extern struct argp bench_bloom_map_argp;
|
||||
extern struct argp bench_bpf_loop_argp;
|
||||
extern struct argp bench_strncmp_argp;
|
||||
|
||||
static const struct argp_child bench_parsers[] = {
|
||||
{ &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 },
|
||||
{ &bench_bloom_map_argp, 0, "Bloom filter map benchmark", 0 },
|
||||
{ &bench_bpf_loop_argp, 0, "bpf_loop helper benchmark", 0 },
|
||||
{ &bench_strncmp_argp, 0, "bpf_strncmp helper benchmark", 0 },
|
||||
{},
|
||||
};
|
||||
|
||||
@@ -409,6 +395,8 @@ extern const struct bench bench_bloom_false_positive;
|
||||
extern const struct bench bench_hashmap_without_bloom;
|
||||
extern const struct bench bench_hashmap_with_bloom;
|
||||
extern const struct bench bench_bpf_loop;
|
||||
extern const struct bench bench_strncmp_no_helper;
|
||||
extern const struct bench bench_strncmp_helper;
|
||||
|
||||
static const struct bench *benchs[] = {
|
||||
&bench_count_global,
|
||||
@@ -441,6 +429,8 @@ static const struct bench *benchs[] = {
|
||||
&bench_hashmap_without_bloom,
|
||||
&bench_hashmap_with_bloom,
|
||||
&bench_bpf_loop,
|
||||
&bench_strncmp_no_helper,
|
||||
&bench_strncmp_helper,
|
||||
};
|
||||
|
||||
static void setup_benchmark()
|
||||
|
||||
@@ -38,8 +38,8 @@ struct bench_res {
|
||||
|
||||
struct bench {
|
||||
const char *name;
|
||||
void (*validate)();
|
||||
void (*setup)();
|
||||
void (*validate)(void);
|
||||
void (*setup)(void);
|
||||
void *(*producer_thread)(void *ctx);
|
||||
void *(*consumer_thread)(void *ctx);
|
||||
void (*measure)(struct bench_res* res);
|
||||
@@ -54,7 +54,7 @@ struct counter {
|
||||
extern struct env env;
|
||||
extern const struct bench *bench;
|
||||
|
||||
void setup_libbpf();
|
||||
void setup_libbpf(void);
|
||||
void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns);
|
||||
void hits_drops_report_final(struct bench_res res[], int res_cnt);
|
||||
void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns);
|
||||
@@ -62,7 +62,8 @@ void false_hits_report_final(struct bench_res res[], int res_cnt);
|
||||
void ops_report_progress(int iter, struct bench_res *res, long delta_ns);
|
||||
void ops_report_final(struct bench_res res[], int res_cnt);
|
||||
|
||||
static inline __u64 get_time_ns() {
|
||||
static inline __u64 get_time_ns(void)
|
||||
{
|
||||
struct timespec t;
|
||||
|
||||
clock_gettime(CLOCK_MONOTONIC, &t);
|
||||
|
||||
@@ -36,7 +36,7 @@ static struct count_local_ctx {
|
||||
struct counter *hits;
|
||||
} count_local_ctx;
|
||||
|
||||
static void count_local_setup()
|
||||
static void count_local_setup(void)
|
||||
{
|
||||
struct count_local_ctx *ctx = &count_local_ctx;
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ static struct ctx {
|
||||
int fd;
|
||||
} ctx;
|
||||
|
||||
static void validate()
|
||||
static void validate(void)
|
||||
{
|
||||
if (env.producer_cnt != 1) {
|
||||
fprintf(stderr, "benchmark doesn't support multi-producer!\n");
|
||||
@@ -43,7 +43,7 @@ static void measure(struct bench_res *res)
|
||||
res->hits = atomic_swap(&ctx.hits.value, 0);
|
||||
}
|
||||
|
||||
static void setup_ctx()
|
||||
static void setup_ctx(void)
|
||||
{
|
||||
setup_libbpf();
|
||||
|
||||
@@ -71,36 +71,36 @@ static void attach_bpf(struct bpf_program *prog)
|
||||
}
|
||||
}
|
||||
|
||||
static void setup_base()
|
||||
static void setup_base(void)
|
||||
{
|
||||
setup_ctx();
|
||||
}
|
||||
|
||||
static void setup_kprobe()
|
||||
static void setup_kprobe(void)
|
||||
{
|
||||
setup_ctx();
|
||||
attach_bpf(ctx.skel->progs.prog1);
|
||||
}
|
||||
|
||||
static void setup_kretprobe()
|
||||
static void setup_kretprobe(void)
|
||||
{
|
||||
setup_ctx();
|
||||
attach_bpf(ctx.skel->progs.prog2);
|
||||
}
|
||||
|
||||
static void setup_rawtp()
|
||||
static void setup_rawtp(void)
|
||||
{
|
||||
setup_ctx();
|
||||
attach_bpf(ctx.skel->progs.prog3);
|
||||
}
|
||||
|
||||
static void setup_fentry()
|
||||
static void setup_fentry(void)
|
||||
{
|
||||
setup_ctx();
|
||||
attach_bpf(ctx.skel->progs.prog4);
|
||||
}
|
||||
|
||||
static void setup_fexit()
|
||||
static void setup_fexit(void)
|
||||
{
|
||||
setup_ctx();
|
||||
attach_bpf(ctx.skel->progs.prog5);
|
||||
|
||||
@@ -88,12 +88,12 @@ const struct argp bench_ringbufs_argp = {
|
||||
|
||||
static struct counter buf_hits;
|
||||
|
||||
static inline void bufs_trigger_batch()
|
||||
static inline void bufs_trigger_batch(void)
|
||||
{
|
||||
(void)syscall(__NR_getpgid);
|
||||
}
|
||||
|
||||
static void bufs_validate()
|
||||
static void bufs_validate(void)
|
||||
{
|
||||
if (env.consumer_cnt != 1) {
|
||||
fprintf(stderr, "rb-libbpf benchmark doesn't support multi-consumer!\n");
|
||||
@@ -132,7 +132,7 @@ static void ringbuf_libbpf_measure(struct bench_res *res)
|
||||
res->drops = atomic_swap(&ctx->skel->bss->dropped, 0);
|
||||
}
|
||||
|
||||
static struct ringbuf_bench *ringbuf_setup_skeleton()
|
||||
static struct ringbuf_bench *ringbuf_setup_skeleton(void)
|
||||
{
|
||||
struct ringbuf_bench *skel;
|
||||
|
||||
@@ -167,7 +167,7 @@ static int buf_process_sample(void *ctx, void *data, size_t len)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ringbuf_libbpf_setup()
|
||||
static void ringbuf_libbpf_setup(void)
|
||||
{
|
||||
struct ringbuf_libbpf_ctx *ctx = &ringbuf_libbpf_ctx;
|
||||
struct bpf_link *link;
|
||||
@@ -223,7 +223,7 @@ static void ringbuf_custom_measure(struct bench_res *res)
|
||||
res->drops = atomic_swap(&ctx->skel->bss->dropped, 0);
|
||||
}
|
||||
|
||||
static void ringbuf_custom_setup()
|
||||
static void ringbuf_custom_setup(void)
|
||||
{
|
||||
struct ringbuf_custom_ctx *ctx = &ringbuf_custom_ctx;
|
||||
const size_t page_size = getpagesize();
|
||||
@@ -352,7 +352,7 @@ static void perfbuf_measure(struct bench_res *res)
|
||||
res->drops = atomic_swap(&ctx->skel->bss->dropped, 0);
|
||||
}
|
||||
|
||||
static struct perfbuf_bench *perfbuf_setup_skeleton()
|
||||
static struct perfbuf_bench *perfbuf_setup_skeleton(void)
|
||||
{
|
||||
struct perfbuf_bench *skel;
|
||||
|
||||
@@ -390,7 +390,7 @@ perfbuf_process_sample_raw(void *input_ctx, int cpu,
|
||||
return LIBBPF_PERF_EVENT_CONT;
|
||||
}
|
||||
|
||||
static void perfbuf_libbpf_setup()
|
||||
static void perfbuf_libbpf_setup(void)
|
||||
{
|
||||
struct perfbuf_libbpf_ctx *ctx = &perfbuf_libbpf_ctx;
|
||||
struct perf_event_attr attr;
|
||||
|
||||
161
tools/testing/selftests/bpf/benchs/bench_strncmp.c
Normal file
@@ -0,0 +1,161 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
|
||||
#include <argp.h>
|
||||
#include "bench.h"
|
||||
#include "strncmp_bench.skel.h"
|
||||
|
||||
static struct strncmp_ctx {
|
||||
struct strncmp_bench *skel;
|
||||
} ctx;
|
||||
|
||||
static struct strncmp_args {
|
||||
u32 cmp_str_len;
|
||||
} args = {
|
||||
.cmp_str_len = 32,
|
||||
};
|
||||
|
||||
enum {
|
||||
ARG_CMP_STR_LEN = 5000,
|
||||
};
|
||||
|
||||
static const struct argp_option opts[] = {
|
||||
{ "cmp-str-len", ARG_CMP_STR_LEN, "CMP_STR_LEN", 0,
|
||||
"Set the length of compared string" },
|
||||
{},
|
||||
};
|
||||
|
||||
static error_t strncmp_parse_arg(int key, char *arg, struct argp_state *state)
|
||||
{
|
||||
switch (key) {
|
||||
case ARG_CMP_STR_LEN:
|
||||
args.cmp_str_len = strtoul(arg, NULL, 10);
|
||||
if (!args.cmp_str_len ||
|
||||
args.cmp_str_len >= sizeof(ctx.skel->bss->str)) {
|
||||
fprintf(stderr, "Invalid cmp str len (limit %zu)\n",
|
||||
sizeof(ctx.skel->bss->str));
|
||||
argp_usage(state);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return ARGP_ERR_UNKNOWN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct argp bench_strncmp_argp = {
|
||||
.options = opts,
|
||||
.parser = strncmp_parse_arg,
|
||||
};
|
||||
|
||||
static void strncmp_validate(void)
|
||||
{
|
||||
if (env.consumer_cnt != 1) {
|
||||
fprintf(stderr, "strncmp benchmark doesn't support multi-consumer!\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
static void strncmp_setup(void)
|
||||
{
|
||||
int err;
|
||||
char *target;
|
||||
size_t i, sz;
|
||||
|
||||
sz = sizeof(ctx.skel->rodata->target);
|
||||
if (!sz || sz < sizeof(ctx.skel->bss->str)) {
|
||||
fprintf(stderr, "invalid string size (target %zu, src %zu)\n",
|
||||
sz, sizeof(ctx.skel->bss->str));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
setup_libbpf();
|
||||
|
||||
ctx.skel = strncmp_bench__open();
|
||||
if (!ctx.skel) {
|
||||
fprintf(stderr, "failed to open skeleton\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
srandom(time(NULL));
|
||||
target = ctx.skel->rodata->target;
|
||||
for (i = 0; i < sz - 1; i++)
|
||||
target[i] = '1' + random() % 9;
|
||||
target[sz - 1] = '\0';
|
||||
|
||||
ctx.skel->rodata->cmp_str_len = args.cmp_str_len;
|
||||
|
||||
memcpy(ctx.skel->bss->str, target, args.cmp_str_len);
|
||||
ctx.skel->bss->str[args.cmp_str_len] = '\0';
|
||||
/* Make bss->str < rodata->target */
|
||||
ctx.skel->bss->str[args.cmp_str_len - 1] -= 1;
|
||||
|
||||
err = strncmp_bench__load(ctx.skel);
|
||||
if (err) {
|
||||
fprintf(stderr, "failed to load skeleton\n");
|
||||
strncmp_bench__destroy(ctx.skel);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
static void strncmp_attach_prog(struct bpf_program *prog)
|
||||
{
|
||||
struct bpf_link *link;
|
||||
|
||||
link = bpf_program__attach(prog);
|
||||
if (!link) {
|
||||
fprintf(stderr, "failed to attach program!\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
static void strncmp_no_helper_setup(void)
|
||||
{
|
||||
strncmp_setup();
|
||||
strncmp_attach_prog(ctx.skel->progs.strncmp_no_helper);
|
||||
}
|
||||
|
||||
static void strncmp_helper_setup(void)
|
||||
{
|
||||
strncmp_setup();
|
||||
strncmp_attach_prog(ctx.skel->progs.strncmp_helper);
|
||||
}
|
||||
|
||||
static void *strncmp_producer(void *ctx)
|
||||
{
|
||||
while (true)
|
||||
(void)syscall(__NR_getpgid);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *strncmp_consumer(void *ctx)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void strncmp_measure(struct bench_res *res)
|
||||
{
|
||||
res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
|
||||
}
|
||||
|
||||
const struct bench bench_strncmp_no_helper = {
|
||||
.name = "strncmp-no-helper",
|
||||
.validate = strncmp_validate,
|
||||
.setup = strncmp_no_helper_setup,
|
||||
.producer_thread = strncmp_producer,
|
||||
.consumer_thread = strncmp_consumer,
|
||||
.measure = strncmp_measure,
|
||||
.report_progress = hits_drops_report_progress,
|
||||
.report_final = hits_drops_report_final,
|
||||
};
|
||||
|
||||
const struct bench bench_strncmp_helper = {
|
||||
.name = "strncmp-helper",
|
||||
.validate = strncmp_validate,
|
||||
.setup = strncmp_helper_setup,
|
||||
.producer_thread = strncmp_producer,
|
||||
.consumer_thread = strncmp_consumer,
|
||||
.measure = strncmp_measure,
|
||||
.report_progress = hits_drops_report_progress,
|
||||
.report_final = hits_drops_report_final,
|
||||
};
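The skeleton used above is generated from progs/strncmp_bench.c, which is not shown in this hunk. Purely as an illustration of what the two benchmarked flavors contrast, a BPF-side program using the new bpf_strncmp() helper could look roughly like the sketch below (names, section, and layout are assumed, not the actual benchmark source):

/* Hypothetical illustration of the bpf_strncmp() helper; not the actual
 * progs/strncmp_bench.c. The "no-helper" flavor would do the same
 * comparison with an open-coded loop instead.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define STR_SZ 4096

const volatile unsigned int cmp_str_len = 1;	/* set via skeleton rodata */
const char target[STR_SZ];			/* set via skeleton rodata */
char str[STR_SZ];
long hits = 0;

SEC("tp/syscalls/sys_enter_getpgid")
int strncmp_helper(void *ctx)
{
	/* bpf_strncmp(s1, s1_sz, s2): s2 must be a read-only constant string */
	if (bpf_strncmp(str, cmp_str_len, target) < 0)
		__sync_add_and_fetch(&hits, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";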
|
||||
@@ -11,7 +11,7 @@ static struct trigger_ctx {
|
||||
|
||||
static struct counter base_hits;
|
||||
|
||||
static void trigger_validate()
|
||||
static void trigger_validate(void)
|
||||
{
|
||||
if (env.consumer_cnt != 1) {
|
||||
fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
|
||||
@@ -45,7 +45,7 @@ static void trigger_measure(struct bench_res *res)
|
||||
res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
|
||||
}
|
||||
|
||||
static void setup_ctx()
|
||||
static void setup_ctx(void)
|
||||
{
|
||||
setup_libbpf();
|
||||
|
||||
@@ -67,37 +67,37 @@ static void attach_bpf(struct bpf_program *prog)
|
||||
}
|
||||
}
|
||||
|
||||
static void trigger_tp_setup()
|
||||
static void trigger_tp_setup(void)
|
||||
{
|
||||
setup_ctx();
|
||||
attach_bpf(ctx.skel->progs.bench_trigger_tp);
|
||||
}
|
||||
|
||||
static void trigger_rawtp_setup()
|
||||
static void trigger_rawtp_setup(void)
|
||||
{
|
||||
setup_ctx();
|
||||
attach_bpf(ctx.skel->progs.bench_trigger_raw_tp);
|
||||
}
|
||||
|
||||
static void trigger_kprobe_setup()
|
||||
static void trigger_kprobe_setup(void)
|
||||
{
|
||||
setup_ctx();
|
||||
attach_bpf(ctx.skel->progs.bench_trigger_kprobe);
|
||||
}
|
||||
|
||||
static void trigger_fentry_setup()
|
||||
static void trigger_fentry_setup(void)
|
||||
{
|
||||
setup_ctx();
|
||||
attach_bpf(ctx.skel->progs.bench_trigger_fentry);
|
||||
}
|
||||
|
||||
static void trigger_fentry_sleep_setup()
|
||||
static void trigger_fentry_sleep_setup(void)
|
||||
{
|
||||
setup_ctx();
|
||||
attach_bpf(ctx.skel->progs.bench_trigger_fentry_sleep);
|
||||
}
|
||||
|
||||
static void trigger_fmodret_setup()
|
||||
static void trigger_fmodret_setup(void)
|
||||
{
|
||||
setup_ctx();
|
||||
attach_bpf(ctx.skel->progs.bench_trigger_fmodret);
|
||||
@@ -183,22 +183,22 @@ static void usetup(bool use_retprobe, bool use_nop)
|
||||
ctx.skel->links.bench_trigger_uprobe = link;
|
||||
}
|
||||
|
||||
static void uprobe_setup_with_nop()
|
||||
static void uprobe_setup_with_nop(void)
|
||||
{
|
||||
usetup(false, true);
|
||||
}
|
||||
|
||||
static void uretprobe_setup_with_nop()
|
||||
static void uretprobe_setup_with_nop(void)
|
||||
{
|
||||
usetup(true, true);
|
||||
}
|
||||
|
||||
static void uprobe_setup_without_nop()
|
||||
static void uprobe_setup_without_nop(void)
|
||||
{
|
||||
usetup(false, false);
|
||||
}
|
||||
|
||||
static void uretprobe_setup_without_nop()
|
||||
static void uretprobe_setup_without_nop(void)
|
||||
{
|
||||
usetup(true, false);
|
||||
}
|
||||
|
||||
12
tools/testing/selftests/bpf/benchs/run_bench_strncmp.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
source ./benchs/run_common.sh
|
||||
|
||||
set -eufo pipefail
|
||||
|
||||
for s in 1 8 64 512 2048 4095; do
|
||||
for b in no-helper helper; do
|
||||
summarize ${b}-${s} "$($RUN_BENCH --cmp-str-len=$s strncmp-${b})"
|
||||
done
|
||||
done
|
||||
@@ -38,7 +38,9 @@ CONFIG_IPV6_SIT=m
|
||||
CONFIG_BPF_JIT=y
|
||||
CONFIG_BPF_LSM=y
|
||||
CONFIG_SECURITY=y
|
||||
CONFIG_RC_CORE=y
|
||||
CONFIG_LIRC=y
|
||||
CONFIG_BPF_LIRC_MODE2=y
|
||||
CONFIG_IMA=y
|
||||
CONFIG_SECURITYFS=y
|
||||
CONFIG_IMA_WRITE_POLICY=y
|
||||
|
||||
@@ -39,13 +39,13 @@ static struct bpf_align_test tests[] = {
|
||||
},
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
.matches = {
|
||||
{1, "R1=ctx(id=0,off=0,imm=0)"},
|
||||
{1, "R10=fp0"},
|
||||
{1, "R3_w=inv2"},
|
||||
{2, "R3_w=inv4"},
|
||||
{3, "R3_w=inv8"},
|
||||
{4, "R3_w=inv16"},
|
||||
{5, "R3_w=inv32"},
|
||||
{0, "R1=ctx(id=0,off=0,imm=0)"},
|
||||
{0, "R10=fp0"},
|
||||
{0, "R3_w=inv2"},
|
||||
{1, "R3_w=inv4"},
|
||||
{2, "R3_w=inv8"},
|
||||
{3, "R3_w=inv16"},
|
||||
{4, "R3_w=inv32"},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -67,19 +67,19 @@ static struct bpf_align_test tests[] = {
|
||||
},
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
.matches = {
|
||||
{1, "R1=ctx(id=0,off=0,imm=0)"},
|
||||
{1, "R10=fp0"},
|
||||
{1, "R3_w=inv1"},
|
||||
{2, "R3_w=inv2"},
|
||||
{3, "R3_w=inv4"},
|
||||
{4, "R3_w=inv8"},
|
||||
{5, "R3_w=inv16"},
|
||||
{6, "R3_w=inv1"},
|
||||
{7, "R4_w=inv32"},
|
||||
{8, "R4_w=inv16"},
|
||||
{9, "R4_w=inv8"},
|
||||
{10, "R4_w=inv4"},
|
||||
{11, "R4_w=inv2"},
|
||||
{0, "R1=ctx(id=0,off=0,imm=0)"},
|
||||
{0, "R10=fp0"},
|
||||
{0, "R3_w=inv1"},
|
||||
{1, "R3_w=inv2"},
|
||||
{2, "R3_w=inv4"},
|
||||
{3, "R3_w=inv8"},
|
||||
{4, "R3_w=inv16"},
|
||||
{5, "R3_w=inv1"},
|
||||
{6, "R4_w=inv32"},
|
||||
{7, "R4_w=inv16"},
|
||||
{8, "R4_w=inv8"},
|
||||
{9, "R4_w=inv4"},
|
||||
{10, "R4_w=inv2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -96,14 +96,14 @@ static struct bpf_align_test tests[] = {
|
||||
},
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
.matches = {
|
||||
{1, "R1=ctx(id=0,off=0,imm=0)"},
|
||||
{1, "R10=fp0"},
|
||||
{1, "R3_w=inv4"},
|
||||
{2, "R3_w=inv8"},
|
||||
{3, "R3_w=inv10"},
|
||||
{4, "R4_w=inv8"},
|
||||
{5, "R4_w=inv12"},
|
||||
{6, "R4_w=inv14"},
|
||||
{0, "R1=ctx(id=0,off=0,imm=0)"},
|
||||
{0, "R10=fp0"},
|
||||
{0, "R3_w=inv4"},
|
||||
{1, "R3_w=inv8"},
|
||||
{2, "R3_w=inv10"},
|
||||
{3, "R4_w=inv8"},
|
||||
{4, "R4_w=inv12"},
|
||||
{5, "R4_w=inv14"},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -118,12 +118,12 @@ static struct bpf_align_test tests[] = {
|
||||
},
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
.matches = {
|
||||
{1, "R1=ctx(id=0,off=0,imm=0)"},
|
||||
{1, "R10=fp0"},
|
||||
{0, "R1=ctx(id=0,off=0,imm=0)"},
|
||||
{0, "R10=fp0"},
|
||||
{0, "R3_w=inv7"},
|
||||
{1, "R3_w=inv7"},
|
||||
{2, "R3_w=inv7"},
|
||||
{3, "R3_w=inv14"},
|
||||
{4, "R3_w=inv56"},
|
||||
{2, "R3_w=inv14"},
|
||||
{3, "R3_w=inv56"},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -161,19 +161,19 @@ static struct bpf_align_test tests[] = {
|
||||
},
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
.matches = {
|
||||
{7, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
|
||||
{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
|
||||
{9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
|
||||
{11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
|
||||
{18, "R3=pkt_end(id=0,off=0,imm=0)"},
|
||||
{18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
|
||||
{20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
|
||||
{21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
|
||||
{22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
|
||||
{6, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
|
||||
{6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{7, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
|
||||
{8, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{9, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
|
||||
{10, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
|
||||
{12, "R3_w=pkt_end(id=0,off=0,imm=0)"},
|
||||
{17, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{18, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
|
||||
{19, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
|
||||
{20, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
|
||||
{21, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{22, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -194,16 +194,16 @@ static struct bpf_align_test tests[] = {
|
||||
},
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
.matches = {
|
||||
{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{8, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{10, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
|
||||
{12, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{14, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
|
||||
{16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
|
||||
{6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{7, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{9, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{10, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
|
||||
{11, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{12, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{13, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{14, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
|
||||
{15, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -234,14 +234,14 @@ static struct bpf_align_test tests[] = {
|
||||
},
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
.matches = {
|
||||
{4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
|
||||
{5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
|
||||
{6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
|
||||
{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
|
||||
{2, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
|
||||
{4, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
|
||||
{5, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
|
||||
{9, "R2=pkt(id=0,off=0,r=18,imm=0)"},
|
||||
{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
|
||||
{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
|
||||
{13, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
|
||||
{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
|
||||
{15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -296,8 +296,8 @@ static struct bpf_align_test tests[] = {
|
||||
/* Calculated offset in R6 has unknown value, but known
|
||||
* alignment of 4.
|
||||
*/
|
||||
{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
|
||||
{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
|
||||
{7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
/* Offset is added to packet pointer R5, resulting in
|
||||
* known fixed offset, and variable offset from R6.
|
||||
*/
|
||||
@@ -313,11 +313,11 @@ static struct bpf_align_test tests[] = {
|
||||
/* Variable offset is added to R5 packet pointer,
|
||||
* resulting in auxiliary alignment of 4.
|
||||
*/
|
||||
{18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{17, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
/* Constant offset is added to R5, resulting in
|
||||
* reg->off of 14.
|
||||
*/
|
||||
{19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{18, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
/* At the time the word size load is performed from R5,
|
||||
* its total fixed offset is NET_IP_ALIGN + reg->off
|
||||
* (14) which is 16. Then the variable offset is 4-byte
|
||||
@@ -329,18 +329,18 @@ static struct bpf_align_test tests[] = {
|
||||
/* Constant offset is added to R5 packet pointer,
|
||||
* resulting in reg->off value of 14.
|
||||
*/
|
||||
{26, "R5_w=pkt(id=0,off=14,r=8"},
|
||||
{25, "R5_w=pkt(id=0,off=14,r=8"},
|
||||
/* Variable offset is added to R5, resulting in a
|
||||
* variable offset of (4n).
|
||||
*/
|
||||
{27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{26, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
/* Constant is added to R5 again, setting reg->off to 18. */
|
||||
{28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{27, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
/* And once more we add a variable; resulting var_off
|
||||
* is still (4n), fixed offset is not changed.
|
||||
* Also, we create a new reg->id.
|
||||
*/
|
||||
{29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
|
||||
{28, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
|
||||
/* At the time the word size load is performed from R5,
|
||||
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
|
||||
* which is 20. Then the variable offset is (4n), so
|
||||
@@ -386,13 +386,13 @@ static struct bpf_align_test tests[] = {
|
||||
/* Calculated offset in R6 has unknown value, but known
|
||||
* alignment of 4.
|
||||
*/
|
||||
{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
|
||||
{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
|
||||
{7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
/* Adding 14 makes R6 be (4n+2) */
|
||||
{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
|
||||
{8, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
|
||||
/* Packet pointer has (4n+2) offset */
|
||||
{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
|
||||
{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
|
||||
{12, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
|
||||
/* At the time the word size load is performed from R5,
|
||||
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
|
||||
* which is 2. Then the variable offset is (4n+2), so
|
||||
@@ -403,12 +403,12 @@ static struct bpf_align_test tests[] = {
|
||||
/* Newly read value in R6 was shifted left by 2, so has
|
||||
* known alignment of 4.
|
||||
*/
|
||||
{18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{17, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
/* Added (4n) to packet pointer's (4n+2) var_off, giving
|
||||
* another (4n+2).
|
||||
*/
|
||||
{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
|
||||
{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
|
||||
{20, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
|
||||
/* At the time the word size load is performed from R5,
|
||||
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
|
||||
* which is 2. Then the variable offset is (4n+2), so
|
||||
@@ -448,18 +448,18 @@ static struct bpf_align_test tests[] = {
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
.result = REJECT,
|
||||
.matches = {
|
||||
{4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
|
||||
{3, "R5_w=pkt_end(id=0,off=0,imm=0)"},
|
||||
/* (ptr - ptr) << 2 == unknown, (4n) */
|
||||
{6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
|
||||
{5, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
|
||||
/* (4n) + 14 == (4n+2). We blow our bounds, because
|
||||
* the add could overflow.
|
||||
*/
|
||||
{7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
|
||||
{6, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
|
||||
/* Checked s>=0 */
|
||||
{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
|
||||
/* packet pointer + nonnegative (4n+2) */
|
||||
{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
|
||||
{13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
|
||||
{12, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
|
||||
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
|
||||
* We checked the bounds, but it might have been able
|
||||
* to overflow if the packet pointer started in the
|
||||
@@ -502,14 +502,14 @@ static struct bpf_align_test tests[] = {
|
||||
/* Calculated offset in R6 has unknown value, but known
|
||||
* alignment of 4.
|
||||
*/
|
||||
{7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
|
||||
{9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
|
||||
{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
/* Adding 14 makes R6 be (4n+2) */
|
||||
{10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
|
||||
{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
|
||||
/* New unknown value in R7 is (4n) */
|
||||
{11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
{10, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
|
||||
/* Subtracting it from R6 blows our unsigned bounds */
|
||||
{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
|
||||
{11, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
|
||||
/* Checked s>= 0 */
|
||||
{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
|
||||
/* At the time the word size load is performed from R5,
|
||||
@@ -556,14 +556,14 @@ static struct bpf_align_test tests[] = {
|
||||
/* Calculated offset in R6 has unknown value, but known
|
||||
* alignment of 4.
|
||||
*/
|
||||
{7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
|
||||
{10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
|
||||
{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
|
||||
{9, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
|
||||
/* Adding 14 makes R6 be (4n+2) */
|
||||
{11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
|
||||
{10, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
|
||||
/* Subtracting from packet pointer overflows ubounds */
|
||||
{13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
|
||||
/* New unknown value in R7 is (4n), >= 76 */
|
||||
{15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
|
||||
{14, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
|
||||
/* Adding it to packet pointer gives nice bounds again */
|
||||
{16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
|
||||
/* At the time the word size load is performed from R5,
|
||||
@@ -625,12 +625,15 @@ static int do_test_single(struct bpf_align_test *test)
|
||||
line_ptr = strtok(bpf_vlog_copy, "\n");
|
||||
for (i = 0; i < MAX_MATCHES; i++) {
|
||||
struct bpf_reg_match m = test->matches[i];
|
||||
int tmp;
|
||||
|
||||
if (!m.match)
|
||||
break;
|
||||
while (line_ptr) {
|
||||
cur_line = -1;
|
||||
sscanf(line_ptr, "%u: ", &cur_line);
|
||||
if (cur_line == -1)
|
||||
sscanf(line_ptr, "from %u to %u: ", &tmp, &cur_line);
|
||||
if (cur_line == m.line)
|
||||
break;
|
||||
line_ptr = strtok(NULL, "\n");
|
||||
@@ -642,7 +645,19 @@ static int do_test_single(struct bpf_align_test *test)
|
||||
printf("%s", bpf_vlog);
|
||||
break;
|
||||
}
|
||||
/* Check the next line as well in case the previous line
|
||||
* did not have a corresponding bpf insn. Example:
|
||||
* func#0 @0
|
||||
* 0: R1=ctx(id=0,off=0,imm=0) R10=fp0
|
||||
* 0: (b7) r3 = 2 ; R3_w=inv2
|
||||
*/
|
||||
if (!strstr(line_ptr, m.match)) {
|
||||
cur_line = -1;
|
||||
line_ptr = strtok(NULL, "\n");
|
||||
sscanf(line_ptr, "%u: ", &cur_line);
|
||||
}
|
||||
if (cur_line != m.line || !line_ptr ||
|
||||
!strstr(line_ptr, m.match)) {
|
||||
printf("Failed to find match %u: %s\n",
|
||||
m.line, m.match);
|
||||
ret = 1;
|
||||
|
||||
@@ -65,8 +65,8 @@ void serial_test_bpf_obj_id(void)
|
||||
if (CHECK_FAIL(err))
|
||||
goto done;
|
||||
|
||||
prog = bpf_object__find_program_by_title(objs[i],
|
||||
"raw_tp/sys_enter");
|
||||
prog = bpf_object__find_program_by_name(objs[i],
|
||||
"test_obj_id");
|
||||
if (CHECK_FAIL(!prog))
|
||||
goto done;
|
||||
links[i] = bpf_program__attach(prog);
|
||||
|
||||
@@ -217,7 +217,7 @@ static bool found;
|
||||
static int libbpf_debug_print(enum libbpf_print_level level,
|
||||
const char *format, va_list args)
|
||||
{
|
||||
const char *log_buf;
|
||||
const char *prog_name, *log_buf;
|
||||
|
||||
if (level != LIBBPF_WARN ||
|
||||
!strstr(format, "-- BEGIN PROG LOAD LOG --")) {
|
||||
@@ -225,15 +225,14 @@ static int libbpf_debug_print(enum libbpf_print_level level,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* skip prog_name */
|
||||
va_arg(args, char *);
|
||||
prog_name = va_arg(args, char *);
|
||||
log_buf = va_arg(args, char *);
|
||||
if (!log_buf)
|
||||
goto out;
|
||||
if (err_str && strstr(log_buf, err_str) != NULL)
|
||||
found = true;
|
||||
out:
|
||||
printf(format, log_buf);
|
||||
printf(format, prog_name, log_buf);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
#include <bpf/libbpf.h>
|
||||
#include <bpf/btf.h>
|
||||
|
||||
#include "bpf_rlimit.h"
|
||||
#include "bpf_util.h"
|
||||
#include "../test_btf.h"
|
||||
#include "test_progs.h"
|
||||
|
||||
@@ -67,9 +67,9 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type)
|
||||
goto close_bpf_object;
|
||||
}
|
||||
|
||||
prog = bpf_object__find_program_by_title(obj, v4 ?
|
||||
"cgroup/connect4" :
|
||||
"cgroup/connect6");
|
||||
prog = bpf_object__find_program_by_name(obj, v4 ?
|
||||
"connect4" :
|
||||
"connect6");
|
||||
if (CHECK(!prog, "find_prog", "connect prog not found\n")) {
|
||||
err = -EIO;
|
||||
goto close_bpf_object;
|
||||
@@ -83,9 +83,9 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type)
|
||||
goto close_bpf_object;
|
||||
}
|
||||
|
||||
prog = bpf_object__find_program_by_title(obj, v4 ?
|
||||
"cgroup/getpeername4" :
|
||||
"cgroup/getpeername6");
|
||||
prog = bpf_object__find_program_by_name(obj, v4 ?
|
||||
"getpeername4" :
|
||||
"getpeername6");
|
||||
if (CHECK(!prog, "find_prog", "getpeername prog not found\n")) {
|
||||
err = -EIO;
|
||||
goto close_bpf_object;
|
||||
@@ -99,9 +99,9 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type)
|
||||
goto close_bpf_object;
|
||||
}
|
||||
|
||||
prog = bpf_object__find_program_by_title(obj, v4 ?
|
||||
"cgroup/getsockname4" :
|
||||
"cgroup/getsockname6");
|
||||
prog = bpf_object__find_program_by_name(obj, v4 ?
|
||||
"getsockname4" :
|
||||
"getsockname6");
|
||||
if (CHECK(!prog, "find_prog", "getsockname prog not found\n")) {
|
||||
err = -EIO;
|
||||
goto close_bpf_object;
|
||||
|
||||
@@ -10,7 +10,7 @@ static int duration = 0;
|
||||
|
||||
#define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name)
|
||||
|
||||
#define MODULES_CASE(name, sec_name, tp_name) { \
|
||||
#define MODULES_CASE(name, pg_name, tp_name) { \
|
||||
.case_name = name, \
|
||||
.bpf_obj_file = "test_core_reloc_module.o", \
|
||||
.btf_src_file = NULL, /* find in kernel module BTFs */ \
|
||||
@@ -28,7 +28,7 @@ static int duration = 0;
|
||||
.comm_len = sizeof("test_progs"), \
|
||||
}, \
|
||||
.output_len = sizeof(struct core_reloc_module_output), \
|
||||
.prog_sec_name = sec_name, \
|
||||
.prog_name = pg_name, \
|
||||
.raw_tp_name = tp_name, \
|
||||
.trigger = __trigger_module_test_read, \
|
||||
.needs_testmod = true, \
|
||||
@@ -43,7 +43,9 @@ static int duration = 0;
|
||||
#define FLAVORS_CASE_COMMON(name) \
|
||||
.case_name = #name, \
|
||||
.bpf_obj_file = "test_core_reloc_flavors.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o" \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o", \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_flavors" \
|
||||
|
||||
#define FLAVORS_CASE(name) { \
|
||||
FLAVORS_CASE_COMMON(name), \
|
||||
@@ -66,7 +68,9 @@ static int duration = 0;
|
||||
#define NESTING_CASE_COMMON(name) \
|
||||
.case_name = #name, \
|
||||
.bpf_obj_file = "test_core_reloc_nesting.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o"
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o", \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_nesting" \
|
||||
|
||||
#define NESTING_CASE(name) { \
|
||||
NESTING_CASE_COMMON(name), \
|
||||
@@ -91,7 +95,9 @@ static int duration = 0;
|
||||
#define ARRAYS_CASE_COMMON(name) \
|
||||
.case_name = #name, \
|
||||
.bpf_obj_file = "test_core_reloc_arrays.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o"
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o", \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_arrays" \
|
||||
|
||||
#define ARRAYS_CASE(name) { \
|
||||
ARRAYS_CASE_COMMON(name), \
|
||||
@@ -123,7 +129,9 @@ static int duration = 0;
|
||||
#define PRIMITIVES_CASE_COMMON(name) \
|
||||
.case_name = #name, \
|
||||
.bpf_obj_file = "test_core_reloc_primitives.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o"
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o", \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_primitives" \
|
||||
|
||||
#define PRIMITIVES_CASE(name) { \
|
||||
PRIMITIVES_CASE_COMMON(name), \
|
||||
@@ -158,6 +166,8 @@ static int duration = 0;
|
||||
.e = 5, .f = 6, .g = 7, .h = 8, \
|
||||
}, \
|
||||
.output_len = sizeof(struct core_reloc_mods_output), \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_mods", \
|
||||
}
|
||||
|
||||
#define PTR_AS_ARR_CASE(name) { \
|
||||
@@ -174,6 +184,8 @@ static int duration = 0;
|
||||
.a = 3, \
|
||||
}, \
|
||||
.output_len = sizeof(struct core_reloc_ptr_as_arr), \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_ptr_as_arr", \
|
||||
}
|
||||
|
||||
#define INTS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
|
||||
@@ -190,7 +202,9 @@ static int duration = 0;
|
||||
#define INTS_CASE_COMMON(name) \
|
||||
.case_name = #name, \
|
||||
.bpf_obj_file = "test_core_reloc_ints.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o"
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o", \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_ints"
|
||||
|
||||
#define INTS_CASE(name) { \
|
||||
INTS_CASE_COMMON(name), \
|
||||
@@ -208,7 +222,9 @@ static int duration = 0;
|
||||
#define FIELD_EXISTS_CASE_COMMON(name) \
|
||||
.case_name = #name, \
|
||||
.bpf_obj_file = "test_core_reloc_existence.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o" \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o", \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_existence"
|
||||
|
||||
#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \
|
||||
.case_name = test_name_prefix#name, \
|
||||
@@ -223,6 +239,8 @@ static int duration = 0;
|
||||
.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
|
||||
__VA_ARGS__, \
|
||||
.output_len = sizeof(struct core_reloc_bitfields_output), \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_bitfields", \
|
||||
}, { \
|
||||
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
|
||||
"direct:", name), \
|
||||
@@ -231,7 +249,7 @@ static int duration = 0;
|
||||
.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
|
||||
__VA_ARGS__, \
|
||||
.output_len = sizeof(struct core_reloc_bitfields_output), \
|
||||
.prog_sec_name = "tp_btf/sys_enter", \
|
||||
.prog_name = "test_core_bitfields_direct", \
|
||||
}
|
||||
|
||||
|
||||
@@ -239,17 +257,21 @@ static int duration = 0;
|
||||
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
|
||||
"probed:", name), \
|
||||
.fails = true, \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_bitfields", \
|
||||
}, { \
|
||||
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
|
||||
"direct:", name), \
|
||||
.prog_sec_name = "tp_btf/sys_enter", \
|
||||
.fails = true, \
|
||||
.prog_name = "test_core_bitfields_direct", \
|
||||
}
|
||||
|
||||
#define SIZE_CASE_COMMON(name) \
|
||||
.case_name = #name, \
|
||||
.bpf_obj_file = "test_core_reloc_size.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o"
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o", \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_size"
|
||||
|
||||
#define SIZE_OUTPUT_DATA(type) \
|
||||
STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \
|
||||
@@ -277,8 +299,10 @@ static int duration = 0;
|
||||
|
||||
#define TYPE_BASED_CASE_COMMON(name) \
|
||||
.case_name = #name, \
|
||||
.bpf_obj_file = "test_core_reloc_type_based.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o" \
|
||||
.bpf_obj_file = "test_core_reloc_type_based.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o", \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_type_based"
|
||||
|
||||
#define TYPE_BASED_CASE(name, ...) { \
|
||||
TYPE_BASED_CASE_COMMON(name), \
|
||||
@@ -295,7 +319,9 @@ static int duration = 0;
|
||||
#define TYPE_ID_CASE_COMMON(name) \
|
||||
.case_name = #name, \
|
||||
.bpf_obj_file = "test_core_reloc_type_id.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o" \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o", \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_type_id"
|
||||
|
||||
#define TYPE_ID_CASE(name, setup_fn) { \
|
||||
TYPE_ID_CASE_COMMON(name), \
|
||||
@@ -312,7 +338,9 @@ static int duration = 0;
|
||||
#define ENUMVAL_CASE_COMMON(name) \
|
||||
.case_name = #name, \
|
||||
.bpf_obj_file = "test_core_reloc_enumval.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o" \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o", \
|
||||
.raw_tp_name = "sys_enter", \
|
||||
.prog_name = "test_core_enumval"
|
||||
|
||||
#define ENUMVAL_CASE(name, ...) { \
|
||||
ENUMVAL_CASE_COMMON(name), \
|
||||
@@ -342,7 +370,7 @@ struct core_reloc_test_case {
|
||||
bool fails;
|
||||
bool needs_testmod;
|
||||
bool relaxed_core_relocs;
|
||||
const char *prog_sec_name;
|
||||
const char *prog_name;
|
||||
const char *raw_tp_name;
|
||||
setup_test_fn setup;
|
||||
trigger_test_fn trigger;
|
||||
@@ -497,11 +525,13 @@ static struct core_reloc_test_case test_cases[] = {
|
||||
.comm_len = sizeof("test_progs"),
|
||||
},
|
||||
.output_len = sizeof(struct core_reloc_kernel_output),
|
||||
.raw_tp_name = "sys_enter",
|
||||
.prog_name = "test_core_kernel",
|
||||
},
|
||||
|
||||
/* validate we can find kernel module BTF types for relocs/attach */
|
||||
MODULES_CASE("module_probed", "raw_tp/bpf_testmod_test_read", "bpf_testmod_test_read"),
|
||||
MODULES_CASE("module_direct", "tp_btf/bpf_testmod_test_read", NULL),
|
||||
MODULES_CASE("module_probed", "test_core_module_probed", "bpf_testmod_test_read"),
|
||||
MODULES_CASE("module_direct", "test_core_module_direct", NULL),
|
||||
|
||||
/* validate BPF program can use multiple flavors to match against
|
||||
* single target BTF type
|
||||
@@ -580,6 +610,8 @@ static struct core_reloc_test_case test_cases[] = {
|
||||
.c = 0, /* BUG in clang, should be 3 */
|
||||
},
|
||||
.output_len = sizeof(struct core_reloc_misc_output),
|
||||
.raw_tp_name = "sys_enter",
|
||||
.prog_name = "test_core_misc",
|
||||
},
|
||||
|
||||
/* validate field existence checks */
|
||||
@@ -848,14 +880,9 @@ void test_core_reloc(void)
|
||||
if (!ASSERT_OK_PTR(obj, "obj_open"))
|
||||
goto cleanup;
|
||||
|
||||
probe_name = "raw_tracepoint/sys_enter";
|
||||
tp_name = "sys_enter";
|
||||
if (test_case->prog_sec_name) {
|
||||
probe_name = test_case->prog_sec_name;
|
||||
tp_name = test_case->raw_tp_name; /* NULL for tp_btf */
|
||||
}
|
||||
|
||||
prog = bpf_object__find_program_by_title(obj, probe_name);
|
||||
probe_name = test_case->prog_name;
|
||||
tp_name = test_case->raw_tp_name; /* NULL for tp_btf */
|
||||
prog = bpf_object__find_program_by_name(obj, probe_name);
|
||||
if (CHECK(!prog, "find_probe",
|
||||
"prog '%s' not found\n", probe_name))
|
||||
goto cleanup;
|
||||
|
||||
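Because the lookup is now keyed by the program name, the raw tracepoint to attach to travels separately in raw_tp_name (NULL for tp_btf programs). A hedged sketch of how the two values are typically combined, using names that appear in this diff:

#include <bpf/libbpf.h>

/* Sketch: find the program by its C name, then attach it to the raw
 * tracepoint named by the test case.
 */
static struct bpf_link *attach_core_kernel(struct bpf_object *obj)
{
	struct bpf_program *prog;

	prog = bpf_object__find_program_by_name(obj, "test_core_kernel");
	if (!prog)
		return NULL;
	return bpf_program__attach_raw_tracepoint(prog, "sys_enter");
}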
@@ -101,6 +101,8 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
|
||||
|
||||
for (i = 0; i < prog_cnt; i++) {
|
||||
struct bpf_link_info link_info;
|
||||
struct bpf_program *pos;
|
||||
const char *pos_sec_name;
|
||||
char *tgt_name;
|
||||
__s32 btf_id;
|
||||
|
||||
@@ -109,7 +111,14 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
|
||||
goto close_prog;
|
||||
btf_id = btf__find_by_name_kind(btf, tgt_name + 1, BTF_KIND_FUNC);
|
||||
|
||||
prog[i] = bpf_object__find_program_by_title(obj, prog_name[i]);
|
||||
prog[i] = NULL;
|
||||
bpf_object__for_each_program(pos, obj) {
|
||||
pos_sec_name = bpf_program__section_name(pos);
|
||||
if (pos_sec_name && !strcmp(pos_sec_name, prog_name[i])) {
|
||||
prog[i] = pos;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!ASSERT_OK_PTR(prog[i], prog_name[i]))
|
||||
goto close_prog;
|
||||
|
||||
@@ -211,8 +220,8 @@ static void test_func_replace_verify(void)
|
||||
|
||||
static int test_second_attach(struct bpf_object *obj)
|
||||
{
|
||||
const char *prog_name = "freplace/get_constant";
|
||||
const char *tgt_name = prog_name + 9; /* cut off freplace/ */
|
||||
const char *prog_name = "security_new_get_constant";
|
||||
const char *tgt_name = "get_constant";
|
||||
const char *tgt_obj_file = "./test_pkt_access.o";
|
||||
struct bpf_program *prog = NULL;
|
||||
struct bpf_object *tgt_obj;
|
||||
@@ -220,7 +229,7 @@ static int test_second_attach(struct bpf_object *obj)
|
||||
struct bpf_link *link;
|
||||
int err = 0, tgt_fd;
|
||||
|
||||
prog = bpf_object__find_program_by_title(obj, prog_name);
|
||||
prog = bpf_object__find_program_by_name(obj, prog_name);
|
||||
if (CHECK(!prog, "find_prog", "prog %s not found\n", prog_name))
|
||||
return -ENOENT;
|
||||
|
||||
|
||||
44
tools/testing/selftests/bpf/prog_tests/get_func_args_test.c
Normal file

@@ -0,0 +1,44 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <test_progs.h>
|
||||
#include "get_func_args_test.skel.h"
|
||||
|
||||
void test_get_func_args_test(void)
|
||||
{
|
||||
struct get_func_args_test *skel = NULL;
|
||||
__u32 duration = 0, retval;
|
||||
int err, prog_fd;
|
||||
|
||||
skel = get_func_args_test__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "get_func_args_test__open_and_load"))
|
||||
return;
|
||||
|
||||
err = get_func_args_test__attach(skel);
|
||||
if (!ASSERT_OK(err, "get_func_args_test__attach"))
|
||||
goto cleanup;
|
||||
|
||||
/* This runs bpf_fentry_test* functions and triggers
|
||||
* fentry/fexit programs.
|
||||
*/
|
||||
prog_fd = bpf_program__fd(skel->progs.test1);
|
||||
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
|
||||
NULL, NULL, &retval, &duration);
|
||||
ASSERT_OK(err, "test_run");
|
||||
ASSERT_EQ(retval, 0, "test_run");
|
||||
|
||||
/* This runs bpf_modify_return_test function and triggers
|
||||
* fmod_ret_test and fexit_test programs.
|
||||
*/
|
||||
prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
|
||||
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
|
||||
NULL, NULL, &retval, &duration);
|
||||
ASSERT_OK(err, "test_run");
|
||||
ASSERT_EQ(retval, 1234, "test_run");
|
||||
|
||||
ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
|
||||
ASSERT_EQ(skel->bss->test2_result, 1, "test2_result");
|
||||
ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
|
||||
ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
|
||||
|
||||
cleanup:
|
||||
get_func_args_test__destroy(skel);
|
||||
}
|
||||
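The new test drives its programs with bpf_prog_test_run(); assuming a libbpf that already provides the opts-based variant, the same run could be written roughly as:

#include <bpf/bpf.h>

/* Sketch: opts-based equivalent of the bpf_prog_test_run() calls above. */
static int run_once(int prog_fd)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.repeat = 1,
	);
	int err;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	return err ? err : (int)topts.retval;
}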
@@ -89,7 +89,7 @@ void test_get_stack_raw_tp(void)
|
||||
{
|
||||
const char *file = "./test_get_stack_rawtp.o";
|
||||
const char *file_err = "./test_get_stack_rawtp_err.o";
|
||||
const char *prog_name = "raw_tracepoint/sys_enter";
|
||||
const char *prog_name = "bpf_prog1";
|
||||
int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
|
||||
struct perf_buffer *pb = NULL;
|
||||
struct bpf_link *link = NULL;
|
||||
@@ -107,7 +107,7 @@ void test_get_stack_raw_tp(void)
|
||||
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
|
||||
return;
|
||||
|
||||
prog = bpf_object__find_program_by_title(obj, prog_name);
|
||||
prog = bpf_object__find_program_by_name(obj, prog_name);
|
||||
if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
|
||||
goto close_prog;
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
#include "test_ksyms_btf_null_check.skel.h"
|
||||
#include "test_ksyms_weak.skel.h"
|
||||
#include "test_ksyms_weak.lskel.h"
|
||||
#include "test_ksyms_btf_write_check.skel.h"
|
||||
|
||||
static int duration;
|
||||
|
||||
@@ -137,6 +138,16 @@ cleanup:
|
||||
test_ksyms_weak_lskel__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_write_check(void)
|
||||
{
|
||||
struct test_ksyms_btf_write_check *skel;
|
||||
|
||||
skel = test_ksyms_btf_write_check__open_and_load();
|
||||
ASSERT_ERR_PTR(skel, "unexpected load of a prog writing to ksym memory\n");
|
||||
|
||||
test_ksyms_btf_write_check__destroy(skel);
|
||||
}
|
||||
|
||||
void test_ksyms_btf(void)
|
||||
{
|
||||
int percpu_datasec;
|
||||
@@ -167,4 +178,7 @@ void test_ksyms_btf(void)
|
||||
|
||||
if (test__start_subtest("weak_ksyms_lskel"))
|
||||
test_weak_syms_lskel();
|
||||
|
||||
if (test__start_subtest("write_check"))
|
||||
test_write_check();
|
||||
}
|
||||
|
||||
124
tools/testing/selftests/bpf/prog_tests/libbpf_probes.c
Normal file
@@ -0,0 +1,124 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/* Copyright (c) 2021 Facebook */
|
||||
|
||||
#include <test_progs.h>
|
||||
#include <bpf/btf.h>
|
||||
|
||||
void test_libbpf_probe_prog_types(void)
|
||||
{
|
||||
struct btf *btf;
|
||||
const struct btf_type *t;
|
||||
const struct btf_enum *e;
|
||||
int i, n, id;
|
||||
|
||||
btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
|
||||
if (!ASSERT_OK_PTR(btf, "btf_parse"))
|
||||
return;
|
||||
|
||||
/* find enum bpf_prog_type and enumerate each value */
|
||||
id = btf__find_by_name_kind(btf, "bpf_prog_type", BTF_KIND_ENUM);
|
||||
if (!ASSERT_GT(id, 0, "bpf_prog_type_id"))
|
||||
goto cleanup;
|
||||
t = btf__type_by_id(btf, id);
|
||||
if (!ASSERT_OK_PTR(t, "bpf_prog_type_enum"))
|
||||
goto cleanup;
|
||||
|
||||
for (e = btf_enum(t), i = 0, n = btf_vlen(t); i < n; e++, i++) {
|
||||
const char *prog_type_name = btf__str_by_offset(btf, e->name_off);
|
||||
enum bpf_prog_type prog_type = (enum bpf_prog_type)e->val;
|
||||
int res;
|
||||
|
||||
if (prog_type == BPF_PROG_TYPE_UNSPEC)
|
||||
continue;
|
||||
|
||||
if (!test__start_subtest(prog_type_name))
|
||||
continue;
|
||||
|
||||
res = libbpf_probe_bpf_prog_type(prog_type, NULL);
|
||||
ASSERT_EQ(res, 1, prog_type_name);
|
||||
}
|
||||
|
||||
cleanup:
|
||||
btf__free(btf);
|
||||
}
|
||||
|
||||
void test_libbpf_probe_map_types(void)
|
||||
{
|
||||
struct btf *btf;
|
||||
const struct btf_type *t;
|
||||
const struct btf_enum *e;
|
||||
int i, n, id;
|
||||
|
||||
btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
|
||||
if (!ASSERT_OK_PTR(btf, "btf_parse"))
|
||||
return;
|
||||
|
||||
/* find enum bpf_map_type and enumerate each value */
|
||||
id = btf__find_by_name_kind(btf, "bpf_map_type", BTF_KIND_ENUM);
|
||||
if (!ASSERT_GT(id, 0, "bpf_map_type_id"))
|
||||
goto cleanup;
|
||||
t = btf__type_by_id(btf, id);
|
||||
if (!ASSERT_OK_PTR(t, "bpf_map_type_enum"))
|
||||
goto cleanup;
|
||||
|
||||
for (e = btf_enum(t), i = 0, n = btf_vlen(t); i < n; e++, i++) {
|
||||
const char *map_type_name = btf__str_by_offset(btf, e->name_off);
|
||||
enum bpf_map_type map_type = (enum bpf_map_type)e->val;
|
||||
int res;
|
||||
|
||||
if (map_type == BPF_MAP_TYPE_UNSPEC)
|
||||
continue;
|
||||
|
||||
if (!test__start_subtest(map_type_name))
|
||||
continue;
|
||||
|
||||
res = libbpf_probe_bpf_map_type(map_type, NULL);
|
||||
ASSERT_EQ(res, 1, map_type_name);
|
||||
}
|
||||
|
||||
cleanup:
|
||||
btf__free(btf);
|
||||
}
|
||||
|
||||
void test_libbpf_probe_helpers(void)
|
||||
{
|
||||
#define CASE(prog, helper, supp) { \
|
||||
.prog_type_name = "BPF_PROG_TYPE_" # prog, \
|
||||
.helper_name = "bpf_" # helper, \
|
||||
.prog_type = BPF_PROG_TYPE_ ## prog, \
|
||||
.helper_id = BPF_FUNC_ ## helper, \
|
||||
.supported = supp, \
|
||||
}
|
||||
const struct case_def {
|
||||
const char *prog_type_name;
|
||||
const char *helper_name;
|
||||
enum bpf_prog_type prog_type;
|
||||
enum bpf_func_id helper_id;
|
||||
bool supported;
|
||||
} cases[] = {
|
||||
CASE(KPROBE, unspec, false),
|
||||
CASE(KPROBE, map_lookup_elem, true),
|
||||
CASE(KPROBE, loop, true),
|
||||
|
||||
CASE(KPROBE, ktime_get_coarse_ns, false),
|
||||
CASE(SOCKET_FILTER, ktime_get_coarse_ns, true),
|
||||
|
||||
CASE(KPROBE, sys_bpf, false),
|
||||
CASE(SYSCALL, sys_bpf, true),
|
||||
};
|
||||
size_t case_cnt = ARRAY_SIZE(cases), i;
|
||||
char buf[128];
|
||||
|
||||
for (i = 0; i < case_cnt; i++) {
|
||||
const struct case_def *d = &cases[i];
|
||||
int res;
|
||||
|
||||
snprintf(buf, sizeof(buf), "%s+%s", d->prog_type_name, d->helper_name);
|
||||
|
||||
if (!test__start_subtest(buf))
|
||||
continue;
|
||||
|
||||
res = libbpf_probe_bpf_helper(d->prog_type, d->helper_id, NULL);
|
||||
ASSERT_EQ(res, d->supported, buf);
|
||||
}
|
||||
}
|
||||
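The probes exercised above are plain libbpf API calls, so an application can use the same functions for feature detection; a minimal sketch:

#include <stdio.h>
#include <bpf/libbpf.h>

/* Sketch: probe whether the running kernel supports kprobe programs and
 * whether they may call bpf_map_lookup_elem(). A return of 1 means
 * supported, 0 not supported, negative means the probe itself failed.
 */
static void report_kprobe_support(void)
{
	int prog = libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_KPROBE, NULL);
	int helper = libbpf_probe_bpf_helper(BPF_PROG_TYPE_KPROBE,
					     BPF_FUNC_map_lookup_elem, NULL);

	printf("kprobe progs: %d, map_lookup_elem from kprobe: %d\n",
	       prog, helper);
}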
@@ -18,7 +18,6 @@
|
||||
#include <netinet/in.h>
|
||||
#include <bpf/bpf.h>
|
||||
#include <bpf/libbpf.h>
|
||||
#include "bpf_rlimit.h"
|
||||
#include "bpf_util.h"
|
||||
|
||||
#include "test_progs.h"
|
||||
|
||||
@@ -30,7 +30,6 @@
|
||||
#include <bpf/bpf.h>
|
||||
|
||||
#include "test_progs.h"
|
||||
#include "bpf_rlimit.h"
|
||||
#include "bpf_util.h"
|
||||
#include "cgroup_helpers.h"
|
||||
#include "network_helpers.h"
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
#include "network_helpers.h"
|
||||
#include "cgroup_helpers.h"
|
||||
#include "test_progs.h"
|
||||
#include "bpf_rlimit.h"
|
||||
#include "test_sock_fields.skel.h"
|
||||
|
||||
enum bpf_linum_array_idx {
|
||||
|
||||
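The dropped bpf_rlimit.h include carried a constructor that lifted RLIMIT_MEMLOCK before any BPF syscall; with libbpf now able to bump the limit itself on kernels that still charge BPF objects against memlock, the tests no longer need it. Roughly what the removed workaround did, as a sketch:

#include <sys/resource.h>

/* Sketch of the old per-test workaround: raise the memlock limit so
 * pre-memcg kernels can account maps and programs against it.
 */
static void bump_memlock_rlimit(void)
{
	struct rlimit rlim = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	setrlimit(RLIMIT_MEMLOCK, &rlim);
}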
@@ -136,7 +136,8 @@ static int start_server(void)
|
||||
return fd;
|
||||
}
|
||||
|
||||
static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title)
|
||||
static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title,
|
||||
const char *prog_name)
|
||||
{
|
||||
enum bpf_attach_type attach_type;
|
||||
enum bpf_prog_type prog_type;
|
||||
@@ -145,20 +146,20 @@ static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title)
|
||||
|
||||
err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
|
||||
if (err) {
|
||||
log_err("Failed to deduct types for %s BPF program", title);
|
||||
log_err("Failed to deduct types for %s BPF program", prog_name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
prog = bpf_object__find_program_by_title(obj, title);
|
||||
prog = bpf_object__find_program_by_name(obj, prog_name);
|
||||
if (!prog) {
|
||||
log_err("Failed to find %s BPF program", title);
|
||||
log_err("Failed to find %s BPF program", prog_name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
|
||||
attach_type, 0);
|
||||
if (err) {
|
||||
log_err("Failed to attach %s BPF program", title);
|
||||
log_err("Failed to attach %s BPF program", prog_name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -181,11 +182,11 @@ static void run_test(int cgroup_fd)
|
||||
if (!ASSERT_OK(err, "obj_load"))
|
||||
goto close_bpf_object;
|
||||
|
||||
err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt");
|
||||
err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt", "_getsockopt");
|
||||
if (CHECK_FAIL(err))
|
||||
goto close_bpf_object;
|
||||
|
||||
err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt");
|
||||
err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt", "_setsockopt");
|
||||
if (CHECK_FAIL(err))
|
||||
goto close_bpf_object;
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
void test_stacktrace_map(void)
|
||||
{
|
||||
int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
|
||||
const char *prog_name = "tracepoint/sched/sched_switch";
|
||||
const char *prog_name = "oncpu";
|
||||
int err, prog_fd, stack_trace_len;
|
||||
const char *file = "./test_stacktrace_map.o";
|
||||
__u32 key, val, duration = 0;
|
||||
@@ -16,7 +16,7 @@ void test_stacktrace_map(void)
|
||||
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
|
||||
return;
|
||||
|
||||
prog = bpf_object__find_program_by_title(obj, prog_name);
|
||||
prog = bpf_object__find_program_by_name(obj, prog_name);
|
||||
if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
|
||||
goto close_prog;
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
void test_stacktrace_map_raw_tp(void)
|
||||
{
|
||||
const char *prog_name = "tracepoint/sched/sched_switch";
|
||||
const char *prog_name = "oncpu";
|
||||
int control_map_fd, stackid_hmap_fd, stackmap_fd;
|
||||
const char *file = "./test_stacktrace_map.o";
|
||||
__u32 key, val, duration = 0;
|
||||
@@ -16,7 +16,7 @@ void test_stacktrace_map_raw_tp(void)
|
||||
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
|
||||
return;
|
||||
|
||||
prog = bpf_object__find_program_by_title(obj, prog_name);
|
||||
prog = bpf_object__find_program_by_name(obj, prog_name);
|
||||
if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
|
||||
goto close_prog;
|
||||
|
||||
|
||||
@@ -28,10 +28,6 @@ static unsigned int duration;
|
||||
struct storage {
|
||||
void *inode;
|
||||
unsigned int value;
|
||||
/* Lock ensures that spin locked versions of local storage operations
|
||||
* also work; most operations in these tests are still single threaded
|
||||
*/
|
||||
struct bpf_spin_lock lock;
|
||||
};
|
||||
|
||||
/* Fork and exec the provided rm binary and return the exit code of the
|
||||
@@ -66,27 +62,24 @@ static int run_self_unlink(int *monitored_pid, const char *rm_path)
|
||||
|
||||
static bool check_syscall_operations(int map_fd, int obj_fd)
|
||||
{
|
||||
struct storage val = { .value = TEST_STORAGE_VALUE, .lock = { 0 } },
|
||||
lookup_val = { .value = 0, .lock = { 0 } };
|
||||
struct storage val = { .value = TEST_STORAGE_VALUE },
|
||||
lookup_val = { .value = 0 };
|
||||
int err;
|
||||
|
||||
/* Looking up an existing element should fail initially */
|
||||
err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val,
|
||||
BPF_F_LOCK);
|
||||
err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0);
|
||||
if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
|
||||
"err:%d errno:%d\n", err, errno))
|
||||
return false;
|
||||
|
||||
/* Create a new element */
|
||||
err = bpf_map_update_elem(map_fd, &obj_fd, &val,
|
||||
BPF_NOEXIST | BPF_F_LOCK);
|
||||
err = bpf_map_update_elem(map_fd, &obj_fd, &val, BPF_NOEXIST);
|
||||
if (CHECK(err < 0, "bpf_map_update_elem", "err:%d errno:%d\n", err,
|
||||
errno))
|
||||
return false;
|
||||
|
||||
/* Lookup the newly created element */
|
||||
err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val,
|
||||
BPF_F_LOCK);
|
||||
err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0);
|
||||
if (CHECK(err < 0, "bpf_map_lookup_elem", "err:%d errno:%d", err,
|
||||
errno))
|
||||
return false;
|
||||
@@ -102,8 +95,7 @@ static bool check_syscall_operations(int map_fd, int obj_fd)
|
||||
return false;
|
||||
|
||||
/* The lookup should fail, now that the element has been deleted */
|
||||
err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val,
|
||||
BPF_F_LOCK);
|
||||
err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0);
|
||||
if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
|
||||
"err:%d errno:%d\n", err, errno))
|
||||
return false;
|
||||
|
||||
@@ -56,11 +56,11 @@ static void setaffinity(void)
|
||||
|
||||
void test_test_overhead(void)
|
||||
{
|
||||
const char *kprobe_name = "kprobe/__set_task_comm";
|
||||
const char *kretprobe_name = "kretprobe/__set_task_comm";
|
||||
const char *raw_tp_name = "raw_tp/task_rename";
|
||||
const char *fentry_name = "fentry/__set_task_comm";
|
||||
const char *fexit_name = "fexit/__set_task_comm";
|
||||
const char *kprobe_name = "prog1";
|
||||
const char *kretprobe_name = "prog2";
|
||||
const char *raw_tp_name = "prog3";
|
||||
const char *fentry_name = "prog4";
|
||||
const char *fexit_name = "prog5";
|
||||
const char *kprobe_func = "__set_task_comm";
|
||||
struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog;
|
||||
struct bpf_program *fentry_prog, *fexit_prog;
|
||||
@@ -76,23 +76,23 @@ void test_test_overhead(void)
|
||||
if (!ASSERT_OK_PTR(obj, "obj_open_file"))
|
||||
return;
|
||||
|
||||
kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name);
|
||||
kprobe_prog = bpf_object__find_program_by_name(obj, kprobe_name);
|
||||
if (CHECK(!kprobe_prog, "find_probe",
|
||||
"prog '%s' not found\n", kprobe_name))
|
||||
goto cleanup;
|
||||
kretprobe_prog = bpf_object__find_program_by_title(obj, kretprobe_name);
|
||||
kretprobe_prog = bpf_object__find_program_by_name(obj, kretprobe_name);
|
||||
if (CHECK(!kretprobe_prog, "find_probe",
|
||||
"prog '%s' not found\n", kretprobe_name))
|
||||
goto cleanup;
|
||||
raw_tp_prog = bpf_object__find_program_by_title(obj, raw_tp_name);
|
||||
raw_tp_prog = bpf_object__find_program_by_name(obj, raw_tp_name);
|
||||
if (CHECK(!raw_tp_prog, "find_probe",
|
||||
"prog '%s' not found\n", raw_tp_name))
|
||||
goto cleanup;
|
||||
fentry_prog = bpf_object__find_program_by_title(obj, fentry_name);
|
||||
fentry_prog = bpf_object__find_program_by_name(obj, fentry_name);
|
||||
if (CHECK(!fentry_prog, "find_probe",
|
||||
"prog '%s' not found\n", fentry_name))
|
||||
goto cleanup;
|
||||
fexit_prog = bpf_object__find_program_by_title(obj, fexit_name);
|
||||
fexit_prog = bpf_object__find_program_by_name(obj, fexit_name);
|
||||
if (CHECK(!fexit_prog, "find_probe",
|
||||
"prog '%s' not found\n", fexit_name))
|
||||
goto cleanup;
|
||||
|
||||
167
tools/testing/selftests/bpf/prog_tests/test_strncmp.c
Normal file
@@ -0,0 +1,167 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
|
||||
#include <test_progs.h>
|
||||
#include "strncmp_test.skel.h"
|
||||
|
||||
static int trigger_strncmp(const struct strncmp_test *skel)
|
||||
{
|
||||
int cmp;
|
||||
|
||||
usleep(1);
|
||||
|
||||
cmp = skel->bss->cmp_ret;
|
||||
if (cmp > 0)
|
||||
return 1;
|
||||
if (cmp < 0)
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Compare str and target after making str[i] != target[i].
|
||||
* When exp is -1, make str[i] < target[i] and delta = -1.
|
||||
*/
|
||||
static void strncmp_full_str_cmp(struct strncmp_test *skel, const char *name,
|
||||
int exp)
|
||||
{
|
||||
size_t nr = sizeof(skel->bss->str);
|
||||
char *str = skel->bss->str;
|
||||
int delta = exp;
|
||||
int got;
|
||||
size_t i;
|
||||
|
||||
memcpy(str, skel->rodata->target, nr);
|
||||
for (i = 0; i < nr - 1; i++) {
|
||||
str[i] += delta;
|
||||
|
||||
got = trigger_strncmp(skel);
|
||||
ASSERT_EQ(got, exp, name);
|
||||
|
||||
str[i] -= delta;
|
||||
}
|
||||
}
|
||||
|
||||
static void test_strncmp_ret(void)
|
||||
{
|
||||
struct strncmp_test *skel;
|
||||
struct bpf_program *prog;
|
||||
int err, got;
|
||||
|
||||
skel = strncmp_test__open();
|
||||
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
|
||||
return;
|
||||
|
||||
bpf_object__for_each_program(prog, skel->obj)
|
||||
bpf_program__set_autoload(prog, false);
|
||||
|
||||
bpf_program__set_autoload(skel->progs.do_strncmp, true);
|
||||
|
||||
err = strncmp_test__load(skel);
|
||||
if (!ASSERT_EQ(err, 0, "strncmp_test load"))
|
||||
goto out;
|
||||
|
||||
err = strncmp_test__attach(skel);
|
||||
if (!ASSERT_EQ(err, 0, "strncmp_test attach"))
|
||||
goto out;
|
||||
|
||||
skel->bss->target_pid = getpid();
|
||||
|
||||
/* Empty str */
|
||||
skel->bss->str[0] = '\0';
|
||||
got = trigger_strncmp(skel);
|
||||
ASSERT_EQ(got, -1, "strncmp: empty str");
|
||||
|
||||
/* Same string */
|
||||
memcpy(skel->bss->str, skel->rodata->target, sizeof(skel->bss->str));
|
||||
got = trigger_strncmp(skel);
|
||||
ASSERT_EQ(got, 0, "strncmp: same str");
|
||||
|
||||
/* Not-null-terminated string */
|
||||
memcpy(skel->bss->str, skel->rodata->target, sizeof(skel->bss->str));
|
||||
skel->bss->str[sizeof(skel->bss->str) - 1] = 'A';
|
||||
got = trigger_strncmp(skel);
|
||||
ASSERT_EQ(got, 1, "strncmp: not-null-term str");
|
||||
|
||||
strncmp_full_str_cmp(skel, "strncmp: less than", -1);
|
||||
strncmp_full_str_cmp(skel, "strncmp: greater than", 1);
|
||||
out:
|
||||
strncmp_test__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_strncmp_bad_not_const_str_size(void)
|
||||
{
|
||||
struct strncmp_test *skel;
|
||||
struct bpf_program *prog;
|
||||
int err;
|
||||
|
||||
skel = strncmp_test__open();
|
||||
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
|
||||
return;
|
||||
|
||||
bpf_object__for_each_program(prog, skel->obj)
|
||||
bpf_program__set_autoload(prog, false);
|
||||
|
||||
bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size,
|
||||
true);
|
||||
|
||||
err = strncmp_test__load(skel);
|
||||
ASSERT_ERR(err, "strncmp_test load bad_not_const_str_size");
|
||||
|
||||
strncmp_test__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_strncmp_bad_writable_target(void)
|
||||
{
|
||||
struct strncmp_test *skel;
|
||||
struct bpf_program *prog;
|
||||
int err;
|
||||
|
||||
skel = strncmp_test__open();
|
||||
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
|
||||
return;
|
||||
|
||||
bpf_object__for_each_program(prog, skel->obj)
|
||||
bpf_program__set_autoload(prog, false);
|
||||
|
||||
bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target,
|
||||
true);
|
||||
|
||||
err = strncmp_test__load(skel);
|
||||
ASSERT_ERR(err, "strncmp_test load bad_writable_target");
|
||||
|
||||
strncmp_test__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_strncmp_bad_not_null_term_target(void)
|
||||
{
|
||||
struct strncmp_test *skel;
|
||||
struct bpf_program *prog;
|
||||
int err;
|
||||
|
||||
skel = strncmp_test__open();
|
||||
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
|
||||
return;
|
||||
|
||||
bpf_object__for_each_program(prog, skel->obj)
|
||||
bpf_program__set_autoload(prog, false);
|
||||
|
||||
bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target,
|
||||
true);
|
||||
|
||||
err = strncmp_test__load(skel);
|
||||
ASSERT_ERR(err, "strncmp_test load bad_not_null_term_target");
|
||||
|
||||
strncmp_test__destroy(skel);
|
||||
}
|
||||
|
||||
void test_test_strncmp(void)
|
||||
{
|
||||
if (test__start_subtest("strncmp_ret"))
|
||||
test_strncmp_ret();
|
||||
if (test__start_subtest("strncmp_bad_not_const_str_size"))
|
||||
test_strncmp_bad_not_const_str_size();
|
||||
if (test__start_subtest("strncmp_bad_writable_target"))
|
||||
test_strncmp_bad_writable_target();
|
||||
if (test__start_subtest("strncmp_bad_not_null_term_target"))
|
||||
test_strncmp_bad_not_null_term_target();
|
||||
}
|
||||
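The negative subtests above all repeat the same open / disable-autoload / enable-one / load dance; pulled into a helper it reads roughly as follows (the helper name is illustrative):

#include <bpf/libbpf.h>
#include "strncmp_test.skel.h"

/* Sketch: load only one program from the skeleton and return the result. */
static int load_single_prog(struct strncmp_test *skel, struct bpf_program *only)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, skel->obj)
		bpf_program__set_autoload(prog, false);
	bpf_program__set_autoload(only, true);

	return strncmp_test__load(skel);
}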
@@ -35,7 +35,7 @@ static struct bpf_link *load(struct bpf_object *obj, const char *name)
|
||||
struct bpf_program *prog;
|
||||
int duration = 0;
|
||||
|
||||
prog = bpf_object__find_program_by_title(obj, name);
|
||||
prog = bpf_object__find_program_by_name(obj, name);
|
||||
if (CHECK(!prog, "find_probe", "prog '%s' not found\n", name))
|
||||
return ERR_PTR(-EINVAL);
|
||||
return bpf_program__attach_trace(prog);
|
||||
@@ -44,8 +44,8 @@ static struct bpf_link *load(struct bpf_object *obj, const char *name)
|
||||
/* TODO: use different target function to run in concurrent mode */
|
||||
void serial_test_trampoline_count(void)
|
||||
{
|
||||
const char *fentry_name = "fentry/__set_task_comm";
|
||||
const char *fexit_name = "fexit/__set_task_comm";
|
||||
const char *fentry_name = "prog1";
|
||||
const char *fexit_name = "prog2";
|
||||
const char *object = "test_trampoline_count.o";
|
||||
struct inst inst[MAX_TRAMP_PROGS] = {};
|
||||
int err, i = 0, duration = 0;
|
||||
|
||||
123
tools/testing/selftests/bpf/progs/get_func_args_test.c
Normal file
@@ -0,0 +1,123 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/bpf.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include <errno.h>
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
||||
__u64 test1_result = 0;
|
||||
SEC("fentry/bpf_fentry_test1")
|
||||
int BPF_PROG(test1)
|
||||
{
|
||||
__u64 cnt = bpf_get_func_arg_cnt(ctx);
|
||||
__u64 a = 0, z = 0, ret = 0;
|
||||
__s64 err;
|
||||
|
||||
test1_result = cnt == 1;
|
||||
|
||||
/* valid arguments */
|
||||
err = bpf_get_func_arg(ctx, 0, &a);
|
||||
|
||||
/* We need to cast access to traced function argument values with
|
||||
* proper type cast, because trampoline uses type specific instruction
|
||||
* to save it, like for 'int a' with 32-bit mov like:
|
||||
*
|
||||
* mov %edi,-0x8(%rbp)
|
||||
*
|
||||
* so the upper 4 bytes are not zeroed.
|
||||
*/
|
||||
test1_result &= err == 0 && ((int) a == 1);
|
||||
|
||||
/* not valid argument */
|
||||
err = bpf_get_func_arg(ctx, 1, &z);
|
||||
test1_result &= err == -EINVAL;
|
||||
|
||||
/* return value fails in fentry */
|
||||
err = bpf_get_func_ret(ctx, &ret);
|
||||
test1_result &= err == -EOPNOTSUPP;
|
||||
return 0;
|
||||
}
|
||||
|
||||
__u64 test2_result = 0;
|
||||
SEC("fexit/bpf_fentry_test2")
|
||||
int BPF_PROG(test2)
|
||||
{
|
||||
__u64 cnt = bpf_get_func_arg_cnt(ctx);
|
||||
__u64 a = 0, b = 0, z = 0, ret = 0;
|
||||
__s64 err;
|
||||
|
||||
test2_result = cnt == 2;
|
||||
|
||||
/* valid arguments */
|
||||
err = bpf_get_func_arg(ctx, 0, &a);
|
||||
test2_result &= err == 0 && (int) a == 2;
|
||||
|
||||
err = bpf_get_func_arg(ctx, 1, &b);
|
||||
test2_result &= err == 0 && b == 3;
|
||||
|
||||
/* not valid argument */
|
||||
err = bpf_get_func_arg(ctx, 2, &z);
|
||||
test2_result &= err == -EINVAL;
|
||||
|
||||
/* return value */
|
||||
err = bpf_get_func_ret(ctx, &ret);
|
||||
test2_result &= err == 0 && ret == 5;
|
||||
return 0;
|
||||
}
|
||||
|
||||
__u64 test3_result = 0;
|
||||
SEC("fmod_ret/bpf_modify_return_test")
|
||||
int BPF_PROG(fmod_ret_test, int _a, int *_b, int _ret)
|
||||
{
|
||||
__u64 cnt = bpf_get_func_arg_cnt(ctx);
|
||||
__u64 a = 0, b = 0, z = 0, ret = 0;
|
||||
__s64 err;
|
||||
|
||||
test3_result = cnt == 2;
|
||||
|
||||
/* valid arguments */
|
||||
err = bpf_get_func_arg(ctx, 0, &a);
|
||||
test3_result &= err == 0 && ((int) a == 1);
|
||||
|
||||
err = bpf_get_func_arg(ctx, 1, &b);
|
||||
test3_result &= err == 0 && ((int *) b == _b);
|
||||
|
||||
/* not valid argument */
|
||||
err = bpf_get_func_arg(ctx, 2, &z);
|
||||
test3_result &= err == -EINVAL;
|
||||
|
||||
/* return value */
|
||||
err = bpf_get_func_ret(ctx, &ret);
|
||||
test3_result &= err == 0 && ret == 0;
|
||||
|
||||
/* change return value, it's checked in fexit_test program */
|
||||
return 1234;
|
||||
}
|
||||
|
||||
__u64 test4_result = 0;
|
||||
SEC("fexit/bpf_modify_return_test")
|
||||
int BPF_PROG(fexit_test, int _a, int *_b, int _ret)
|
||||
{
|
||||
__u64 cnt = bpf_get_func_arg_cnt(ctx);
|
||||
__u64 a = 0, b = 0, z = 0, ret = 0;
|
||||
__s64 err;
|
||||
|
||||
test4_result = cnt == 2;
|
||||
|
||||
/* valid arguments */
|
||||
err = bpf_get_func_arg(ctx, 0, &a);
|
||||
test4_result &= err == 0 && ((int) a == 1);
|
||||
|
||||
err = bpf_get_func_arg(ctx, 1, &b);
|
||||
test4_result &= err == 0 && ((int *) b == _b);
|
||||
|
||||
/* not valid argument */
|
||||
err = bpf_get_func_arg(ctx, 2, &z);
|
||||
test4_result &= err == -EINVAL;
|
||||
|
||||
/* return value */
|
||||
err = bpf_get_func_ret(ctx, &ret);
|
||||
test4_result &= err == 0 && ret == 1234;
|
||||
return 0;
|
||||
}
|
||||
@@ -20,7 +20,6 @@ int sk_storage_result = -1;
|
||||
struct local_storage {
|
||||
struct inode *exec_inode;
|
||||
__u32 value;
|
||||
struct bpf_spin_lock lock;
|
||||
};
|
||||
|
||||
struct {
|
||||
@@ -58,9 +57,7 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
|
||||
bpf_get_current_task_btf(), 0, 0);
|
||||
if (storage) {
|
||||
/* Don't let an executable delete itself */
|
||||
bpf_spin_lock(&storage->lock);
|
||||
is_self_unlink = storage->exec_inode == victim->d_inode;
|
||||
bpf_spin_unlock(&storage->lock);
|
||||
if (is_self_unlink)
|
||||
return -EPERM;
|
||||
}
|
||||
@@ -68,7 +65,7 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("lsm/inode_rename")
|
||||
SEC("lsm.s/inode_rename")
|
||||
int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
|
||||
struct inode *new_dir, struct dentry *new_dentry,
|
||||
unsigned int flags)
|
||||
@@ -89,10 +86,8 @@ int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
|
||||
if (!storage)
|
||||
return 0;
|
||||
|
||||
bpf_spin_lock(&storage->lock);
|
||||
if (storage->value != DUMMY_STORAGE_VALUE)
|
||||
inode_storage_result = -1;
|
||||
bpf_spin_unlock(&storage->lock);
|
||||
|
||||
err = bpf_inode_storage_delete(&inode_storage_map, old_dentry->d_inode);
|
||||
if (!err)
|
||||
@@ -101,7 +96,7 @@ int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("lsm/socket_bind")
|
||||
SEC("lsm.s/socket_bind")
|
||||
int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
|
||||
int addrlen)
|
||||
{
|
||||
@@ -117,10 +112,8 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
|
||||
if (!storage)
|
||||
return 0;
|
||||
|
||||
bpf_spin_lock(&storage->lock);
|
||||
if (storage->value != DUMMY_STORAGE_VALUE)
|
||||
sk_storage_result = -1;
|
||||
bpf_spin_unlock(&storage->lock);
|
||||
|
||||
err = bpf_sk_storage_delete(&sk_storage_map, sock->sk);
|
||||
if (!err)
|
||||
@@ -129,7 +122,7 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("lsm/socket_post_create")
|
||||
SEC("lsm.s/socket_post_create")
|
||||
int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
|
||||
int protocol, int kern)
|
||||
{
|
||||
@@ -144,9 +137,7 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
|
||||
if (!storage)
|
||||
return 0;
|
||||
|
||||
bpf_spin_lock(&storage->lock);
|
||||
storage->value = DUMMY_STORAGE_VALUE;
|
||||
bpf_spin_unlock(&storage->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -154,7 +145,7 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
|
||||
/* This uses the local storage to remember the inode of the binary that a
|
||||
* process was originally executing.
|
||||
*/
|
||||
SEC("lsm/bprm_committed_creds")
|
||||
SEC("lsm.s/bprm_committed_creds")
|
||||
void BPF_PROG(exec, struct linux_binprm *bprm)
|
||||
{
|
||||
__u32 pid = bpf_get_current_pid_tgid() >> 32;
|
||||
@@ -166,18 +157,13 @@ void BPF_PROG(exec, struct linux_binprm *bprm)
|
||||
storage = bpf_task_storage_get(&task_storage_map,
|
||||
bpf_get_current_task_btf(), 0,
|
||||
BPF_LOCAL_STORAGE_GET_F_CREATE);
|
||||
if (storage) {
|
||||
bpf_spin_lock(&storage->lock);
|
||||
if (storage)
|
||||
storage->exec_inode = bprm->file->f_inode;
|
||||
bpf_spin_unlock(&storage->lock);
|
||||
}
|
||||
|
||||
storage = bpf_inode_storage_get(&inode_storage_map, bprm->file->f_inode,
|
||||
0, BPF_LOCAL_STORAGE_GET_F_CREATE);
|
||||
if (!storage)
|
||||
return;
|
||||
|
||||
bpf_spin_lock(&storage->lock);
|
||||
storage->value = DUMMY_STORAGE_VALUE;
|
||||
bpf_spin_unlock(&storage->lock);
|
||||
}
|
||||
|
||||
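The SEC("lsm/...") to SEC("lsm.s/...") switches above make these hooks sleepable, which is also why the bpf_spin_lock use had to go. A minimal sleepable LSM program, as a sketch (the hook chosen here is hypothetical and not part of this test):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* Sketch: "lsm.s/" marks the program sleepable, so it may use sleepable
 * helpers but cannot take a bpf_spin_lock.
 */
SEC("lsm.s/file_open")
int BPF_PROG(sleepable_file_open, struct file *file)
{
	return 0;
}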
50
tools/testing/selftests/bpf/progs/strncmp_bench.c
Normal file
@@ -0,0 +1,50 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
|
||||
#include <linux/types.h>
|
||||
#include <linux/bpf.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
|
||||
#define STRNCMP_STR_SZ 4096
|
||||
|
||||
/* Will be updated by benchmark before program loading */
|
||||
const volatile unsigned int cmp_str_len = 1;
|
||||
const char target[STRNCMP_STR_SZ];
|
||||
|
||||
long hits = 0;
|
||||
char str[STRNCMP_STR_SZ];
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
||||
static __always_inline int local_strncmp(const char *s1, unsigned int sz,
|
||||
const char *s2)
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < sz; i++) {
|
||||
/* E.g. 0xff > 0x31 */
|
||||
ret = (unsigned char)s1[i] - (unsigned char)s2[i];
|
||||
if (ret || !s1[i])
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
SEC("tp/syscalls/sys_enter_getpgid")
|
||||
int strncmp_no_helper(void *ctx)
|
||||
{
|
||||
if (local_strncmp(str, cmp_str_len + 1, target) < 0)
|
||||
__sync_add_and_fetch(&hits, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("tp/syscalls/sys_enter_getpgid")
|
||||
int strncmp_helper(void *ctx)
|
||||
{
|
||||
if (bpf_strncmp(str, cmp_str_len + 1, target) < 0)
|
||||
__sync_add_and_fetch(&hits, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
54
tools/testing/selftests/bpf/progs/strncmp_test.c
Normal file
@@ -0,0 +1,54 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
|
||||
#include <stdbool.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/bpf.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
|
||||
#define STRNCMP_STR_SZ 8
|
||||
|
||||
const char target[STRNCMP_STR_SZ] = "EEEEEEE";
|
||||
char str[STRNCMP_STR_SZ];
|
||||
int cmp_ret = 0;
|
||||
int target_pid = 0;
|
||||
|
||||
const char no_str_target[STRNCMP_STR_SZ] = "12345678";
|
||||
char writable_target[STRNCMP_STR_SZ];
|
||||
unsigned int no_const_str_size = STRNCMP_STR_SZ;
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
||||
SEC("tp/syscalls/sys_enter_nanosleep")
|
||||
int do_strncmp(void *ctx)
|
||||
{
|
||||
if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
|
||||
return 0;
|
||||
|
||||
cmp_ret = bpf_strncmp(str, STRNCMP_STR_SZ, target);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("tp/syscalls/sys_enter_nanosleep")
|
||||
int strncmp_bad_not_const_str_size(void *ctx)
|
||||
{
|
||||
/* The value of string size is not const, so will fail */
|
||||
cmp_ret = bpf_strncmp(str, no_const_str_size, target);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("tp/syscalls/sys_enter_nanosleep")
|
||||
int strncmp_bad_writable_target(void *ctx)
|
||||
{
|
||||
/* Compared target is not read-only, so will fail */
|
||||
cmp_ret = bpf_strncmp(str, STRNCMP_STR_SZ, writable_target);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("tp/syscalls/sys_enter_nanosleep")
|
||||
int strncmp_bad_not_null_term_target(void *ctx)
|
||||
{
|
||||
/* Compared target is not null-terminated, so will fail */
|
||||
cmp_ret = bpf_strncmp(str, STRNCMP_STR_SZ, no_str_target);
|
||||
return 0;
|
||||
}
|
||||
@@ -0,0 +1,29 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2021 Google */
|
||||
|
||||
#include "vmlinux.h"
|
||||
|
||||
#include <bpf/bpf_helpers.h>
|
||||
|
||||
extern const int bpf_prog_active __ksym; /* int type global var. */
|
||||
|
||||
SEC("raw_tp/sys_enter")
|
||||
int handler(const void *ctx)
|
||||
{
|
||||
int *active;
|
||||
__u32 cpu;
|
||||
|
||||
cpu = bpf_get_smp_processor_id();
|
||||
active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
|
||||
if (active) {
|
||||
/* Kernel memory obtained from bpf_{per,this}_cpu_ptr
|
||||
* is read-only, should _not_ pass verification.
|
||||
*/
|
||||
/* WRITE_ONCE */
|
||||
*(volatile int *)active = -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
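The program above is expected to be rejected because memory returned by bpf_per_cpu_ptr() for a const ksym is read-only; reading it is fine. The allowed counterpart, as a sketch (hypothetical program, not part of the test):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

extern const int bpf_prog_active __ksym;

int last_active = 0;

SEC("raw_tp/sys_enter")
int read_handler(const void *ctx)
{
	int *active;

	active = (int *)bpf_per_cpu_ptr(&bpf_prog_active,
					bpf_get_smp_processor_id());
	if (active)
		last_active = *active;	/* read-only access is allowed */
	return 0;
}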
@@ -7,9 +7,15 @@
|
||||
|
||||
/* do nothing, just make sure we can link successfully */
|
||||
|
||||
static void dump_printf(void *ctx, const char *fmt, va_list args)
|
||||
{
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
struct btf_dump_opts opts = { };
|
||||
struct test_core_extern *skel;
|
||||
struct btf *btf;
|
||||
|
||||
/* libbpf.h */
|
||||
libbpf_set_print(NULL);
|
||||
@@ -18,7 +24,8 @@ int main(int argc, char *argv[])
|
||||
bpf_prog_get_fd_by_id(0);
|
||||
|
||||
/* btf.h */
|
||||
btf__new(NULL, 0);
|
||||
btf = btf__new(NULL, 0);
|
||||
btf_dump__new(btf, dump_printf, nullptr, &opts);
|
||||
|
||||
/* BPF skeleton */
|
||||
skel = test_core_extern__open_and_load();
|
||||
|
||||
@@ -23,7 +23,6 @@
|
||||
#include <bpf/libbpf.h>
|
||||
|
||||
#include "bpf_util.h"
|
||||
#include "bpf_rlimit.h"
|
||||
#include "test_maps.h"
|
||||
#include "testing_helpers.h"
|
||||
|
||||
|
||||
@@ -4,7 +4,6 @@
|
||||
#define _GNU_SOURCE
|
||||
#include "test_progs.h"
|
||||
#include "cgroup_helpers.h"
|
||||
#include "bpf_rlimit.h"
|
||||
#include <argp.h>
|
||||
#include <pthread.h>
|
||||
#include <sched.h>
|
||||
@@ -1342,7 +1341,6 @@ int main(int argc, char **argv)
|
||||
|
||||
/* Use libbpf 1.0 API mode */
|
||||
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
|
||||
|
||||
libbpf_set_print(libbpf_print_fn);
|
||||
|
||||
srand(time(NULL));
|
||||
|
||||
@@ -41,7 +41,6 @@
|
||||
# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
|
||||
# endif
|
||||
#endif
|
||||
#include "bpf_rlimit.h"
|
||||
#include "bpf_rand.h"
|
||||
#include "bpf_util.h"
|
||||
#include "test_btf.h"
|
||||
@@ -701,22 +700,18 @@ static int create_sk_storage_map(void)
|
||||
|
||||
static int create_map_timer(void)
|
||||
{
|
||||
struct bpf_create_map_attr attr = {
|
||||
.name = "test_map",
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.key_size = 4,
|
||||
.value_size = 16,
|
||||
.max_entries = 1,
|
||||
LIBBPF_OPTS(bpf_map_create_opts, opts,
|
||||
.btf_key_type_id = 1,
|
||||
.btf_value_type_id = 5,
|
||||
};
|
||||
);
|
||||
int fd, btf_fd;
|
||||
|
||||
btf_fd = load_btf();
|
||||
if (btf_fd < 0)
|
||||
return -1;
|
||||
attr.btf_fd = btf_fd;
|
||||
fd = bpf_create_map_xattr(&attr);
|
||||
|
||||
opts.btf_fd = btf_fd;
|
||||
fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 16, 1, &opts);
|
||||
if (fd < 0)
|
||||
printf("Failed to create map with timer\n");
|
||||
return fd;
|
||||
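create_map_timer() above is ported from the removed bpf_create_map_xattr() to the opts-based bpf_map_create(); the general shape of the new call, consolidated as a sketch:

#include <bpf/bpf.h>

/* Sketch: create a BTF-annotated array map with the opts-based API. */
static int create_timer_array(int btf_fd)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.btf_fd = btf_fd,
		.btf_key_type_id = 1,
		.btf_value_type_id = 5,
	);

	return bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 16, 1, &opts);
}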
@@ -1399,6 +1394,9 @@ int main(int argc, char **argv)
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
/* Use libbpf 1.0 API mode */
|
||||
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
|
||||
|
||||
bpf_semi_rand_init();
|
||||
return do_test(unpriv, from, to);
|
||||
}
|
||||
|
||||
12
tools/testing/selftests/bpf/verifier/btf_ctx_access.c
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"btf_ctx_access accept",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 8), /* load 2nd argument value (int pointer) */
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = ACCEPT,
|
||||
.prog_type = BPF_PROG_TYPE_TRACING,
|
||||
.expected_attach_type = BPF_TRACE_FENTRY,
|
||||
.kfunc = "bpf_modify_return_test",
|
||||
},
|
||||
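The raw-instruction test above loads ctx+8, i.e. the second argument of bpf_modify_return_test, straight from the tracing context. Written as BPF C with the usual macros it would look roughly like this (program name is illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

__u64 saw_arg2 = 0;

/* Sketch: the C equivalent of the ctx+8 access exercised by the raw test. */
SEC("fentry/bpf_modify_return_test")
int BPF_PROG(btf_ctx_access_c, int a, int *b)
{
	saw_arg2 = !!b;	/* b is the argument held at ctx offset 8 */
	return 0;
}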
@@ -32,7 +32,7 @@ ROOTFS_IMAGE="root.img"
|
||||
OUTPUT_DIR="$HOME/.bpf_selftests"
|
||||
KCONFIG_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vmtest/configs/config-latest.${ARCH}"
|
||||
KCONFIG_API_URL="https://api.github.com/repos/libbpf/libbpf/contents/travis-ci/vmtest/configs/config-latest.${ARCH}"
|
||||
INDEX_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vmtest/configs/INDEX"
|
||||
INDEX_URL="https://raw.githubusercontent.com/libbpf/ci/master/INDEX"
|
||||
NUM_COMPILE_JOBS="$(nproc)"
|
||||
LOG_FILE_BASE="$(date +"bpf_selftests.%Y-%m-%d_%H-%M-%S")"
|
||||
LOG_FILE="${LOG_FILE_BASE}.log"
|
||||
|
||||