Alexei Starovoitov says:

====================
pull-request: bpf-next 2022-03-21 v2

We've added 137 non-merge commits during the last 17 day(s) which contain
a total of 143 files changed, 7123 insertions(+), 1092 deletions(-).

The main changes are:

1) Custom SEC() handling in libbpf, from Andrii.

2) Subskeleton support, from Delyan.

3) Use btf_tag to recognize __percpu pointers in the verifier, from Hao.

4) Fix net.core.bpf_jit_harden race, from Hou.

5) Fix bpf_sk_lookup remote_port on big-endian, from Jakub.

6) Introduce fprobe (multi kprobe) _without_ arch bits, from Masami.
The arch-specific bits will come later.

7) Introduce multi_kprobe bpf programs on top of fprobe, from Jiri.

8) Enable non-atomic allocations in local storage, from Joanne.

9) Various var_off ptr_to_btf_id fixes, from Kumar.

10) bpf_ima_file_hash helper, from Roberto.

11) Add "live packet" mode for XDP in BPF_PROG_RUN, from Toke.
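For 11), the new mode is driven through the existing BPF_PROG_RUN
(bpf_prog_test_run_opts()) interface. A minimal sketch of the intended
usage, assuming a loaded XDP program fd and a caller-supplied Ethernet
frame (function and variable names here are illustrative, not taken from
the series):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Sketch: run an XDP program in "live packet" mode, where XDP_TX and
 * XDP_REDIRECT verdicts are actually transmitted instead of only being
 * reported back to user space.
 */
static int run_xdp_live(int prog_fd, void *frame, __u32 len)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = frame,
		.data_size_in = len,
		.repeat = 64,
		.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
	);

	return bpf_prog_test_run_opts(prog_fd, &opts);
}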

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (137 commits)
  selftests/bpf: Fix kprobe_multi test.
  Revert "rethook: x86: Add rethook x86 implementation"
  Revert "arm64: rethook: Add arm64 rethook implementation"
  Revert "powerpc: Add rethook support"
  Revert "ARM: rethook: Add rethook arm implementation"
  bpftool: Fix a bug in subskeleton code generation
  bpf: Fix bpf_prog_pack when PMU_SIZE is not defined
  bpf: Fix bpf_prog_pack for multi-node setup
  bpf: Fix warning for cast from restricted gfp_t in verifier
  bpf, arm: Fix various typos in comments
  libbpf: Close fd in bpf_object__reuse_map
  bpftool: Fix print error when show bpf map
  bpf: Fix kprobe_multi return probe backtrace
  Revert "bpf: Add support to inline bpf_get_func_ip helper on x86"
  bpf: Simplify check in btf_parse_hdr()
  selftests/bpf/test_lirc_mode2.sh: Exit with proper code
  bpf: Check for NULL return from bpf_get_btf_vmlinux
  selftests/bpf: Test skipping stacktrace
  bpf: Adjust BPF stack helper functions to accommodate skip > 0
  bpf: Select proper size for bpf_prog_pack
  ...
====================

Link: https://lore.kernel.org/r/20220322050159.5507-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski, 2022-03-22 10:36:56 -07:00
143 changed files with 7139 additions and 1108 deletions

View File

@@ -31,6 +31,7 @@ test_tcp_check_syncookie_user
test_sysctl
xdping
test_cpp
*.subskel.h
*.skel.h
*.lskel.h
/no_alu32

View File

@@ -25,7 +25,7 @@ CFLAGS += -g -O0 -rdynamic -Wall -Werror $(GENFLAGS) $(SAN_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
-I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT)
LDFLAGS += $(SAN_CFLAGS)
-LDLIBS += -lcap -lelf -lz -lrt -lpthread
+LDLIBS += -lelf -lz -lrt -lpthread
# Silence some warnings when compiled with clang
ifneq ($(LLVM),)
@@ -195,6 +195,7 @@ $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o
TESTING_HELPERS := $(OUTPUT)/testing_helpers.o
TRACE_HELPERS := $(OUTPUT)/trace_helpers.o
CAP_HELPERS := $(OUTPUT)/cap_helpers.o
$(OUTPUT)/test_dev_cgroup: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_skb_cgroup_id_user: $(CGROUP_HELPERS) $(TESTING_HELPERS)
@@ -211,7 +212,7 @@ $(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS)
$(OUTPUT)/xdping: $(TESTING_HELPERS)
$(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS)
$(OUTPUT)/test_maps: $(TESTING_HELPERS)
-$(OUTPUT)/test_verifier: $(TESTING_HELPERS)
+$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS)
BPFTOOL ?= $(DEFAULT_BPFTOOL)
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
@@ -326,7 +327,13 @@ endef
SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
-linked_vars.skel.h linked_maps.skel.h
+linked_vars.skel.h linked_maps.skel.h \
+test_subskeleton.skel.h test_subskeleton_lib.skel.h
# In the subskeleton case, we want the test_subskeleton_lib.subskel.h file
# but that's created as a side-effect of the skel.h generation.
test_subskeleton.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o test_subskeleton.o
test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o
LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \
@@ -404,6 +411,7 @@ $(TRUNNER_BPF_SKELS): %.skel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o)
$(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@
$(Q)$$(BPFTOOL) gen subskeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$(@:.skel.h=.subskel.h)
$(TRUNNER_BPF_LSKELS): %.lskel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
@@ -421,6 +429,7 @@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)diff $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
$(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@
$(Q)$$(BPFTOOL) gen subskeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$(@:.skel.h=.subskel.h)
endif
# ensure we set up tests.h header generation rule just once
@@ -479,7 +488,8 @@ TRUNNER_TESTS_DIR := prog_tests
TRUNNER_BPF_PROGS_DIR := progs
TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
network_helpers.c testing_helpers.c \
-btf_helpers.c flow_dissector_load.h
+btf_helpers.c flow_dissector_load.h \
+cap_helpers.c
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
ima_setup.sh \
$(wildcard progs/btf_dump_test_case_*.c)
@@ -557,6 +567,6 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature bpftool \
-$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h no_alu32 bpf_gcc bpf_testmod.ko)
+$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h no_alu32 bpf_gcc bpf_testmod.ko)
.PHONY: docs docs-clean

View File

@@ -32,11 +32,19 @@ For more information about using the script, run:

  $ tools/testing/selftests/bpf/vmtest.sh -h

In case of linker errors when running selftests, try using static linking:

.. code-block:: console

  $ LDLIBS=-static vmtest.sh

.. note:: Some distros may not support static linking.

.. note:: The script uses pahole and clang based on the host environment
          settings. To use a different pahole or llvm, adjust the `PATH`
          environment variable at the beginning of the script.

-.. note:: The script currently only supports x86_64.
+.. note:: The script currently only supports x86_64 and s390x architectures.

Additional information about selftest failures is documented here.

View File

@@ -33,6 +33,10 @@ struct bpf_testmod_btf_type_tag_2 {
struct bpf_testmod_btf_type_tag_1 __user *p;
};
struct bpf_testmod_btf_type_tag_3 {
struct bpf_testmod_btf_type_tag_1 __percpu *p;
};
noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
BTF_TYPE_EMIT(func_proto_typedef);
@@ -46,6 +50,16 @@ bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
return arg->p->a;
}
noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
return arg->a;
}
noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
return arg->p->a;
}
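The BPF programs exercising these kernel-module hooks
(progs/btf_type_tag_percpu.c) are not part of this excerpt. A rough
sketch of the pattern the verifier now enforces, with the struct
redeclared locally (it lives in module BTF) and illustrative names:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

struct bpf_testmod_btf_type_tag_1 {
	int a;
} __attribute__((preserve_access_index));

int out;

/* A __percpu-tagged PTR_TO_BTF_ID may not be dereferenced directly; it
 * must first be converted with bpf_this_cpu_ptr()/bpf_per_cpu_ptr().
 */
SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_1")
int BPF_PROG(test_percpu, struct bpf_testmod_btf_type_tag_1 *arg)
{
	struct bpf_testmod_btf_type_tag_1 *v;

	/* "out = arg->a;" would be rejected by the verifier here */
	v = bpf_this_cpu_ptr(arg);
	out = v->a;
	return 0;
}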
noinline int bpf_testmod_loop_test(int n)
{
int i, sum = 0;

View File

@@ -0,0 +1,67 @@
// SPDX-License-Identifier: GPL-2.0
#include "cap_helpers.h"
/* Avoid including <sys/capability.h> from the libcap-devel package;
 * declare the capget()/capset() syscall wrappers here directly and use
 * the implementations provided by glibc.
 */
int capget(cap_user_header_t header, cap_user_data_t data);
int capset(cap_user_header_t header, const cap_user_data_t data);
int cap_enable_effective(__u64 caps, __u64 *old_caps)
{
struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
struct __user_cap_header_struct hdr = {
.version = _LINUX_CAPABILITY_VERSION_3,
};
__u32 cap0 = caps;
__u32 cap1 = caps >> 32;
int err;
err = capget(&hdr, data);
if (err)
return err;
if (old_caps)
*old_caps = (__u64)(data[1].effective) << 32 | data[0].effective;
if ((data[0].effective & cap0) == cap0 &&
(data[1].effective & cap1) == cap1)
return 0;
data[0].effective |= cap0;
data[1].effective |= cap1;
err = capset(&hdr, data);
if (err)
return err;
return 0;
}
int cap_disable_effective(__u64 caps, __u64 *old_caps)
{
struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
struct __user_cap_header_struct hdr = {
.version = _LINUX_CAPABILITY_VERSION_3,
};
__u32 cap0 = caps;
__u32 cap1 = caps >> 32;
int err;
err = capget(&hdr, data);
if (err)
return err;
if (old_caps)
*old_caps = (__u64)(data[1].effective) << 32 | data[0].effective;
if (!(data[0].effective & cap0) && !(data[1].effective & cap1))
return 0;
data[0].effective &= ~cap0;
data[1].effective &= ~cap1;
err = capset(&hdr, data);
if (err)
return err;
return 0;
}

View File

@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CAP_HELPERS_H
#define __CAP_HELPERS_H
#include <linux/types.h>
#include <linux/capability.h>
#ifndef CAP_PERFMON
#define CAP_PERFMON 38
#endif
#ifndef CAP_BPF
#define CAP_BPF 39
#endif
int cap_enable_effective(__u64 caps, __u64 *old_caps);
int cap_disable_effective(__u64 caps, __u64 *old_caps);
#endif
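Typical usage, mirroring how prog_tests/bind_perm.c consumes this API
further down (a sketch; the capability choice is illustrative):

#include "cap_helpers.h"

/* Temporarily drop CAP_NET_BIND_SERVICE from the effective set, then
 * restore it only if it was effective to begin with.
 */
static void with_cap_dropped(void)
{
	const __u64 cap_mask = 1ULL << CAP_NET_BIND_SERVICE;
	__u64 old_caps = 0;

	if (cap_disable_effective(cap_mask, &old_caps))
		return;

	/* ... exercise the permission-denied path here ... */

	if (old_caps & cap_mask)
		cap_enable_effective(cap_mask, NULL);
}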

View File

@@ -12,7 +12,7 @@ LOG_FILE="$(mktemp /tmp/ima_setup.XXXX.log)"
usage()
{
echo "Usage: $0 <setup|cleanup|run> <existing_tmp_dir>"
echo "Usage: $0 <setup|cleanup|run|modify-bin|restore-bin|load-policy> <existing_tmp_dir>"
exit 1
}
@@ -51,6 +51,7 @@ setup()
ensure_mount_securityfs
echo "measure func=BPRM_CHECK fsuuid=${mount_uuid}" > ${IMA_POLICY_FILE}
echo "measure func=BPRM_CHECK fsuuid=${mount_uuid}" > ${mount_dir}/policy_test
}
cleanup() {
@@ -77,6 +78,32 @@ run()
exec "${copied_bin_path}"
}
modify_bin()
{
local tmp_dir="$1"
local mount_dir="${tmp_dir}/mnt"
local copied_bin_path="${mount_dir}/$(basename ${TEST_BINARY})"
echo "mod" >> "${copied_bin_path}"
}
restore_bin()
{
local tmp_dir="$1"
local mount_dir="${tmp_dir}/mnt"
local copied_bin_path="${mount_dir}/$(basename ${TEST_BINARY})"
truncate -s -4 "${copied_bin_path}"
}
load_policy()
{
local tmp_dir="$1"
local mount_dir="${tmp_dir}/mnt"
echo ${mount_dir}/policy_test > ${IMA_POLICY_FILE} 2> /dev/null
}
catch()
{
local exit_code="$1"
@@ -105,6 +132,12 @@ main()
cleanup "${tmp_dir}"
elif [[ "${action}" == "run" ]]; then
run "${tmp_dir}"
elif [[ "${action}" == "modify-bin" ]]; then
modify_bin "${tmp_dir}"
elif [[ "${action}" == "restore-bin" ]]; then
restore_bin "${tmp_dir}"
elif [[ "${action}" == "load-policy" ]]; then
load_policy "${tmp_dir}"
else
echo "Unknown action: ${action}"
exit 1

View File

@@ -1,18 +1,25 @@
// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sched.h>
#include <arpa/inet.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <linux/err.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/limits.h>
#include "bpf_util.h"
#include "network_helpers.h"
#include "test_progs.h"
#define clean_errno() (errno == 0 ? "None" : strerror(errno))
#define log_err(MSG, ...) ({ \
@@ -356,3 +363,82 @@ char *ping_command(int family)
}
return "ping";
}
struct nstoken {
int orig_netns_fd;
};
static int setns_by_fd(int nsfd)
{
int err;
err = setns(nsfd, CLONE_NEWNET);
close(nsfd);
if (!ASSERT_OK(err, "setns"))
return err;
/* Switch /sys to the new namespace so that e.g. /sys/class/net
* reflects the devices in the new namespace.
*/
err = unshare(CLONE_NEWNS);
if (!ASSERT_OK(err, "unshare"))
return err;
/* Make our /sys mount private, so the following umount won't
* trigger the global umount in case it's shared.
*/
err = mount("none", "/sys", NULL, MS_PRIVATE, NULL);
if (!ASSERT_OK(err, "remount private /sys"))
return err;
err = umount2("/sys", MNT_DETACH);
if (!ASSERT_OK(err, "umount2 /sys"))
return err;
err = mount("sysfs", "/sys", "sysfs", 0, NULL);
if (!ASSERT_OK(err, "mount /sys"))
return err;
err = mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL);
if (!ASSERT_OK(err, "mount /sys/fs/bpf"))
return err;
return 0;
}
struct nstoken *open_netns(const char *name)
{
int nsfd;
char nspath[PATH_MAX];
int err;
struct nstoken *token;
token = malloc(sizeof(struct nstoken));
if (!ASSERT_OK_PTR(token, "malloc token"))
return NULL;
token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY);
if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net"))
goto fail;
snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name);
nsfd = open(nspath, O_RDONLY | O_CLOEXEC);
if (!ASSERT_GE(nsfd, 0, "open netns fd"))
goto fail;
err = setns_by_fd(nsfd);
if (!ASSERT_OK(err, "setns_by_fd"))
goto fail;
return token;
fail:
free(token);
return NULL;
}
void close_netns(struct nstoken *token)
{
ASSERT_OK(setns_by_fd(token->orig_netns_fd), "setns_by_fd");
free(token);
}
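Callers pair the two helpers like this (a sketch; the namespace name is
illustrative and must already exist, e.g. created with "ip netns add"):

/* Enter a named netns for the duration of a test, then restore. */
static void run_in_netns_example(void)
{
	struct nstoken *tok;

	tok = open_netns("testns"); /* hypothetical namespace name */
	if (!tok)
		return;

	/* sockets opened and /sys/class/net now reflect "testns" */

	close_netns(tok);
}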

View File

@@ -55,4 +55,13 @@ int make_sockaddr(int family, const char *addr_str, __u16 port,
struct sockaddr_storage *addr, socklen_t *len);
char *ping_command(int family);
struct nstoken;
/**
* open_netns() - Switch to specified network namespace by name.
*
* Returns a token with which to restore the original namespace
* using close_netns().
*/
struct nstoken *open_netns(const char *name);
void close_netns(struct nstoken *token);
#endif

View File

@@ -4,9 +4,9 @@
#include <stdlib.h>
#include <sys/types.h>
#include <sys/socket.h>
-#include <sys/capability.h>
#include "test_progs.h"
+#include "cap_helpers.h"
#include "bind_perm.skel.h"
static int duration;
@@ -49,41 +49,11 @@ close_socket:
close(fd);
}
-bool cap_net_bind_service(cap_flag_value_t flag)
-{
-const cap_value_t cap_net_bind_service = CAP_NET_BIND_SERVICE;
-cap_flag_value_t original_value;
-bool was_effective = false;
-cap_t caps;
-caps = cap_get_proc();
-if (CHECK(!caps, "cap_get_proc", "errno %d", errno))
-goto free_caps;
-if (CHECK(cap_get_flag(caps, CAP_NET_BIND_SERVICE, CAP_EFFECTIVE,
-&original_value),
-"cap_get_flag", "errno %d", errno))
-goto free_caps;
-was_effective = (original_value == CAP_SET);
-if (CHECK(cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_bind_service,
-flag),
-"cap_set_flag", "errno %d", errno))
-goto free_caps;
-if (CHECK(cap_set_proc(caps), "cap_set_proc", "errno %d", errno))
-goto free_caps;
-free_caps:
-CHECK(cap_free(caps), "cap_free", "errno %d", errno);
-return was_effective;
-}
void test_bind_perm(void)
{
-bool cap_was_effective;
+const __u64 net_bind_svc_cap = 1ULL << CAP_NET_BIND_SERVICE;
struct bind_perm *skel;
__u64 old_caps = 0;
int cgroup_fd;
if (create_netns())
@@ -105,7 +75,8 @@ void test_bind_perm(void)
if (!ASSERT_OK_PTR(skel, "bind_v6_prog"))
goto close_skeleton;
-cap_was_effective = cap_net_bind_service(CAP_CLEAR);
+ASSERT_OK(cap_disable_effective(net_bind_svc_cap, &old_caps),
+"cap_disable_effective");
try_bind(AF_INET, 110, EACCES);
try_bind(AF_INET6, 110, EACCES);
@@ -113,8 +84,9 @@ void test_bind_perm(void)
try_bind(AF_INET, 111, 0);
try_bind(AF_INET6, 111, 0);
-if (cap_was_effective)
-cap_net_bind_service(CAP_SET);
+if (old_caps & net_bind_svc_cap)
+ASSERT_OK(cap_enable_effective(net_bind_svc_cap, NULL),
+"cap_enable_effective");
close_skeleton:
bind_perm__destroy(skel);

View File

@@ -7,6 +7,7 @@
#include <unistd.h>
#include <test_progs.h>
#include "test_bpf_cookie.skel.h"
#include "kprobe_multi.skel.h"
/* uprobe attach point */
static void trigger_func(void)
@@ -63,6 +64,178 @@ cleanup:
bpf_link__destroy(retlink2);
}
static void kprobe_multi_test_run(struct kprobe_multi *skel)
{
LIBBPF_OPTS(bpf_test_run_opts, topts);
int err, prog_fd;
prog_fd = bpf_program__fd(skel->progs.trigger);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");
ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
}
static void kprobe_multi_link_api_subtest(void)
{
int prog_fd, link1_fd = -1, link2_fd = -1;
struct kprobe_multi *skel = NULL;
LIBBPF_OPTS(bpf_link_create_opts, opts);
unsigned long long addrs[8];
__u64 cookies[8];
if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
goto cleanup;
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
goto cleanup;
skel->bss->pid = getpid();
skel->bss->test_cookie = true;
#define GET_ADDR(__sym, __addr) ({ \
__addr = ksym_get_addr(__sym); \
if (!ASSERT_NEQ(__addr, 0, "ksym_get_addr " #__sym)) \
goto cleanup; \
})
GET_ADDR("bpf_fentry_test1", addrs[0]);
GET_ADDR("bpf_fentry_test2", addrs[1]);
GET_ADDR("bpf_fentry_test3", addrs[2]);
GET_ADDR("bpf_fentry_test4", addrs[3]);
GET_ADDR("bpf_fentry_test5", addrs[4]);
GET_ADDR("bpf_fentry_test6", addrs[5]);
GET_ADDR("bpf_fentry_test7", addrs[6]);
GET_ADDR("bpf_fentry_test8", addrs[7]);
#undef GET_ADDR
cookies[0] = 1;
cookies[1] = 2;
cookies[2] = 3;
cookies[3] = 4;
cookies[4] = 5;
cookies[5] = 6;
cookies[6] = 7;
cookies[7] = 8;
opts.kprobe_multi.addrs = (const unsigned long *) &addrs;
opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
opts.kprobe_multi.cookies = (const __u64 *) &cookies;
prog_fd = bpf_program__fd(skel->progs.test_kprobe);
link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
goto cleanup;
cookies[0] = 8;
cookies[1] = 7;
cookies[2] = 6;
cookies[3] = 5;
cookies[4] = 4;
cookies[5] = 3;
cookies[6] = 2;
cookies[7] = 1;
opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts);
if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
goto cleanup;
kprobe_multi_test_run(skel);
cleanup:
close(link1_fd);
close(link2_fd);
kprobe_multi__destroy(skel);
}
static void kprobe_multi_attach_api_subtest(void)
{
struct bpf_link *link1 = NULL, *link2 = NULL;
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct kprobe_multi *skel = NULL;
const char *syms[8] = {
"bpf_fentry_test1",
"bpf_fentry_test2",
"bpf_fentry_test3",
"bpf_fentry_test4",
"bpf_fentry_test5",
"bpf_fentry_test6",
"bpf_fentry_test7",
"bpf_fentry_test8",
};
__u64 cookies[8];
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
goto cleanup;
skel->bss->pid = getpid();
skel->bss->test_cookie = true;
cookies[0] = 1;
cookies[1] = 2;
cookies[2] = 3;
cookies[3] = 4;
cookies[4] = 5;
cookies[5] = 6;
cookies[6] = 7;
cookies[7] = 8;
opts.syms = syms;
opts.cnt = ARRAY_SIZE(syms);
opts.cookies = cookies;
link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
NULL, &opts);
if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
cookies[0] = 8;
cookies[1] = 7;
cookies[2] = 6;
cookies[3] = 5;
cookies[4] = 4;
cookies[5] = 3;
cookies[6] = 2;
cookies[7] = 1;
opts.retprobe = true;
link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe,
NULL, &opts);
if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
kprobe_multi_test_run(skel);
cleanup:
bpf_link__destroy(link2);
bpf_link__destroy(link1);
kprobe_multi__destroy(skel);
}
static void uprobe_subtest(struct test_bpf_cookie *skel)
{
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
@@ -199,7 +372,7 @@ static void pe_subtest(struct test_bpf_cookie *skel)
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
-attr.sample_freq = 4000;
+attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(pfd, 0, "perf_fd"))
goto cleanup;
@@ -249,6 +422,10 @@ void test_bpf_cookie(void)
if (test__start_subtest("kprobe"))
kprobe_subtest(skel);
if (test__start_subtest("multi_kprobe_link_api"))
kprobe_multi_link_api_subtest();
if (test__start_subtest("multi_kprobe_attach_api"))
kprobe_multi_attach_api_subtest();
if (test__start_subtest("uprobe"))
uprobe_subtest(skel);
if (test__start_subtest("tracepoint"))

View File

@@ -10,6 +10,7 @@ struct btf_type_tag_test {
};
#include "btf_type_tag.skel.h"
#include "btf_type_tag_user.skel.h"
#include "btf_type_tag_percpu.skel.h"
static void test_btf_decl_tag(void)
{
@@ -43,38 +44,81 @@ static void test_btf_type_tag(void)
btf_type_tag__destroy(skel);
}
static void test_btf_type_tag_mod_user(bool load_test_user1)
/* Loads vmlinux_btf as well as module_btf. If the caller passes NULL as
 * module_btf, module BTF will not be loaded.
 *
 * Returns 0 on success.
 * Returns -1 on error; in that case the loaded BTF objects are freed and
 * the input pointers are set to NULL.
 */
static int load_btfs(struct btf **vmlinux_btf, struct btf **module_btf,
bool needs_vmlinux_tag)
{
const char *module_name = "bpf_testmod";
struct btf *vmlinux_btf, *module_btf;
struct btf_type_tag_user *skel;
__s32 type_id;
int err;
if (!env.has_testmod) {
test__skip();
-return;
+return -1;
}
/* skip the test if the module does not have __user tags */
vmlinux_btf = btf__load_vmlinux_btf();
if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
return;
*vmlinux_btf = btf__load_vmlinux_btf();
if (!ASSERT_OK_PTR(*vmlinux_btf, "could not load vmlinux BTF"))
return -1;
module_btf = btf__load_module_btf(module_name, vmlinux_btf);
if (!ASSERT_OK_PTR(module_btf, "could not load module BTF"))
if (!needs_vmlinux_tag)
goto load_module_btf;
/* skip the test if the vmlinux does not have __user tags */
type_id = btf__find_by_name_kind(*vmlinux_btf, "user", BTF_KIND_TYPE_TAG);
if (type_id <= 0) {
printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__);
test__skip();
goto free_vmlinux_btf;
}
load_module_btf:
/* skip loading module_btf, if not requested by caller */
if (!module_btf)
return 0;
*module_btf = btf__load_module_btf(module_name, *vmlinux_btf);
if (!ASSERT_OK_PTR(*module_btf, "could not load module BTF"))
goto free_vmlinux_btf;
-type_id = btf__find_by_name_kind(module_btf, "user", BTF_KIND_TYPE_TAG);
+/* skip the test if the module does not have __user tags */
+type_id = btf__find_by_name_kind(*module_btf, "user", BTF_KIND_TYPE_TAG);
if (type_id <= 0) {
printf("%s:SKIP: btf_type_tag attribute not in %s", __func__, module_name);
test__skip();
goto free_module_btf;
}
return 0;
free_module_btf:
btf__free(*module_btf);
free_vmlinux_btf:
btf__free(*vmlinux_btf);
*vmlinux_btf = NULL;
if (module_btf)
*module_btf = NULL;
return -1;
}
static void test_btf_type_tag_mod_user(bool load_test_user1)
{
struct btf *vmlinux_btf = NULL, *module_btf = NULL;
struct btf_type_tag_user *skel;
int err;
if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false))
return;
skel = btf_type_tag_user__open();
if (!ASSERT_OK_PTR(skel, "btf_type_tag_user"))
-goto free_module_btf;
+goto cleanup;
bpf_program__set_autoload(skel->progs.test_sys_getsockname, false);
if (load_test_user1)
@@ -87,34 +131,23 @@ static void test_btf_type_tag_mod_user(bool load_test_user1)
btf_type_tag_user__destroy(skel);
-free_module_btf:
+cleanup:
btf__free(module_btf);
free_vmlinux_btf:
btf__free(vmlinux_btf);
}
static void test_btf_type_tag_vmlinux_user(void)
{
struct btf_type_tag_user *skel;
-struct btf *vmlinux_btf;
-__s32 type_id;
+struct btf *vmlinux_btf = NULL;
int err;
-/* skip the test if the vmlinux does not have __user tags */
-vmlinux_btf = btf__load_vmlinux_btf();
-if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
+if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true))
return;
-type_id = btf__find_by_name_kind(vmlinux_btf, "user", BTF_KIND_TYPE_TAG);
-if (type_id <= 0) {
-printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__);
-test__skip();
-goto free_vmlinux_btf;
-}
skel = btf_type_tag_user__open();
if (!ASSERT_OK_PTR(skel, "btf_type_tag_user"))
-goto free_vmlinux_btf;
+goto cleanup;
bpf_program__set_autoload(skel->progs.test_user2, false);
bpf_program__set_autoload(skel->progs.test_user1, false);
@@ -124,7 +157,70 @@ static void test_btf_type_tag_vmlinux_user(void)
btf_type_tag_user__destroy(skel);
-free_vmlinux_btf:
+cleanup:
btf__free(vmlinux_btf);
}
static void test_btf_type_tag_mod_percpu(bool load_test_percpu1)
{
struct btf *vmlinux_btf, *module_btf;
struct btf_type_tag_percpu *skel;
int err;
if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false))
return;
skel = btf_type_tag_percpu__open();
if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu"))
goto cleanup;
bpf_program__set_autoload(skel->progs.test_percpu_load, false);
bpf_program__set_autoload(skel->progs.test_percpu_helper, false);
if (load_test_percpu1)
bpf_program__set_autoload(skel->progs.test_percpu2, false);
else
bpf_program__set_autoload(skel->progs.test_percpu1, false);
err = btf_type_tag_percpu__load(skel);
ASSERT_ERR(err, "btf_type_tag_percpu");
btf_type_tag_percpu__destroy(skel);
cleanup:
btf__free(module_btf);
btf__free(vmlinux_btf);
}
static void test_btf_type_tag_vmlinux_percpu(bool load_test)
{
struct btf_type_tag_percpu *skel;
struct btf *vmlinux_btf = NULL;
int err;
if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true))
return;
skel = btf_type_tag_percpu__open();
if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu"))
goto cleanup;
bpf_program__set_autoload(skel->progs.test_percpu2, false);
bpf_program__set_autoload(skel->progs.test_percpu1, false);
if (load_test) {
bpf_program__set_autoload(skel->progs.test_percpu_helper, false);
err = btf_type_tag_percpu__load(skel);
ASSERT_ERR(err, "btf_type_tag_percpu_load");
} else {
bpf_program__set_autoload(skel->progs.test_percpu_load, false);
err = btf_type_tag_percpu__load(skel);
ASSERT_OK(err, "btf_type_tag_percpu_helper");
}
btf_type_tag_percpu__destroy(skel);
cleanup:
btf__free(vmlinux_btf);
}
@@ -134,10 +230,20 @@ void test_btf_tag(void)
test_btf_decl_tag();
if (test__start_subtest("btf_type_tag"))
test_btf_type_tag();
if (test__start_subtest("btf_type_tag_user_mod1"))
test_btf_type_tag_mod_user(true);
if (test__start_subtest("btf_type_tag_user_mod2"))
test_btf_type_tag_mod_user(false);
if (test__start_subtest("btf_type_tag_sys_user_vmlinux"))
test_btf_type_tag_vmlinux_user();
if (test__start_subtest("btf_type_tag_percpu_mod1"))
test_btf_type_tag_mod_percpu(true);
if (test__start_subtest("btf_type_tag_percpu_mod2"))
test_btf_type_tag_mod_percpu(false);
if (test__start_subtest("btf_type_tag_percpu_vmlinux_load"))
test_btf_type_tag_vmlinux_percpu(true);
if (test__start_subtest("btf_type_tag_percpu_vmlinux_helper"))
test_btf_type_tag_vmlinux_percpu(false);
}

View File

@@ -14,7 +14,7 @@ static int prog_load(void)
BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = 1 */
BPF_EXIT_INSN(),
};
-size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+size_t insns_cnt = ARRAY_SIZE(prog);
return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
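ARRAY_SIZE replaces the open-coded division above; for reference, the
usual definition in the selftests' shared headers is:

/* number of elements in a statically sized array */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))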

View File

@@ -63,7 +63,7 @@ static int prog_load_cnt(int verdict, int val)
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
-size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+size_t insns_cnt = ARRAY_SIZE(prog);
int ret;
ret = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,

View File

@@ -16,7 +16,7 @@ static int prog_load(int verdict)
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
-size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+size_t insns_cnt = ARRAY_SIZE(prog);
return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,

View File

@@ -0,0 +1,176 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Facebook */
#include <test_progs.h>
#include "test_custom_sec_handlers.skel.h"
#define COOKIE_ABC1 1
#define COOKIE_ABC2 2
#define COOKIE_CUSTOM 3
#define COOKIE_FALLBACK 4
#define COOKIE_KPROBE 5
static int custom_setup_prog(struct bpf_program *prog, long cookie)
{
if (cookie == COOKIE_ABC1)
bpf_program__set_autoload(prog, false);
return 0;
}
static int custom_prepare_load_prog(struct bpf_program *prog,
struct bpf_prog_load_opts *opts, long cookie)
{
if (cookie == COOKIE_FALLBACK)
opts->prog_flags |= BPF_F_SLEEPABLE;
else if (cookie == COOKIE_ABC1)
ASSERT_FALSE(true, "unexpected preload for abc");
return 0;
}
static int custom_attach_prog(const struct bpf_program *prog, long cookie,
struct bpf_link **link)
{
switch (cookie) {
case COOKIE_ABC2:
*link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
return libbpf_get_error(*link);
case COOKIE_CUSTOM:
*link = bpf_program__attach_tracepoint(prog, "syscalls", "sys_enter_nanosleep");
return libbpf_get_error(*link);
case COOKIE_KPROBE:
case COOKIE_FALLBACK:
/* no auto-attach for SEC("xyz") and SEC("kprobe") */
*link = NULL;
return 0;
default:
ASSERT_FALSE(true, "unexpected cookie");
return -EINVAL;
}
}
static int abc1_id;
static int abc2_id;
static int custom_id;
static int fallback_id;
static int kprobe_id;
__attribute__((constructor))
static void register_sec_handlers(void)
{
LIBBPF_OPTS(libbpf_prog_handler_opts, abc1_opts,
.cookie = COOKIE_ABC1,
.prog_setup_fn = custom_setup_prog,
.prog_prepare_load_fn = custom_prepare_load_prog,
.prog_attach_fn = NULL,
);
LIBBPF_OPTS(libbpf_prog_handler_opts, abc2_opts,
.cookie = COOKIE_ABC2,
.prog_setup_fn = custom_setup_prog,
.prog_prepare_load_fn = custom_prepare_load_prog,
.prog_attach_fn = custom_attach_prog,
);
LIBBPF_OPTS(libbpf_prog_handler_opts, custom_opts,
.cookie = COOKIE_CUSTOM,
.prog_setup_fn = NULL,
.prog_prepare_load_fn = NULL,
.prog_attach_fn = custom_attach_prog,
);
abc1_id = libbpf_register_prog_handler("abc", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc1_opts);
abc2_id = libbpf_register_prog_handler("abc/", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc2_opts);
custom_id = libbpf_register_prog_handler("custom+", BPF_PROG_TYPE_TRACEPOINT, 0, &custom_opts);
}
__attribute__((destructor))
static void unregister_sec_handlers(void)
{
libbpf_unregister_prog_handler(abc1_id);
libbpf_unregister_prog_handler(abc2_id);
libbpf_unregister_prog_handler(custom_id);
}
void test_custom_sec_handlers(void)
{
LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
.prog_setup_fn = custom_setup_prog,
.prog_prepare_load_fn = custom_prepare_load_prog,
.prog_attach_fn = custom_attach_prog,
);
struct test_custom_sec_handlers* skel;
int err;
ASSERT_GT(abc1_id, 0, "abc1_id");
ASSERT_GT(abc2_id, 0, "abc2_id");
ASSERT_GT(custom_id, 0, "custom_id");
/* override libbpf's handling of SEC("kprobe/...") but also allow pure
* SEC("kprobe") due to "kprobe+" specifier. Register it as
* TRACEPOINT, just for fun.
*/
opts.cookie = COOKIE_KPROBE;
kprobe_id = libbpf_register_prog_handler("kprobe+", BPF_PROG_TYPE_TRACEPOINT, 0, &opts);
/* fallback treats everything as a BPF_PROG_TYPE_SYSCALL program to test
* setting custom BPF_F_SLEEPABLE bit in preload handler
*/
opts.cookie = COOKIE_FALLBACK;
fallback_id = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_SYSCALL, 0, &opts);
if (!ASSERT_GT(fallback_id, 0, "fallback_id") /* || !ASSERT_GT(kprobe_id, 0, "kprobe_id")*/) {
if (fallback_id > 0)
libbpf_unregister_prog_handler(fallback_id);
if (kprobe_id > 0)
libbpf_unregister_prog_handler(kprobe_id);
return;
}
/* open skeleton and validate assumptions */
skel = test_custom_sec_handlers__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
ASSERT_EQ(bpf_program__type(skel->progs.abc1), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc1_type");
ASSERT_FALSE(bpf_program__autoload(skel->progs.abc1), "abc1_autoload");
ASSERT_EQ(bpf_program__type(skel->progs.abc2), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc2_type");
ASSERT_EQ(bpf_program__type(skel->progs.custom1), BPF_PROG_TYPE_TRACEPOINT, "custom1_type");
ASSERT_EQ(bpf_program__type(skel->progs.custom2), BPF_PROG_TYPE_TRACEPOINT, "custom2_type");
ASSERT_EQ(bpf_program__type(skel->progs.kprobe1), BPF_PROG_TYPE_TRACEPOINT, "kprobe1_type");
ASSERT_EQ(bpf_program__type(skel->progs.xyz), BPF_PROG_TYPE_SYSCALL, "xyz_type");
skel->rodata->my_pid = getpid();
/* now attempt to load everything */
err = test_custom_sec_handlers__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
/* now try to auto-attach everything */
err = test_custom_sec_handlers__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
skel->links.xyz = bpf_program__attach(skel->progs.kprobe1);
ASSERT_EQ(errno, EOPNOTSUPP, "xyz_attach_err");
ASSERT_ERR_PTR(skel->links.xyz, "xyz_attach");
/* trigger programs */
usleep(1);
/* SEC("abc") is set to not auto-loaded */
ASSERT_FALSE(skel->bss->abc1_called, "abc1_called");
ASSERT_TRUE(skel->bss->abc2_called, "abc2_called");
ASSERT_TRUE(skel->bss->custom1_called, "custom1_called");
ASSERT_TRUE(skel->bss->custom2_called, "custom2_called");
/* SEC("kprobe") shouldn't be auto-attached */
ASSERT_FALSE(skel->bss->kprobe1_called, "kprobe1_called");
/* SEC("xyz") shouldn't be auto-attached */
ASSERT_FALSE(skel->bss->xyz_called, "xyz_called");
cleanup:
test_custom_sec_handlers__destroy(skel);
ASSERT_OK(libbpf_unregister_prog_handler(fallback_id), "unregister_fallback");
ASSERT_OK(libbpf_unregister_prog_handler(kprobe_id), "unregister_kprobe");
}

View File

@@ -7,12 +7,14 @@
#include "find_vma_fail1.skel.h"
#include "find_vma_fail2.skel.h"
-static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret)
+static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret, bool need_test)
{
-ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec");
-ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret");
-ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret");
-ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs");
+if (need_test) {
+ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec");
+ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret");
+ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret");
+ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs");
+}
skel->bss->found_vm_exec = 0;
skel->data->find_addr_ret = -1;
@@ -30,17 +32,26 @@ static int open_pe(void)
attr.type = PERF_TYPE_HARDWARE;
attr.config = PERF_COUNT_HW_CPU_CYCLES;
attr.freq = 1;
-attr.sample_freq = 4000;
+attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
return pfd >= 0 ? pfd : -errno;
}
static bool find_vma_pe_condition(struct find_vma *skel)
{
return skel->bss->found_vm_exec == 0 ||
skel->data->find_addr_ret != 0 ||
skel->data->find_zero_ret == -1 ||
strcmp(skel->bss->d_iname, "test_progs") != 0;
}
static void test_find_vma_pe(struct find_vma *skel)
{
struct bpf_link *link = NULL;
volatile int j = 0;
int pfd, i;
const int one_bn = 1000000000;
pfd = open_pe();
if (pfd < 0) {
@@ -57,10 +68,10 @@ static void test_find_vma_pe(struct find_vma *skel)
if (!ASSERT_OK_PTR(link, "attach_perf_event"))
goto cleanup;
-for (i = 0; i < 1000000; ++i)
+for (i = 0; i < one_bn && find_vma_pe_condition(skel); ++i)
++j;
-test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */);
+test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */, i == one_bn);
cleanup:
bpf_link__destroy(link);
close(pfd);
@@ -75,7 +86,7 @@ static void test_find_vma_kprobe(struct find_vma *skel)
return;
getpgid(skel->bss->target_pid);
-test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */);
+test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */, true);
}
static void test_illegal_write_vma(void)
@@ -108,7 +119,6 @@ void serial_test_find_vma(void)
skel->bss->addr = (__u64)(uintptr_t)test_find_vma_pe;
test_find_vma_pe(skel);
-usleep(100000); /* allow the irq_work to finish */
test_find_vma_kprobe(skel);
find_vma__destroy(skel);

View File

@@ -29,7 +29,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
{ "relocate .rodata reference", 10, ~0 },
};
-for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+for (i = 0; i < ARRAY_SIZE(tests); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
CHECK(err || num != tests[i].num, tests[i].name,
"err %d result %llx expected %llx\n",
@@ -58,7 +58,7 @@ static void test_global_data_string(struct bpf_object *obj, __u32 duration)
{ "relocate .bss reference", 4, "\0\0hello" },
};
-for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+for (i = 0; i < ARRAY_SIZE(tests); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, str);
CHECK(err || memcmp(str, tests[i].str, sizeof(str)),
tests[i].name, "err %d result \'%s\' expected \'%s\'\n",
@@ -92,7 +92,7 @@ static void test_global_data_struct(struct bpf_object *obj, __u32 duration)
{ "relocate .data reference", 3, { 41, 0xeeeeefef, 0x2111111111111111ULL, } },
};
-for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+for (i = 0; i < ARRAY_SIZE(tests); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &val);
CHECK(err || memcmp(&val, &tests[i].val, sizeof(val)),
tests[i].name, "err %d result { %u, %u, %llu } expected { %u, %u, %llu }\n",

View File

@@ -0,0 +1,323 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "kprobe_multi.skel.h"
#include "trace_helpers.h"
static void kprobe_multi_test_run(struct kprobe_multi *skel, bool test_return)
{
LIBBPF_OPTS(bpf_test_run_opts, topts);
int err, prog_fd;
prog_fd = bpf_program__fd(skel->progs.trigger);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");
if (test_return) {
ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
}
}
static void test_skel_api(void)
{
struct kprobe_multi *skel = NULL;
int err;
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load"))
goto cleanup;
skel->bss->pid = getpid();
err = kprobe_multi__attach(skel);
if (!ASSERT_OK(err, "kprobe_multi__attach"))
goto cleanup;
kprobe_multi_test_run(skel, true);
cleanup:
kprobe_multi__destroy(skel);
}
static void test_link_api(struct bpf_link_create_opts *opts)
{
int prog_fd, link1_fd = -1, link2_fd = -1;
struct kprobe_multi *skel = NULL;
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
goto cleanup;
skel->bss->pid = getpid();
prog_fd = bpf_program__fd(skel->progs.test_kprobe);
link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts);
if (!ASSERT_GE(link1_fd, 0, "link_fd"))
goto cleanup;
opts->kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts);
if (!ASSERT_GE(link2_fd, 0, "link_fd"))
goto cleanup;
kprobe_multi_test_run(skel, true);
cleanup:
if (link1_fd != -1)
close(link1_fd);
if (link2_fd != -1)
close(link2_fd);
kprobe_multi__destroy(skel);
}
#define GET_ADDR(__sym, __addr) ({ \
__addr = ksym_get_addr(__sym); \
if (!ASSERT_NEQ(__addr, 0, "kallsyms load failed for " #__sym)) \
return; \
})
static void test_link_api_addrs(void)
{
LIBBPF_OPTS(bpf_link_create_opts, opts);
unsigned long long addrs[8];
GET_ADDR("bpf_fentry_test1", addrs[0]);
GET_ADDR("bpf_fentry_test2", addrs[1]);
GET_ADDR("bpf_fentry_test3", addrs[2]);
GET_ADDR("bpf_fentry_test4", addrs[3]);
GET_ADDR("bpf_fentry_test5", addrs[4]);
GET_ADDR("bpf_fentry_test6", addrs[5]);
GET_ADDR("bpf_fentry_test7", addrs[6]);
GET_ADDR("bpf_fentry_test8", addrs[7]);
opts.kprobe_multi.addrs = (const unsigned long*) addrs;
opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
test_link_api(&opts);
}
static void test_link_api_syms(void)
{
LIBBPF_OPTS(bpf_link_create_opts, opts);
const char *syms[8] = {
"bpf_fentry_test1",
"bpf_fentry_test2",
"bpf_fentry_test3",
"bpf_fentry_test4",
"bpf_fentry_test5",
"bpf_fentry_test6",
"bpf_fentry_test7",
"bpf_fentry_test8",
};
opts.kprobe_multi.syms = syms;
opts.kprobe_multi.cnt = ARRAY_SIZE(syms);
test_link_api(&opts);
}
static void
test_attach_api(const char *pattern, struct bpf_kprobe_multi_opts *opts)
{
struct bpf_link *link1 = NULL, *link2 = NULL;
struct kprobe_multi *skel = NULL;
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
goto cleanup;
skel->bss->pid = getpid();
link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
pattern, opts);
if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
if (opts) {
opts->retprobe = true;
link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe,
pattern, opts);
if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
}
kprobe_multi_test_run(skel, !!opts);
cleanup:
bpf_link__destroy(link2);
bpf_link__destroy(link1);
kprobe_multi__destroy(skel);
}
static void test_attach_api_pattern(void)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
test_attach_api("bpf_fentry_test*", &opts);
test_attach_api("bpf_fentry_test?", NULL);
}
static void test_attach_api_addrs(void)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
unsigned long long addrs[8];
GET_ADDR("bpf_fentry_test1", addrs[0]);
GET_ADDR("bpf_fentry_test2", addrs[1]);
GET_ADDR("bpf_fentry_test3", addrs[2]);
GET_ADDR("bpf_fentry_test4", addrs[3]);
GET_ADDR("bpf_fentry_test5", addrs[4]);
GET_ADDR("bpf_fentry_test6", addrs[5]);
GET_ADDR("bpf_fentry_test7", addrs[6]);
GET_ADDR("bpf_fentry_test8", addrs[7]);
opts.addrs = (const unsigned long *) addrs;
opts.cnt = ARRAY_SIZE(addrs);
test_attach_api(NULL, &opts);
}
static void test_attach_api_syms(void)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
const char *syms[8] = {
"bpf_fentry_test1",
"bpf_fentry_test2",
"bpf_fentry_test3",
"bpf_fentry_test4",
"bpf_fentry_test5",
"bpf_fentry_test6",
"bpf_fentry_test7",
"bpf_fentry_test8",
};
opts.syms = syms;
opts.cnt = ARRAY_SIZE(syms);
test_attach_api(NULL, &opts);
}
static void test_attach_api_fails(void)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
struct kprobe_multi *skel = NULL;
struct bpf_link *link = NULL;
unsigned long long addrs[2];
const char *syms[2] = {
"bpf_fentry_test1",
"bpf_fentry_test2",
};
__u64 cookies[2];
addrs[0] = ksym_get_addr("bpf_fentry_test1");
addrs[1] = ksym_get_addr("bpf_fentry_test2");
if (!ASSERT_FALSE(!addrs[0] || !addrs[1], "ksym_get_addr"))
goto cleanup;
skel = kprobe_multi__open_and_load();
if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
goto cleanup;
skel->bss->pid = getpid();
/* fail_1 - pattern and opts NULL */
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
NULL, NULL);
if (!ASSERT_ERR_PTR(link, "fail_1"))
goto cleanup;
if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_1_error"))
goto cleanup;
/* fail_2 - both addrs and syms set */
opts.addrs = (const unsigned long *) addrs;
opts.syms = syms;
opts.cnt = ARRAY_SIZE(syms);
opts.cookies = NULL;
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
NULL, &opts);
if (!ASSERT_ERR_PTR(link, "fail_2"))
goto cleanup;
if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_2_error"))
goto cleanup;
/* fail_3 - pattern and addrs set */
opts.addrs = (const unsigned long *) addrs;
opts.syms = NULL;
opts.cnt = ARRAY_SIZE(syms);
opts.cookies = NULL;
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
"ksys_*", &opts);
if (!ASSERT_ERR_PTR(link, "fail_3"))
goto cleanup;
if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_3_error"))
goto cleanup;
/* fail_4 - pattern and cnt set */
opts.addrs = NULL;
opts.syms = NULL;
opts.cnt = ARRAY_SIZE(syms);
opts.cookies = NULL;
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
"ksys_*", &opts);
if (!ASSERT_ERR_PTR(link, "fail_4"))
goto cleanup;
if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_4_error"))
goto cleanup;
/* fail_5 - pattern and cookies */
opts.addrs = NULL;
opts.syms = NULL;
opts.cnt = 0;
opts.cookies = cookies;
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
"ksys_*", &opts);
if (!ASSERT_ERR_PTR(link, "fail_5"))
goto cleanup;
if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_5_error"))
goto cleanup;
cleanup:
bpf_link__destroy(link);
kprobe_multi__destroy(skel);
}
void test_kprobe_multi_test(void)
{
if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
return;
if (test__start_subtest("skel_api"))
test_skel_api();
if (test__start_subtest("link_api_addrs"))
test_link_api_syms();
if (test__start_subtest("link_api_syms"))
test_link_api_addrs();
if (test__start_subtest("attach_api_pattern"))
test_attach_api_pattern();
if (test__start_subtest("attach_api_addrs"))
test_attach_api_addrs();
if (test__start_subtest("attach_api_syms"))
test_attach_api_syms();
if (test__start_subtest("attach_api_fails"))
test_attach_api_fails();
}

View File

@@ -20,7 +20,7 @@ void test_obj_name(void)
__u32 duration = 0;
int i;
-for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+for (i = 0; i < ARRAY_SIZE(tests); i++) {
size_t name_len = strlen(tests[i].name) + 1;
union bpf_attr attr;
size_t ncopy;

View File

@@ -110,7 +110,7 @@ static void test_perf_branches_hw(void)
attr.type = PERF_TYPE_HARDWARE;
attr.config = PERF_COUNT_HW_CPU_CYCLES;
attr.freq = 1;
-attr.sample_freq = 4000;
+attr.sample_freq = 1000;
attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
@@ -151,7 +151,7 @@ static void test_perf_branches_no_hw(void)
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
-attr.sample_freq = 4000;
+attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (CHECK(pfd < 0, "perf_event_open", "err %d\n", pfd))
return;

View File

@@ -39,7 +39,7 @@ void serial_test_perf_link(void)
attr.type = PERF_TYPE_SOFTWARE;
attr.config = PERF_COUNT_SW_CPU_CLOCK;
attr.freq = 1;
-attr.sample_freq = 4000;
+attr.sample_freq = 1000;
pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
if (!ASSERT_GE(pfd, 0, "perf_fd"))
goto cleanup;

View File

@@ -4,11 +4,11 @@
#include <sys/resource.h>
#include "test_send_signal_kern.skel.h"
-int sigusr1_received = 0;
+static int sigusr1_received;
static void sigusr1_handler(int signum)
{
-sigusr1_received++;
+sigusr1_received = 1;
}
static void test_send_signal_common(struct perf_event_attr *attr,
@@ -40,9 +40,10 @@ static void test_send_signal_common(struct perf_event_attr *attr,
if (pid == 0) {
int old_prio;
volatile int j = 0;
/* install signal handler and notify parent */
-signal(SIGUSR1, sigusr1_handler);
+ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal");
close(pipe_c2p[0]); /* close read */
close(pipe_p2c[1]); /* close write */
@@ -63,9 +64,11 @@ static void test_send_signal_common(struct perf_event_attr *attr,
ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");
-/* wait a little for signal handler */
-sleep(1);
+for (int i = 0; i < 100000000 && !sigusr1_received; i++)
+j /= i + j + 1;
buf[0] = sigusr1_received ? '2' : '0';
+ASSERT_EQ(sigusr1_received, 1, "sigusr1_received");
ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");
/* wait for parent notification and exit */
@@ -93,7 +96,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
goto destroy_skel;
}
} else {
-pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1,
+pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */,
-1 /* group id */, 0 /* flags */);
if (!ASSERT_GE(pmu_fd, 0, "perf_event_open")) {
err = -1;
@@ -110,9 +113,9 @@ static void test_send_signal_common(struct perf_event_attr *attr,
ASSERT_EQ(read(pipe_c2p[0], buf, 1), 1, "pipe_read");
/* trigger the bpf send_signal */
-skel->bss->pid = pid;
-skel->bss->sig = SIGUSR1;
skel->bss->signal_thread = signal_thread;
+skel->bss->sig = SIGUSR1;
+skel->bss->pid = pid;
/* notify child that bpf program can send_signal now */
ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");

View File

@@ -0,0 +1,63 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "stacktrace_map_skip.skel.h"
#define TEST_STACK_DEPTH 2
void test_stacktrace_map_skip(void)
{
struct stacktrace_map_skip *skel;
int stackid_hmap_fd, stackmap_fd, stack_amap_fd;
int err, stack_trace_len;
skel = stacktrace_map_skip__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
/* find map fds */
stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
if (!ASSERT_GE(stackid_hmap_fd, 0, "stackid_hmap fd"))
goto out;
stackmap_fd = bpf_map__fd(skel->maps.stackmap);
if (!ASSERT_GE(stackmap_fd, 0, "stackmap fd"))
goto out;
stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
if (!ASSERT_GE(stack_amap_fd, 0, "stack_amap fd"))
goto out;
skel->bss->pid = getpid();
err = stacktrace_map_skip__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
/* give the BPF program some time to run */
sleep(1);
/* disable stack trace collection */
skel->bss->control = 1;
/* for every element in stackid_hmap, we can find a corresponding one
* in stackmap, and vice versa.
*/
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (!ASSERT_OK(err, "compare_map_keys stackid_hmap vs. stackmap"))
goto out;
err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (!ASSERT_OK(err, "compare_map_keys stackmap vs. stackid_hmap"))
goto out;
stack_trace_len = TEST_STACK_DEPTH * sizeof(__u64);
err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
if (!ASSERT_OK(err, "compare_stack_ips stackmap vs. stack_amap"))
goto out;
if (!ASSERT_EQ(skel->bss->failed, 0, "skip_failed"))
goto out;
out:
stacktrace_map_skip__destroy(skel);
}

View File

@@ -1,32 +1,83 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <time.h>
#include "test_subprogs.skel.h"
#include "test_subprogs_unused.skel.h"
static int duration;
struct toggler_ctx {
int fd;
bool stop;
};
-void test_subprogs(void)
+static void *toggle_jit_harden(void *arg)
{
struct toggler_ctx *ctx = arg;
char two = '2';
char zero = '0';
while (!ctx->stop) {
lseek(ctx->fd, 0, SEEK_SET);
write(ctx->fd, &two, sizeof(two));
lseek(ctx->fd, 0, SEEK_SET);
write(ctx->fd, &zero, sizeof(zero));
}
return NULL;
}
static void test_subprogs_with_jit_harden_toggling(void)
{
struct toggler_ctx ctx;
pthread_t toggler;
int err;
unsigned int i, loop = 10;
ctx.fd = open("/proc/sys/net/core/bpf_jit_harden", O_RDWR);
if (!ASSERT_GE(ctx.fd, 0, "open bpf_jit_harden"))
return;
ctx.stop = false;
err = pthread_create(&toggler, NULL, toggle_jit_harden, &ctx);
if (!ASSERT_OK(err, "new toggler"))
goto out;
/* Give the toggler thread a chance to run */
usleep(1);
for (i = 0; i < loop; i++) {
struct test_subprogs *skel = test_subprogs__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel open"))
break;
test_subprogs__destroy(skel);
}
ctx.stop = true;
pthread_join(toggler, NULL);
out:
close(ctx.fd);
}
static void test_subprogs_alone(void)
{
struct test_subprogs *skel;
struct test_subprogs_unused *skel2;
int err;
skel = test_subprogs__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
err = test_subprogs__attach(skel);
if (CHECK(err, "skel_attach", "failed to attach skeleton: %d\n", err))
if (!ASSERT_OK(err, "skel attach"))
goto cleanup;
usleep(1);
-CHECK(skel->bss->res1 != 12, "res1", "got %d, exp %d\n", skel->bss->res1, 12);
-CHECK(skel->bss->res2 != 17, "res2", "got %d, exp %d\n", skel->bss->res2, 17);
-CHECK(skel->bss->res3 != 19, "res3", "got %d, exp %d\n", skel->bss->res3, 19);
-CHECK(skel->bss->res4 != 36, "res4", "got %d, exp %d\n", skel->bss->res4, 36);
+ASSERT_EQ(skel->bss->res1, 12, "res1");
+ASSERT_EQ(skel->bss->res2, 17, "res2");
+ASSERT_EQ(skel->bss->res3, 19, "res3");
+ASSERT_EQ(skel->bss->res4, 36, "res4");
skel2 = test_subprogs_unused__open_and_load();
ASSERT_OK_PTR(skel2, "unused_progs_skel");
@@ -35,3 +86,11 @@ void test_subprogs(void)
cleanup:
test_subprogs__destroy(skel);
}
void test_subprogs(void)
{
if (test__start_subtest("subprogs_alone"))
test_subprogs_alone();
if (test__start_subtest("subprogs_and_jit_harden"))
test_subprogs_with_jit_harden_toggling();
}

View File

@@ -0,0 +1,78 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "test_subskeleton.skel.h"
#include "test_subskeleton_lib.subskel.h"
static void subskeleton_lib_setup(struct bpf_object *obj)
{
struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj);
if (!ASSERT_OK_PTR(lib, "open subskeleton"))
return;
*lib->rodata.var1 = 1;
*lib->data.var2 = 2;
lib->bss.var3->var3_1 = 3;
lib->bss.var3->var3_2 = 4;
test_subskeleton_lib__destroy(lib);
}
static int subskeleton_lib_subresult(struct bpf_object *obj)
{
struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj);
int result;
if (!ASSERT_OK_PTR(lib, "open subskeleton"))
return -EINVAL;
result = *lib->bss.libout1;
ASSERT_EQ(result, 1 + 2 + 3 + 4 + 5 + 6, "lib subresult");
ASSERT_OK_PTR(lib->progs.lib_perf_handler, "lib_perf_handler");
ASSERT_STREQ(bpf_program__name(lib->progs.lib_perf_handler),
"lib_perf_handler", "program name");
ASSERT_OK_PTR(lib->maps.map1, "map1");
ASSERT_STREQ(bpf_map__name(lib->maps.map1), "map1", "map name");
ASSERT_EQ(*lib->data.var5, 5, "__weak var5");
ASSERT_EQ(*lib->data.var6, 6, "extern var6");
ASSERT_TRUE(*lib->kconfig.CONFIG_BPF_SYSCALL, "CONFIG_BPF_SYSCALL");
test_subskeleton_lib__destroy(lib);
return result;
}
void test_subskeleton(void)
{
int err, result;
struct test_subskeleton *skel;
skel = test_subskeleton__open();
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->rodata->rovar1 = 10;
skel->rodata->var1 = 1;
subskeleton_lib_setup(skel->obj);
err = test_subskeleton__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto cleanup;
err = test_subskeleton__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto cleanup;
/* trigger tracepoint */
usleep(1);
result = subskeleton_lib_subresult(skel->obj) * 10;
ASSERT_EQ(skel->bss->out1, result, "unexpected calculation");
cleanup:
test_subskeleton__destroy(skel);
}

View File

@@ -10,8 +10,6 @@
* to drop unexpected traffic.
*/
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <linux/if.h>
#include <linux/if_tun.h>
@@ -19,10 +17,8 @@
#include <linux/sysctl.h>
#include <linux/time_types.h>
#include <linux/net_tstamp.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -92,91 +88,6 @@ static int write_file(const char *path, const char *newval)
return 0;
}
-struct nstoken {
-int orig_netns_fd;
-};
-static int setns_by_fd(int nsfd)
-{
-int err;
-err = setns(nsfd, CLONE_NEWNET);
-close(nsfd);
-if (!ASSERT_OK(err, "setns"))
-return err;
-/* Switch /sys to the new namespace so that e.g. /sys/class/net
-* reflects the devices in the new namespace.
-*/
-err = unshare(CLONE_NEWNS);
-if (!ASSERT_OK(err, "unshare"))
-return err;
-/* Make our /sys mount private, so the following umount won't
-* trigger the global umount in case it's shared.
-*/
-err = mount("none", "/sys", NULL, MS_PRIVATE, NULL);
-if (!ASSERT_OK(err, "remount private /sys"))
-return err;
-err = umount2("/sys", MNT_DETACH);
-if (!ASSERT_OK(err, "umount2 /sys"))
-return err;
-err = mount("sysfs", "/sys", "sysfs", 0, NULL);
-if (!ASSERT_OK(err, "mount /sys"))
-return err;
-err = mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL);
-if (!ASSERT_OK(err, "mount /sys/fs/bpf"))
-return err;
-return 0;
-}
-/**
-* open_netns() - Switch to specified network namespace by name.
-*
-* Returns token with which to restore the original namespace
-* using close_netns().
-*/
-static struct nstoken *open_netns(const char *name)
-{
-int nsfd;
-char nspath[PATH_MAX];
-int err;
-struct nstoken *token;
-token = calloc(1, sizeof(struct nstoken));
-if (!ASSERT_OK_PTR(token, "malloc token"))
-return NULL;
-token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY);
-if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net"))
-goto fail;
-snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name);
-nsfd = open(nspath, O_RDONLY | O_CLOEXEC);
-if (!ASSERT_GE(nsfd, 0, "open netns fd"))
-goto fail;
-err = setns_by_fd(nsfd);
-if (!ASSERT_OK(err, "setns_by_fd"))
-goto fail;
-return token;
-fail:
-free(token);
-return NULL;
-}
-static void close_netns(struct nstoken *token)
-{
-ASSERT_OK(setns_by_fd(token->orig_netns_fd), "setns_by_fd");
-free(token);
-}
static int netns_setup_namespaces(const char *verb)
{
const char * const *ns = namespaces;
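These helpers are removed here because, elsewhere in this series, open_netns()/close_netns() moved into the shared network_helpers, so other tests (e.g. the xdp_do_redirect test below) can reuse them. A hedged sketch of the unchanged call pattern:
struct nstoken *tok = open_netns("testns");
if (!ASSERT_OK_PTR(tok, "open_netns"))
return;
/* ... assertions that must run inside the namespace ... */
close_netns(tok);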

View File

@@ -13,14 +13,17 @@
#include "ima.skel.h"
static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
#define MAX_SAMPLES 4
static int _run_measured_process(const char *measured_dir, u32 *monitored_pid,
const char *cmd)
{
int child_pid, child_status;
child_pid = fork();
if (child_pid == 0) {
*monitored_pid = getpid();
execlp("./ima_setup.sh", "./ima_setup.sh", "run", measured_dir,
execlp("./ima_setup.sh", "./ima_setup.sh", cmd, measured_dir,
NULL);
exit(errno);
@@ -32,19 +35,39 @@ static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
return -EINVAL;
}
static u64 ima_hash_from_bpf;
static int run_measured_process(const char *measured_dir, u32 *monitored_pid)
{
return _run_measured_process(measured_dir, monitored_pid, "run");
}
static u64 ima_hash_from_bpf[MAX_SAMPLES];
static int ima_hash_from_bpf_idx;
static int process_sample(void *ctx, void *data, size_t len)
{
ima_hash_from_bpf = *((u64 *)data);
if (ima_hash_from_bpf_idx >= MAX_SAMPLES)
return -ENOSPC;
ima_hash_from_bpf[ima_hash_from_bpf_idx++] = *((u64 *)data);
return 0;
}
static void test_init(struct ima__bss *bss)
{
ima_hash_from_bpf_idx = 0;
bss->use_ima_file_hash = false;
bss->enable_bprm_creds_for_exec = false;
bss->enable_kernel_read_file = false;
bss->test_deny = false;
}
void test_test_ima(void)
{
char measured_dir_template[] = "/tmp/ima_measuredXXXXXX";
struct ring_buffer *ringbuf = NULL;
const char *measured_dir;
u64 bin_true_sample;
char cmd[256];
int err, duration = 0;
@@ -72,13 +95,127 @@ void test_test_ima(void)
if (CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno))
goto close_clean;
/*
* Test #1
* - Goal: obtain a sample with the bpf_ima_inode_hash() helper
* - Expected result: 1 sample (/bin/true)
*/
test_init(skel->bss);
err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
if (CHECK(err, "run_measured_process", "err = %d\n", err))
if (CHECK(err, "run_measured_process #1", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_EQ(err, 1, "num_samples_or_err");
ASSERT_NEQ(ima_hash_from_bpf, 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
/*
* Test #2
* - Goal: obtain samples with the bpf_ima_file_hash() helper
* - Expected result: 2 samples (./ima_setup.sh, /bin/true)
*/
test_init(skel->bss);
skel->bss->use_ima_file_hash = true;
err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
if (CHECK(err, "run_measured_process #2", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_EQ(err, 2, "num_samples_or_err");
ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
bin_true_sample = ima_hash_from_bpf[1];
/*
* Test #3
* - Goal: confirm that bpf_ima_inode_hash() returns a non-fresh digest
* - Expected result: 2 samples (/bin/true: non-fresh, fresh)
*/
test_init(skel->bss);
err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
"modify-bin");
if (CHECK(err, "modify-bin #3", "err = %d\n", err))
goto close_clean;
skel->bss->enable_bprm_creds_for_exec = true;
err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
if (CHECK(err, "run_measured_process #3", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_EQ(err, 2, "num_samples_or_err");
ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample, "sample_equal_or_err");
/* IMA refreshed the digest. */
ASSERT_NEQ(ima_hash_from_bpf[1], bin_true_sample,
"sample_different_or_err");
/*
* Test #4
* - Goal: verify that bpf_ima_file_hash() returns a fresh digest
* - Expected result: 4 samples (./ima_setup.sh: fresh, fresh;
* /bin/true: fresh, fresh)
*/
test_init(skel->bss);
skel->bss->use_ima_file_hash = true;
skel->bss->enable_bprm_creds_for_exec = true;
err = run_measured_process(measured_dir, &skel->bss->monitored_pid);
if (CHECK(err, "run_measured_process #4", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_EQ(err, 4, "num_samples_or_err");
ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[2], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[3], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[2], bin_true_sample,
"sample_different_or_err");
ASSERT_EQ(ima_hash_from_bpf[3], ima_hash_from_bpf[2],
"sample_equal_or_err");
skel->bss->use_ima_file_hash = false;
skel->bss->enable_bprm_creds_for_exec = false;
err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
"restore-bin");
if (CHECK(err, "restore-bin #3", "err = %d\n", err))
goto close_clean;
/*
* Test #5
* - Goal: obtain a sample from the kernel_read_file hook
* - Expected result: 2 samples (./ima_setup.sh, policy_test)
*/
test_init(skel->bss);
skel->bss->use_ima_file_hash = true;
skel->bss->enable_kernel_read_file = true;
err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
"load-policy");
if (CHECK(err, "run_measured_process #5", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_EQ(err, 2, "num_samples_or_err");
ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash");
ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash");
/*
* Test #6
* - Goal: ensure that the kernel_read_file hook denies an operation
* - Expected result: 0 samples
*/
test_init(skel->bss);
skel->bss->enable_kernel_read_file = true;
skel->bss->test_deny = true;
err = _run_measured_process(measured_dir, &skel->bss->monitored_pid,
"load-policy");
if (CHECK(!err, "run_measured_process #6", "err = %d\n", err))
goto close_clean;
err = ring_buffer__consume(ringbuf);
ASSERT_EQ(err, 0, "num_samples_or_err");
close_clean:
snprintf(cmd, sizeof(cmd), "./ima_setup.sh cleanup %s", measured_dir);
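The ring buffer consumed above is created in a part of the file this hunk does not show; a hedged sketch of how such a setup typically looks with libbpf (the map name "ringbuf" is an assumption):
ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf),
process_sample, NULL, NULL);
if (!ASSERT_OK_PTR(ringbuf, "ringbuf"))
goto close_clean;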

View File

@@ -0,0 +1,201 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include <net/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ipv6.h>
#include <linux/in6.h>
#include <linux/udp.h>
#include <bpf/bpf_endian.h>
#include "test_xdp_do_redirect.skel.h"
#define SYS(fmt, ...) \
({ \
char cmd[1024]; \
snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
if (!ASSERT_OK(system(cmd), cmd)) \
goto out; \
})
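/* Note: SYS() expands to a goto on failure, so every caller must have an
* "out:" label in scope.
*/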
struct udp_packet {
struct ethhdr eth;
struct ipv6hdr iph;
struct udphdr udp;
__u8 payload[64 - sizeof(struct udphdr)
- sizeof(struct ethhdr) - sizeof(struct ipv6hdr)];
} __packed;
static struct udp_packet pkt_udp = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.eth.h_dest = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
.eth.h_source = {0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb},
.iph.version = 6,
.iph.nexthdr = IPPROTO_UDP,
.iph.payload_len = bpf_htons(sizeof(struct udp_packet)
- offsetof(struct udp_packet, udp)),
.iph.hop_limit = 2,
.iph.saddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(1)},
.iph.daddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(2)},
.udp.source = bpf_htons(1),
.udp.dest = bpf_htons(1),
.udp.len = bpf_htons(sizeof(struct udp_packet)
- offsetof(struct udp_packet, udp)),
.payload = {0x42}, /* receiver XDP program matches on this */
};
static int attach_tc_prog(struct bpf_tc_hook *hook, int fd)
{
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd);
int ret;
ret = bpf_tc_hook_create(hook);
if (!ASSERT_OK(ret, "create tc hook"))
return ret;
ret = bpf_tc_attach(hook, &opts);
if (!ASSERT_OK(ret, "bpf_tc_attach")) {
bpf_tc_hook_destroy(hook);
return ret;
}
return 0;
}
/* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) -
* sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes
*/
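/* Sanity-checking that arithmetic under the assumption of x86-64 with 4K
* pages (the struct sizes below are approximate for that config only):
* 4096 - 152 (xdp_page_head) - 320 (skb_shared_info) - 256 (headroom) = 3368
*/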
#define MAX_PKT_SIZE 3368
static void test_max_pkt_size(int fd)
{
char data[MAX_PKT_SIZE + 1] = {};
int err;
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &data,
.data_size_in = MAX_PKT_SIZE,
.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
.repeat = 1,
);
err = bpf_prog_test_run_opts(fd, &opts);
ASSERT_OK(err, "prog_run_max_size");
opts.data_size_in += 1;
err = bpf_prog_test_run_opts(fd, &opts);
ASSERT_EQ(err, -EINVAL, "prog_run_too_big");
}
#define NUM_PKTS 10000
void test_xdp_do_redirect(void)
{
int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst;
char data[sizeof(pkt_udp) + sizeof(__u32)];
struct test_xdp_do_redirect *skel = NULL;
struct nstoken *nstoken = NULL;
struct bpf_link *link;
struct xdp_md ctx_in = { .data = sizeof(__u32),
.data_end = sizeof(data) };
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &data,
.data_size_in = sizeof(data),
.ctx_in = &ctx_in,
.ctx_size_in = sizeof(ctx_in),
.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
.repeat = NUM_PKTS,
.batch_size = 64,
);
DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
.attach_point = BPF_TC_INGRESS);
memcpy(&data[sizeof(__u32)], &pkt_udp, sizeof(pkt_udp));
*((__u32 *)data) = 0x42; /* metadata test value */
skel = test_xdp_do_redirect__open();
if (!ASSERT_OK_PTR(skel, "skel"))
return;
/* The XDP program we run with bpf_prog_run() will cycle through all
* three xmit (PASS/TX/REDIRECT) return codes, starting from the highest
* (REDIRECT) and ending up at PASS, so we should end up with two packets
* on the dst iface and NUM_PKTS-2 in the TC hook. We match the packets
* on the UDP payload.
*/
SYS("ip netns add testns");
nstoken = open_netns("testns");
if (!ASSERT_OK_PTR(nstoken, "setns"))
goto out;
SYS("ip link add veth_src type veth peer name veth_dst");
SYS("ip link set dev veth_src address 00:11:22:33:44:55");
SYS("ip link set dev veth_dst address 66:77:88:99:aa:bb");
SYS("ip link set dev veth_src up");
SYS("ip link set dev veth_dst up");
SYS("ip addr add dev veth_src fc00::1/64");
SYS("ip addr add dev veth_dst fc00::2/64");
SYS("ip neigh add fc00::2 dev veth_src lladdr 66:77:88:99:aa:bb");
/* We enable forwarding in the test namespace because that will cause
* the packets that go through the kernel stack (with XDP_PASS) to be
* forwarded back out the same interface (because of the packet dst
* combined with the interface addresses). When this happens, the
* regular forwarding path will end up going through the same
* veth_xdp_xmit() call as the XDP_REDIRECT code, which can cause a
* deadlock if it happens on the same CPU. There's a local_bh_disable()
* in the test_run code to prevent this, but an earlier version of the
* code didn't have this, so we keep the test behaviour to make sure the
* bug doesn't resurface.
*/
SYS("sysctl -qw net.ipv6.conf.all.forwarding=1");
ifindex_src = if_nametoindex("veth_src");
ifindex_dst = if_nametoindex("veth_dst");
if (!ASSERT_NEQ(ifindex_src, 0, "ifindex_src") ||
!ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst"))
goto out;
memcpy(skel->rodata->expect_dst, &pkt_udp.eth.h_dest, ETH_ALEN);
skel->rodata->ifindex_out = ifindex_src; /* redirect back to the same iface */
skel->rodata->ifindex_in = ifindex_src;
ctx_in.ingress_ifindex = ifindex_src;
tc_hook.ifindex = ifindex_src;
if (!ASSERT_OK(test_xdp_do_redirect__load(skel), "load"))
goto out;
link = bpf_program__attach_xdp(skel->progs.xdp_count_pkts, ifindex_dst);
if (!ASSERT_OK_PTR(link, "prog_attach"))
goto out;
skel->links.xdp_count_pkts = link;
tc_prog_fd = bpf_program__fd(skel->progs.tc_count_pkts);
if (attach_tc_prog(&tc_hook, tc_prog_fd))
goto out;
xdp_prog_fd = bpf_program__fd(skel->progs.xdp_redirect);
err = bpf_prog_test_run_opts(xdp_prog_fd, &opts);
if (!ASSERT_OK(err, "prog_run"))
goto out_tc;
/* wait for the packets to be flushed */
kern_sync_rcu();
/* There will be one packet sent through XDP_REDIRECT and one through
* XDP_TX; these will show up on the XDP counting program, while the
* rest will be counted at the TC ingress hook (and the counting program
* resets the packet payload so they don't get counted twice even though
* they are retransmitted out the veth device).
*/
ASSERT_EQ(skel->bss->pkts_seen_xdp, 2, "pkt_count_xdp");
ASSERT_EQ(skel->bss->pkts_seen_zero, 2, "pkt_count_zero");
ASSERT_EQ(skel->bss->pkts_seen_tc, NUM_PKTS - 2, "pkt_count_tc");
test_max_pkt_size(bpf_program__fd(skel->progs.xdp_count_pkts));
out_tc:
bpf_tc_hook_destroy(&tc_hook);
out:
if (nstoken)
close_netns(nstoken);
system("ip netns del testns");
test_xdp_do_redirect__destroy(skel);
}

View File

@@ -0,0 +1,66 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Google */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct bpf_testmod_btf_type_tag_1 {
int a;
};
struct bpf_testmod_btf_type_tag_2 {
struct bpf_testmod_btf_type_tag_1 *p;
};
__u64 g;
SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_1")
int BPF_PROG(test_percpu1, struct bpf_testmod_btf_type_tag_1 *arg)
{
g = arg->a;
return 0;
}
SEC("fentry/bpf_testmod_test_btf_type_tag_percpu_2")
int BPF_PROG(test_percpu2, struct bpf_testmod_btf_type_tag_2 *arg)
{
g = arg->p->a;
return 0;
}
/* trace_cgroup_mkdir(struct cgroup *cgrp, const char *path)
*
* struct cgroup_rstat_cpu {
* ...
* struct cgroup *updated_children;
* ...
* };
*
* struct cgroup {
* ...
* struct cgroup_rstat_cpu __percpu *rstat_cpu;
* ...
* };
*/
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_percpu_load, struct cgroup *cgrp, const char *path)
{
g = (__u64)cgrp->rstat_cpu->updated_children;
return 0;
}
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_percpu_helper, struct cgroup *cgrp, const char *path)
{
struct cgroup_rstat_cpu *rstat;
__u32 cpu;
cpu = bpf_get_smp_processor_id();
rstat = (struct cgroup_rstat_cpu *)bpf_per_cpu_ptr(cgrp->rstat_cpu, cpu);
if (rstat) {
/* READ_ONCE */
*(volatile int *)rstat;
}
return 0;
}

View File

@@ -18,8 +18,12 @@ struct {
char _license[] SEC("license") = "GPL";
SEC("lsm.s/bprm_committed_creds")
void BPF_PROG(ima, struct linux_binprm *bprm)
bool use_ima_file_hash;
bool enable_bprm_creds_for_exec;
bool enable_kernel_read_file;
bool test_deny;
static void ima_test_common(struct file *file)
{
u64 ima_hash = 0;
u64 *sample;
@@ -28,8 +32,12 @@ void BPF_PROG(ima, struct linux_binprm *bprm)
pid = bpf_get_current_pid_tgid() >> 32;
if (pid == monitored_pid) {
ret = bpf_ima_inode_hash(bprm->file->f_inode, &ima_hash,
sizeof(ima_hash));
if (!use_ima_file_hash)
ret = bpf_ima_inode_hash(file->f_inode, &ima_hash,
sizeof(ima_hash));
else
ret = bpf_ima_file_hash(file, &ima_hash,
sizeof(ima_hash));
if (ret < 0 || ima_hash == 0)
return;
@@ -43,3 +51,53 @@ void BPF_PROG(ima, struct linux_binprm *bprm)
return;
}
static int ima_test_deny(void)
{
u32 pid;
pid = bpf_get_current_pid_tgid() >> 32;
if (pid == monitored_pid && test_deny)
return -EPERM;
return 0;
}
SEC("lsm.s/bprm_committed_creds")
void BPF_PROG(bprm_committed_creds, struct linux_binprm *bprm)
{
ima_test_common(bprm->file);
}
SEC("lsm.s/bprm_creds_for_exec")
int BPF_PROG(bprm_creds_for_exec, struct linux_binprm *bprm)
{
if (!enable_bprm_creds_for_exec)
return 0;
ima_test_common(bprm->file);
return 0;
}
SEC("lsm.s/kernel_read_file")
int BPF_PROG(kernel_read_file, struct file *file, enum kernel_read_file_id id,
bool contents)
{
int ret;
if (!enable_kernel_read_file)
return 0;
if (!contents)
return 0;
if (id != READING_POLICY)
return 0;
ret = ima_test_deny();
if (ret < 0)
return ret;
ima_test_common(file);
return 0;
}

View File

@@ -0,0 +1,100 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
char _license[] SEC("license") = "GPL";
extern const void bpf_fentry_test1 __ksym;
extern const void bpf_fentry_test2 __ksym;
extern const void bpf_fentry_test3 __ksym;
extern const void bpf_fentry_test4 __ksym;
extern const void bpf_fentry_test5 __ksym;
extern const void bpf_fentry_test6 __ksym;
extern const void bpf_fentry_test7 __ksym;
extern const void bpf_fentry_test8 __ksym;
int pid = 0;
bool test_cookie = false;
__u64 kprobe_test1_result = 0;
__u64 kprobe_test2_result = 0;
__u64 kprobe_test3_result = 0;
__u64 kprobe_test4_result = 0;
__u64 kprobe_test5_result = 0;
__u64 kprobe_test6_result = 0;
__u64 kprobe_test7_result = 0;
__u64 kprobe_test8_result = 0;
__u64 kretprobe_test1_result = 0;
__u64 kretprobe_test2_result = 0;
__u64 kretprobe_test3_result = 0;
__u64 kretprobe_test4_result = 0;
__u64 kretprobe_test5_result = 0;
__u64 kretprobe_test6_result = 0;
__u64 kretprobe_test7_result = 0;
__u64 kretprobe_test8_result = 0;
extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
static void kprobe_multi_check(void *ctx, bool is_return)
{
if (bpf_get_current_pid_tgid() >> 32 != pid)
return;
__u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
__u64 addr = bpf_get_func_ip(ctx) - (CONFIG_X86_KERNEL_IBT ? 4 : 0);
#define SET(__var, __addr, __cookie) ({ \
if (((const void *) addr == __addr) && \
(!test_cookie || (cookie == __cookie))) \
__var = 1; \
})
if (is_return) {
SET(kretprobe_test1_result, &bpf_fentry_test1, 8);
SET(kretprobe_test2_result, &bpf_fentry_test2, 7);
SET(kretprobe_test3_result, &bpf_fentry_test3, 6);
SET(kretprobe_test4_result, &bpf_fentry_test4, 5);
SET(kretprobe_test5_result, &bpf_fentry_test5, 4);
SET(kretprobe_test6_result, &bpf_fentry_test6, 3);
SET(kretprobe_test7_result, &bpf_fentry_test7, 2);
SET(kretprobe_test8_result, &bpf_fentry_test8, 1);
} else {
SET(kprobe_test1_result, &bpf_fentry_test1, 1);
SET(kprobe_test2_result, &bpf_fentry_test2, 2);
SET(kprobe_test3_result, &bpf_fentry_test3, 3);
SET(kprobe_test4_result, &bpf_fentry_test4, 4);
SET(kprobe_test5_result, &bpf_fentry_test5, 5);
SET(kprobe_test6_result, &bpf_fentry_test6, 6);
SET(kprobe_test7_result, &bpf_fentry_test7, 7);
SET(kprobe_test8_result, &bpf_fentry_test8, 8);
}
#undef SET
}
/*
* No tests in here; this just triggers the 'bpf_fentry_test*'
* functions through the tracing test_run path
*/
SEC("fentry/bpf_modify_return_test")
int BPF_PROG(trigger)
{
return 0;
}
SEC("kprobe.multi/bpf_fentry_tes??")
int test_kprobe(struct pt_regs *ctx)
{
kprobe_multi_check(ctx, false);
return 0;
}
SEC("kretprobe.multi/bpf_fentry_test*")
int test_kretprobe(struct pt_regs *ctx)
{
kprobe_multi_check(ctx, true);
return 0;
}
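For reference, the user-space side can attach these programs either through the SEC() patterns above or explicitly; a hedged sketch using the attach API introduced in this series (the skeleton field and cleanup label are assumptions):
DECLARE_LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
struct bpf_link *link;
/* attach one program to every kallsyms entry matching the glob */
link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
"bpf_fentry_test*", &opts);
if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
goto cleanup;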

View File

@@ -36,6 +36,13 @@ struct {
__type(value, struct local_storage);
} sk_storage_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
__type(key, int);
__type(value, struct local_storage);
} sk_storage_map2 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
@@ -115,7 +122,19 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
if (storage->value != DUMMY_STORAGE_VALUE)
sk_storage_result = -1;
/* This tests that we can associate multiple elements
* with the local storage.
*/
storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
return 0;
err = bpf_sk_storage_delete(&sk_storage_map, sock->sk);
if (err)
return 0;
err = bpf_sk_storage_delete(&sk_storage_map2, sock->sk);
if (!err)
sk_storage_result = err;

View File

@@ -0,0 +1,68 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#define TEST_STACK_DEPTH 2
#define TEST_MAX_ENTRIES 16384
typedef __u64 stack_trace_t[TEST_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, TEST_MAX_ENTRIES);
__type(key, __u32);
__type(value, stack_trace_t);
} stackmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, TEST_MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} stackid_hmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, TEST_MAX_ENTRIES);
__type(key, __u32);
__type(value, stack_trace_t);
} stack_amap SEC(".maps");
int pid = 0;
int control = 0;
int failed = 0;
SEC("tracepoint/sched/sched_switch")
int oncpu(struct trace_event_raw_sched_switch *ctx)
{
__u32 max_len = TEST_STACK_DEPTH * sizeof(__u64);
__u32 key = 0, val = 0;
__u64 *stack_p;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
if (control)
return 0;
/* it should allow skipping a whole buffer's worth of entries */
key = bpf_get_stackid(ctx, &stackmap, TEST_STACK_DEPTH);
if ((int)key >= 0) {
/* The size of stackmap and stack_amap should be the same */
bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
stack_p = bpf_map_lookup_elem(&stack_amap, &key);
if (stack_p) {
bpf_get_stack(ctx, stack_p, max_len, TEST_STACK_DEPTH);
/* it wrongly skipped all the entries and filled the buffer with zeros */
if (stack_p[0] == 0)
failed = 1;
}
} else {
/* old kernel doesn't support skipping that many entries */
failed = 2;
}
return 0;
}
char _license[] SEC("license") = "GPL";
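For reference, the skip count occupies the low 8 bits of the flags argument (BPF_F_SKIP_FIELD_MASK), so it can be combined with the behavior flags; a minimal sketch:
/* skip TEST_STACK_DEPTH entries and use fast stack comparison */
key = bpf_get_stackid(ctx, &stackmap,
TEST_STACK_DEPTH | BPF_F_FAST_STACK_CMP);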

View File

@@ -0,0 +1,63 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
const volatile int my_pid;
bool abc1_called;
bool abc2_called;
bool custom1_called;
bool custom2_called;
bool kprobe1_called;
bool xyz_called;
SEC("abc")
int abc1(void *ctx)
{
abc1_called = true;
return 0;
}
SEC("abc/whatever")
int abc2(void *ctx)
{
abc2_called = true;
return 0;
}
SEC("custom")
int custom1(void *ctx)
{
custom1_called = true;
return 0;
}
SEC("custom/something")
int custom2(void *ctx)
{
custom2_called = true;
return 0;
}
SEC("kprobe")
int kprobe1(void *ctx)
{
kprobe1_called = true;
return 0;
}
SEC("xyz/blah")
int xyz(void *ctx)
{
int whatever;
/* use sleepable helper, custom handler should set sleepable flag */
bpf_copy_from_user(&whatever, sizeof(whatever), NULL);
xyz_called = true;
return 0;
}
char _license[] SEC("license") = "GPL";
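The user-space half of this test registers handlers for these sections before opening the object; a hedged sketch of the registration API added in this series (callbacks and cookie omitted for brevity):
DECLARE_LIBBPF_OPTS(libbpf_prog_handler_opts, opts);
int id;
/* make SEC("custom") programs load as kprobe programs */
id = libbpf_register_prog_handler("custom", BPF_PROG_TYPE_KPROBE, 0, &opts);
if (id < 0)
return id;
/* ... open/load/attach the object ... */
libbpf_unregister_prog_handler(id);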

View File

@@ -10,7 +10,7 @@ static __always_inline int bpf_send_signal_test(void *ctx)
{
int ret;
if (status != 0 || sig == 0 || pid == 0)
if (status != 0 || pid == 0)
return 0;
if ((bpf_get_current_pid_tgid() >> 32) == pid) {

View File

@@ -413,15 +413,20 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
/* Narrow loads from remote_port field. Expect SRC_PORT. */
if (LSB(ctx->remote_port, 0) != ((SRC_PORT >> 0) & 0xff) ||
LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff) ||
LSB(ctx->remote_port, 2) != 0 || LSB(ctx->remote_port, 3) != 0)
LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff))
return SK_DROP;
if (LSW(ctx->remote_port, 0) != SRC_PORT)
return SK_DROP;
/* Load from remote_port field with zero padding (backward compatibility) */
/*
* NOTE: 4-byte load from bpf_sk_lookup at remote_port offset
* is quirky. It gets rewritten by the access converter to a
* 2-byte load for backward compatibility. Treating the load
* result as a be16 value makes the code portable across
* little- and big-endian platforms.
*/
val_u32 = *(__u32 *)&ctx->remote_port;
if (val_u32 != bpf_htonl(bpf_ntohs(SRC_PORT) << 16))
if (val_u32 != SRC_PORT)
return SK_DROP;
/* Narrow loads from local_port field. Expect DST_PORT. */

View File

@@ -114,7 +114,7 @@ static void tpcpy(struct bpf_tcp_sock *dst,
#define RET_LOG() ({ \
linum = __LINE__; \
bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_NOEXIST); \
bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_ANY); \
return CG_OK; \
})
@@ -134,11 +134,11 @@ int egress_read_sock_fields(struct __sk_buff *skb)
if (!sk)
RET_LOG();
/* Not the testing egress traffic or
* TCP_LISTEN (10) socket will be copied at the ingress side.
/* Not testing the egress traffic or the listening socket,
* which are covered by the cgroup_skb/ingress test program.
*/
if (sk->family != AF_INET6 || !is_loopback6(sk->src_ip6) ||
sk->state == 10)
sk->state == BPF_TCP_LISTEN)
return CG_OK;
if (sk->src_port == bpf_ntohs(srv_sa6.sin6_port)) {
@@ -232,8 +232,8 @@ int ingress_read_sock_fields(struct __sk_buff *skb)
sk->src_port != bpf_ntohs(srv_sa6.sin6_port))
return CG_OK;
/* Only interested in TCP_LISTEN */
if (sk->state != 10)
/* Only interested in the listening socket */
if (sk->state != BPF_TCP_LISTEN)
return CG_OK;
/* It must be a fullsock for cgroup_skb/ingress prog */
@@ -251,10 +251,16 @@ int ingress_read_sock_fields(struct __sk_buff *skb)
return CG_OK;
}
/*
* NOTE: 4-byte load from bpf_sock at dst_port offset is quirky. It
* gets rewritten by the access converter to a 2-byte load for
* backward compatibility. Treating the load result as a be16 value
* makes the code portable across little- and big-endian platforms.
*/
static __noinline bool sk_dst_port__load_word(struct bpf_sock *sk)
{
__u32 *word = (__u32 *)&sk->dst_port;
return word[0] == bpf_htonl(0xcafe0000);
return word[0] == bpf_htons(0xcafe);
}
static __noinline bool sk_dst_port__load_half(struct bpf_sock *sk)
@@ -281,6 +287,10 @@ int read_sk_dst_port(struct __sk_buff *skb)
if (!sk)
RET_LOG();
/* Ignore everything but the SYN from the client socket */
if (sk->state != BPF_TCP_SYN_SENT)
return CG_OK;
if (!sk_dst_port__load_word(sk))
RET_LOG();
if (!sk_dst_port__load_half(sk))

View File

@@ -0,0 +1,28 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* volatile to force a read, compiler may assume 0 otherwise */
const volatile int rovar1;
int out1;
/* Override weak symbol in test_subskeleton_lib */
int var5 = 5;
extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
extern int lib_routine(void);
SEC("raw_tp/sys_enter")
int handler1(const void *ctx)
{
(void) CONFIG_BPF_SYSCALL;
out1 = lib_routine() * rovar1;
return 0;
}
char LICENSE[] SEC("license") = "GPL";

View File

@@ -0,0 +1,61 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* volatile to force a read */
const volatile int var1;
volatile int var2 = 1;
struct {
int var3_1;
__s64 var3_2;
} var3;
int libout1;
extern volatile bool CONFIG_BPF_SYSCALL __kconfig;
int var4[4];
__weak int var5 SEC(".data");
/* Extern and definition are fully contained within the library */
extern int var6;
int var7 SEC(".data.custom");
int (*fn_ptr)(void);
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 16);
} map1 SEC(".maps");
extern struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 16);
} map2 SEC(".maps");
int lib_routine(void)
{
__u32 key = 1, value = 2;
(void) CONFIG_BPF_SYSCALL;
bpf_map_update_elem(&map2, &key, &value, BPF_ANY);
libout1 = var1 + var2 + var3.var3_1 + var3.var3_2 + var5 + var6;
return libout1;
}
SEC("perf_event")
int lib_perf_handler(struct pt_regs *ctx)
{
return 0;
}
char LICENSE[] SEC("license") = "GPL";

View File

@@ -0,0 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
int var6 = 6;
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 16);
} map2 SEC(".maps");
char LICENSE[] SEC("license") = "GPL";

View File

@@ -174,13 +174,13 @@ int egress_host(struct __sk_buff *skb)
return TC_ACT_OK;
if (skb_proto(skb_type) == IPPROTO_TCP) {
if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_MONO &&
if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO &&
skb->tstamp)
inc_dtimes(EGRESS_ENDHOST);
else
inc_errs(EGRESS_ENDHOST);
} else {
if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_UNSPEC &&
if (skb->tstamp_type == BPF_SKB_TSTAMP_UNSPEC &&
skb->tstamp)
inc_dtimes(EGRESS_ENDHOST);
else
@@ -204,7 +204,7 @@ int ingress_host(struct __sk_buff *skb)
if (!skb_type)
return TC_ACT_OK;
if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_MONO &&
if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO &&
skb->tstamp == EGRESS_FWDNS_MAGIC)
inc_dtimes(INGRESS_ENDHOST);
else
@@ -226,7 +226,7 @@ int ingress_fwdns_prio100(struct __sk_buff *skb)
return TC_ACT_OK;
/* delivery_time is only available to the ingress
* if the tc-bpf checks the skb->delivery_time_type.
* if the tc-bpf checks the skb->tstamp_type.
*/
if (skb->tstamp == EGRESS_ENDHOST_MAGIC)
inc_errs(INGRESS_FWDNS_P100);
@@ -250,7 +250,7 @@ int egress_fwdns_prio100(struct __sk_buff *skb)
return TC_ACT_OK;
/* delivery_time is always available to egress even if
* the tc-bpf did not use the delivery_time_type.
* the tc-bpf did not use the tstamp_type.
*/
if (skb->tstamp == INGRESS_FWDNS_MAGIC)
inc_dtimes(EGRESS_FWDNS_P100);
@@ -278,9 +278,9 @@ int ingress_fwdns_prio101(struct __sk_buff *skb)
if (skb_proto(skb_type) == IPPROTO_UDP)
expected_dtime = 0;
if (skb->delivery_time_type) {
if (skb->tstamp_type) {
if (fwdns_clear_dtime() ||
skb->delivery_time_type != BPF_SKB_DELIVERY_TIME_MONO ||
skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO ||
skb->tstamp != expected_dtime)
inc_errs(INGRESS_FWDNS_P101);
else
@@ -290,14 +290,14 @@ int ingress_fwdns_prio101(struct __sk_buff *skb)
inc_errs(INGRESS_FWDNS_P101);
}
if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_MONO) {
if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) {
skb->tstamp = INGRESS_FWDNS_MAGIC;
} else {
if (bpf_skb_set_delivery_time(skb, INGRESS_FWDNS_MAGIC,
BPF_SKB_DELIVERY_TIME_MONO))
if (bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
BPF_SKB_TSTAMP_DELIVERY_MONO))
inc_errs(SET_DTIME);
if (!bpf_skb_set_delivery_time(skb, INGRESS_FWDNS_MAGIC,
BPF_SKB_DELIVERY_TIME_UNSPEC))
if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
BPF_SKB_TSTAMP_UNSPEC))
inc_errs(SET_DTIME);
}
@@ -320,9 +320,9 @@ int egress_fwdns_prio101(struct __sk_buff *skb)
/* Should have handled in prio100 */
return TC_ACT_SHOT;
if (skb->delivery_time_type) {
if (skb->tstamp_type) {
if (fwdns_clear_dtime() ||
skb->delivery_time_type != BPF_SKB_DELIVERY_TIME_MONO ||
skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO ||
skb->tstamp != INGRESS_FWDNS_MAGIC)
inc_errs(EGRESS_FWDNS_P101);
else
@@ -332,14 +332,14 @@ int egress_fwdns_prio101(struct __sk_buff *skb)
inc_errs(EGRESS_FWDNS_P101);
}
if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_MONO) {
if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) {
skb->tstamp = EGRESS_FWDNS_MAGIC;
} else {
if (bpf_skb_set_delivery_time(skb, EGRESS_FWDNS_MAGIC,
BPF_SKB_DELIVERY_TIME_MONO))
if (bpf_skb_set_tstamp(skb, EGRESS_FWDNS_MAGIC,
BPF_SKB_TSTAMP_DELIVERY_MONO))
inc_errs(SET_DTIME);
if (!bpf_skb_set_delivery_time(skb, EGRESS_FWDNS_MAGIC,
BPF_SKB_DELIVERY_TIME_UNSPEC))
if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC,
BPF_SKB_TSTAMP_UNSPEC))
inc_errs(SET_DTIME);
}

View File

@@ -0,0 +1,100 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#define ETH_ALEN 6
#define HDR_SZ (sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + sizeof(struct udphdr))
const volatile int ifindex_out;
const volatile int ifindex_in;
const volatile __u8 expect_dst[ETH_ALEN];
volatile int pkts_seen_xdp = 0;
volatile int pkts_seen_zero = 0;
volatile int pkts_seen_tc = 0;
volatile int retcode = XDP_REDIRECT;
SEC("xdp")
int xdp_redirect(struct xdp_md *xdp)
{
__u32 *metadata = (void *)(long)xdp->data_meta;
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
__u8 *payload = data + HDR_SZ;
int ret = retcode;
if (payload + 1 > data_end)
return XDP_ABORTED;
if (xdp->ingress_ifindex != ifindex_in)
return XDP_ABORTED;
if (metadata + 1 > data)
return XDP_ABORTED;
if (*metadata != 0x42)
return XDP_ABORTED;
if (*payload == 0) {
*payload = 0x42;
pkts_seen_zero++;
}
if (bpf_xdp_adjust_meta(xdp, 4))
return XDP_ABORTED;
if (retcode > XDP_PASS)
retcode--;
if (ret == XDP_REDIRECT)
return bpf_redirect(ifindex_out, 0);
return ret;
}
static bool check_pkt(void *data, void *data_end)
{
struct ipv6hdr *iph = data + sizeof(struct ethhdr);
__u8 *payload = data + HDR_SZ;
if (payload + 1 > data_end)
return false;
if (iph->nexthdr != IPPROTO_UDP || *payload != 0x42)
return false;
/* reset the payload so the same packet doesn't get counted twice when
* it cycles back through the kernel path and out the dst veth
*/
*payload = 0;
return true;
}
SEC("xdp")
int xdp_count_pkts(struct xdp_md *xdp)
{
void *data = (void *)(long)xdp->data;
void *data_end = (void *)(long)xdp->data_end;
if (check_pkt(data, data_end))
pkts_seen_xdp++;
/* Return XDP_DROP to make sure the data page is recycled, like when it
* exits a physical NIC. Recycled pages will be counted in the
* pkts_seen_zero counter above.
*/
return XDP_DROP;
}
SEC("tc")
int tc_count_pkts(struct __sk_buff *skb)
{
void *data = (void *)(long)skb->data;
void *data_end = (void *)(long)skb->data_end;
if (check_pkt(data, data_end))
pkts_seen_tc++;
return 0;
}
char _license[] SEC("license") = "GPL";

View File

@@ -36,7 +36,7 @@ int main(int argc, char **argv)
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
size_t insns_cnt = ARRAY_SIZE(prog);
int error = EXIT_FAILURE;
int map_fd, percpu_map_fd, prog_fd, cgroup_fd;
struct bpf_cgroup_storage_key key;

View File

@@ -3,6 +3,7 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
ret=$ksft_skip
msg="skip all tests:"
if [ $UID != 0 ]; then
@@ -25,7 +26,7 @@ do
fi
done
if [ -n $LIRCDEV ];
if [ -n "$LIRCDEV" ];
then
TYPE=lirc_mode2
./test_lirc_mode2_user $LIRCDEV $INPUTDEV
@@ -36,3 +37,5 @@ then
echo -e ${GREEN}"PASS: $TYPE"${NC}
fi
fi
exit $ret

View File

@@ -878,11 +878,11 @@ int main(int argc, char **argv)
assert(nr_cpus != -1);
printf("nr_cpus:%d\n\n", nr_cpus);
for (f = 0; f < sizeof(map_flags) / sizeof(*map_flags); f++) {
for (f = 0; f < ARRAY_SIZE(map_flags); f++) {
unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;
for (t = 0; t < sizeof(map_types) / sizeof(*map_types); t++) {
for (t = 0; t < ARRAY_SIZE(map_types); t++) {
test_lru_sanity0(map_types[t], map_flags[f]);
test_lru_sanity1(map_types[t], map_flags[f], tgt_free);
test_lru_sanity2(map_types[t], map_flags[f], tgt_free);

View File

@@ -120,6 +120,14 @@ setup()
ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0
ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0
# disable IPv6 DAD because it sometimes takes too long and fails tests
ip netns exec ${NS1} sysctl -wq net.ipv6.conf.all.accept_dad=0
ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.accept_dad=0
ip netns exec ${NS3} sysctl -wq net.ipv6.conf.all.accept_dad=0
ip netns exec ${NS1} sysctl -wq net.ipv6.conf.default.accept_dad=0
ip netns exec ${NS2} sysctl -wq net.ipv6.conf.default.accept_dad=0
ip netns exec ${NS3} sysctl -wq net.ipv6.conf.default.accept_dad=0
ip link add veth1 type veth peer name veth2
ip link add veth3 type veth peer name veth4
ip link add veth5 type veth peer name veth6
@@ -289,7 +297,7 @@ test_ping()
ip netns exec ${NS1} ping -c 1 -W 1 -I veth1 ${IPv4_DST} 2>&1 > /dev/null
RET=$?
elif [ "${PROTO}" == "IPv6" ] ; then
ip netns exec ${NS1} ping6 -c 1 -W 6 -I veth1 ${IPv6_DST} 2>&1 > /dev/null
ip netns exec ${NS1} ping6 -c 1 -W 1 -I veth1 ${IPv6_DST} 2>&1 > /dev/null
RET=$?
else
echo " test_ping: unknown PROTO: ${PROTO}"

View File

@@ -723,7 +723,7 @@ static int xmsg_ret_only_prog_load(const struct sock_addr_test *test,
BPF_MOV64_IMM(BPF_REG_0, rc),
BPF_EXIT_INSN(),
};
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
return load_insns(test, insns, ARRAY_SIZE(insns));
}
static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
@@ -795,7 +795,7 @@ static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
BPF_EXIT_INSN(),
};
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
return load_insns(test, insns, ARRAY_SIZE(insns));
}
static int recvmsg4_rw_c_prog_load(const struct sock_addr_test *test)
@@ -858,7 +858,7 @@ static int sendmsg6_rw_dst_asm_prog_load(const struct sock_addr_test *test,
BPF_EXIT_INSN(),
};
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
return load_insns(test, insns, ARRAY_SIZE(insns));
}
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test)

View File

@@ -1786,7 +1786,7 @@ static int populate_progs(char *bpf_file)
i++;
}
for (i = 0; i < sizeof(map_fd)/sizeof(int); i++) {
for (i = 0; i < ARRAY_SIZE(map_fd); i++) {
maps[i] = bpf_object__find_map_by_name(obj, map_names[i]);
map_fd[i] = bpf_map__fd(maps[i]);
if (map_fd[i] < 0) {
@@ -1867,7 +1867,7 @@ static int __test_selftests(int cg_fd, struct sockmap_options *opt)
}
/* Tests basic commands and APIs */
for (i = 0; i < sizeof(test)/sizeof(struct _test); i++) {
for (i = 0; i < ARRAY_SIZE(test); i++) {
struct _test t = test[i];
if (check_whitelist(&t, opt) != 0)

View File

@@ -39,7 +39,7 @@
# from root namespace, the following operations happen:
# 1) Route lookup shows 10.1.1.100/24 belongs to tnl dev, fwd to tnl dev.
# 2) Tnl device's egress BPF program is triggered and sets the tunnel metadata,
# with remote_ip=172.16.1.200 and others.
# with remote_ip=172.16.1.100 and others.
# 3) Outer tunnel header is prepended and the packet is routed to veth1's egress
# 4) veth0's ingress queue receives the tunneled packet in namespace at_ns0
# 5) Tunnel protocol handler, ex: vxlan_rcv, decaps the packet

View File

@@ -22,8 +22,6 @@
#include <limits.h>
#include <assert.h>
#include <sys/capability.h>
#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
@@ -42,6 +40,7 @@
# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "cap_helpers.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "test_btf.h"
@@ -62,6 +61,10 @@
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
#define ADMIN_CAPS (1ULL << CAP_NET_ADMIN | \
1ULL << CAP_PERFMON | \
1ULL << CAP_BPF)
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;
@@ -973,47 +976,19 @@ struct libcap {
static int set_admin(bool admin)
{
cap_t caps;
/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
const cap_value_t cap_net_admin = CAP_NET_ADMIN;
const cap_value_t cap_sys_admin = CAP_SYS_ADMIN;
struct libcap *cap;
int ret = -1;
int err;
caps = cap_get_proc();
if (!caps) {
perror("cap_get_proc");
return -1;
}
cap = (struct libcap *)caps;
if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_sys_admin, CAP_CLEAR)) {
perror("cap_set_flag clear admin");
goto out;
}
if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_admin,
admin ? CAP_SET : CAP_CLEAR)) {
perror("cap_set_flag set_or_clear net");
goto out;
}
/* libcap is likely old and simply ignores CAP_BPF and CAP_PERFMON,
* so update effective bits manually
*/
if (admin) {
cap->data[1].effective |= 1 << (38 /* CAP_PERFMON */ - 32);
cap->data[1].effective |= 1 << (39 /* CAP_BPF */ - 32);
err = cap_enable_effective(ADMIN_CAPS, NULL);
if (err)
perror("cap_enable_effective(ADMIN_CAPS)");
} else {
cap->data[1].effective &= ~(1 << (38 - 32));
cap->data[1].effective &= ~(1 << (39 - 32));
err = cap_disable_effective(ADMIN_CAPS, NULL);
if (err)
perror("cap_disable_effective(ADMIN_CAPS)");
}
if (cap_set_proc(caps)) {
perror("cap_set_proc");
goto out;
}
ret = 0;
out:
if (cap_free(caps))
perror("cap_free");
return ret;
return err;
}
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
@@ -1291,31 +1266,18 @@ fail_log:
static bool is_admin(void)
{
cap_flag_value_t net_priv = CAP_CLEAR;
bool perfmon_priv = false;
bool bpf_priv = false;
struct libcap *cap;
cap_t caps;
__u64 caps;
#ifdef CAP_IS_SUPPORTED
if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
perror("cap_get_flag");
/* The test checks for the finer-grained caps CAP_NET_ADMIN,
* CAP_PERFMON, and CAP_BPF instead of CAP_SYS_ADMIN.
* Thus, disable CAP_SYS_ADMIN at the beginning.
*/
if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps)) {
perror("cap_disable_effective(CAP_SYS_ADMIN)");
return false;
}
#endif
caps = cap_get_proc();
if (!caps) {
perror("cap_get_proc");
return false;
}
cap = (struct libcap *)caps;
bpf_priv = cap->data[1].effective & (1 << (39/* CAP_BPF */ - 32));
perfmon_priv = cap->data[1].effective & (1 << (38/* CAP_PERFMON */ - 32));
if (cap_get_flag(caps, CAP_NET_ADMIN, CAP_EFFECTIVE, &net_priv))
perror("cap_get_flag NET");
if (cap_free(caps))
perror("cap_free");
return bpf_priv && perfmon_priv && net_priv == CAP_SET;
return (caps & ADMIN_CAPS) == ADMIN_CAPS;
}
static void get_unpriv_disabled()
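The cap_helpers interface used above replaces libcap with plain capget()/capset() over a 64-bit capability mask; a hedged sketch of typical usage:
__u64 old_caps;
int err;
/* drop CAP_SYS_ADMIN, remembering the previous effective set */
err = cap_disable_effective(1ULL << CAP_SYS_ADMIN, &old_caps);
if (err)
return err;
/* ... run the unprivileged part ... */
/* restore whatever was effective before */
err = cap_enable_effective(old_caps, NULL);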

View File

@@ -34,6 +34,13 @@ int load_kallsyms(void)
if (!f)
return -ENOENT;
/*
* This is called from multiple places;
* load the symbols just once.
*/
if (sym_cnt)
return 0;
while (fgets(buf, sizeof(buf), f)) {
if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
break;

View File

@@ -105,7 +105,7 @@
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 has pointer with unsupported alu operation",
.errstr = "dereference of modified ctx ptr",
.errstr = "negative offset ctx ptr R1 off=-1 disallowed",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},

View File

@@ -115,6 +115,89 @@
{ "bpf_kfunc_call_test_release", 5 },
},
},
{
"calls: invalid kfunc call: reg->off must be zero when passed to release kfunc",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "R1 must have zero offset when passed to release func",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 3 },
{ "bpf_kfunc_call_memb_release", 8 },
},
},
{
"calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 16),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 3 },
{ "bpf_kfunc_call_test_release", 9 },
},
.result_unpriv = REJECT,
.result = REJECT,
.errstr = "negative offset ptr_ ptr R1 off=-4 disallowed",
},
{
"calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 3 },
{ "bpf_kfunc_call_test_release", 9 },
{ "bpf_kfunc_call_test_release", 13 },
{ "bpf_kfunc_call_test_release", 17 },
},
.result_unpriv = REJECT,
.result = REJECT,
.errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
},
{
"calls: basic sanity",
.insns = {

View File

@@ -58,7 +58,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "dereference of modified ctx ptr",
.errstr = "negative offset ctx ptr R1 off=-612 disallowed",
},
{
"pass modified ctx pointer to helper, 2",
@@ -71,8 +71,8 @@
},
.result_unpriv = REJECT,
.result = REJECT,
.errstr_unpriv = "dereference of modified ctx ptr",
.errstr = "dereference of modified ctx ptr",
.errstr_unpriv = "negative offset ctx ptr R1 off=-612 disallowed",
.errstr = "negative offset ctx ptr R1 off=-612 disallowed",
},
{
"pass modified ctx pointer to helper, 3",
@@ -141,7 +141,7 @@
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
.result = REJECT,
.errstr = "dereference of modified ctx ptr",
.errstr = "negative offset ctx ptr R1 off=-612 disallowed",
},
{
"pass ctx or null check, 5: null (connect)",