Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Alexei Starovoitov says:

====================
pull-request: bpf-next 2019-10-14

The following pull-request contains BPF updates for your *net-next* tree.

12 days of development and
85 files changed, 1889 insertions(+), 1020 deletions(-)

The main changes are:

1) auto-generation of bpf_helper_defs.h, from Andrii.

2) split of bpf_helpers.h into bpf_{helpers, helper_defs, endian, tracing}.h
   and move into libbpf, from Andrii.

3) Track contents of read-only maps as scalars in the verifier, from Andrii.

4) small x86 JIT optimization, from Daniel.

5) cross compilation support, from Ivan.

6) bpf flow_dissector enhancements, from Jakub and Stanislav.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2019-10-14 12:17:21 -07:00, commit a98d62c3ee.
85 changed files with 1888 additions and 1019 deletions


@ -142,3 +142,6 @@ BPF flow dissector doesn't support exporting all the metadata that the in-kernel
C-based implementation can export. A notable example is single VLAN (802.1Q)
and double VLAN (802.1AD) tags. Please refer to ``struct bpf_flow_keys``
for the set of information that can currently be exported from the BPF context.
When BPF flow dissector is attached to the root network namespace (machine-wide
policy), users can't override it in their child network namespaces.
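A minimal userspace sketch of the attach call these rules govern (hypothetical snippet, not part of this diff; it assumes a flow dissector program already loaded via libbpf):

#include <bpf/bpf.h>

/* Attach a loaded flow dissector program to the current netns. The
 * target-fd argument is unused for BPF_FLOW_DISSECTOR, so 0 is passed.
 * In the root netns this installs the machine-wide policy; attaching in
 * a child netns afterwards fails with -EEXIST, and vice versa.
 */
static int attach_flow_dissector(int prog_fd)
{
	return bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
}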


@ -909,6 +909,16 @@ xadd: if (is_imm8(insn->off))
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_K:
/* test dst_reg, dst_reg to save one extra byte */
if (imm32 == 0) {
if (BPF_CLASS(insn->code) == BPF_JMP)
EMIT1(add_2mod(0x48, dst_reg, dst_reg));
else if (is_ereg(dst_reg))
EMIT1(add_2mod(0x40, dst_reg, dst_reg));
EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
goto emit_cond_jmp;
}
/* cmp dst_reg, imm8/32 */
if (BPF_CLASS(insn->code) == BPF_JMP)
EMIT1(add_1mod(0x48, dst_reg));
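For reference, the saving comes straight from the instruction encoding: test %rax,%rax is 48 85 c0 (three bytes), while the equivalent cmp $0x0,%rax is 48 83 f8 00 (four bytes). Both produce identical flag results for a comparison against zero, so each conditional jump with a zero immediate shrinks by one byte.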


@ -363,7 +363,7 @@ struct bpf_prog_stats {
u64 cnt;
u64 nsecs;
struct u64_stats_sync syncp;
};
} __aligned(2 * sizeof(u64));
struct bpf_prog_aux {
atomic_t refcnt;


@ -794,7 +794,7 @@ union bpf_attr {
* A 64-bit integer containing the current GID and UID, and
* created as such: *current_gid* **<< 32 \|** *current_uid*.
*
* int bpf_get_current_comm(char *buf, u32 size_of_buf)
* int bpf_get_current_comm(void *buf, u32 size_of_buf)
* Description
* Copy the **comm** attribute of the current task into *buf* of
* *size_of_buf*. The **comm** attribute contains the name of
@ -1023,7 +1023,7 @@ union bpf_attr {
* The realm of the route for the packet associated to *skb*, or 0
* if none was found.
*
* int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@ -1068,7 +1068,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
* int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
* Description
* This helper was provided as an easy way to load data from a
* packet. It can be used to load *len* bytes from *offset* from
@ -1085,7 +1085,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags)
* int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
* Description
* Walk a user or a kernel stack and return its id. To achieve
* this, the helper needs *ctx*, which is a pointer to the context
@ -1154,7 +1154,7 @@ union bpf_attr {
* The checksum result, or a negative error code in case of
* failure.
*
* int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
* int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Retrieve tunnel options metadata for the packet associated to
* *skb*, and store the raw tunnel option data to the buffer *opt*
@ -1172,7 +1172,7 @@ union bpf_attr {
* Return
* The size of the option data retrieved.
*
* int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
* int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Set tunnel options metadata for the packet associated to *skb*
* to the option data contained in the raw buffer *opt* of *size*.
@ -1511,7 +1511,7 @@ union bpf_attr {
* Return
* 0
*
* int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
* int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **setsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@ -1595,7 +1595,7 @@ union bpf_attr {
* Return
* **XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
*
* int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags)
* int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
* Description
* Redirect the packet to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
@ -1715,7 +1715,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
* int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **getsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@ -1947,7 +1947,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
* int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* To achieve this, the helper needs *ctx*, which is a pointer
@ -1980,7 +1980,7 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
* int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
* int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@ -2033,7 +2033,7 @@ union bpf_attr {
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
* packet is not forwarded or needs assist from full stack
*
* int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
* int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
* Description
* Add an entry to, or update a sockhash *map* referencing sockets.
* The *skops* is used as a new value for the entry associated to
@ -2392,7 +2392,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags)
* int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* For socket policies, insert *len* bytes into *msg* at offset
* *start*.
@ -2408,9 +2408,9 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags)
* int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* Will remove *pop* bytes from a *msg* starting at byte *start*.
* Will remove *len* bytes from a *msg* starting at byte *start*.
* This may result in **ENOMEM** errors under certain situations if
* an allocation and copy are required due to a full ring buffer.
* However, the helper will try to avoid doing the allocation
@ -2505,7 +2505,7 @@ union bpf_attr {
* A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
* int bpf_skb_ecn_set_ce(struct sk_buf *skb)
* int bpf_skb_ecn_set_ce(struct sk_buff *skb)
* Description
* Set ECN (Explicit Congestion Notification) field of IP header
* to **CE** (Congestion Encountered) if current value is **ECT**
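These signature relaxations (char */u8 */overly specific struct pointers becoming void * or the proper context type) pay off once the doc comments are auto-generated into bpf_helper_defs.h (see the bpf_helpers_doc.py change below): BPF C code can then pass arbitrary buffers without casts. A hypothetical fragment (ctx and the events perf map are assumed, not from this diff):

struct event { __u32 pid; char comm[16]; } e = {};

/* with 'void *buf' and 'void *ctx' prototypes, no casts are needed */
bpf_get_current_comm(e.comm, sizeof(e.comm));
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));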


@ -2739,6 +2739,41 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
reg->smax_value = reg->umax_value;
}
static bool bpf_map_is_rdonly(const struct bpf_map *map)
{
return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
}
static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
{
void *ptr;
u64 addr;
int err;
err = map->ops->map_direct_value_addr(map, &addr, off);
if (err)
return err;
ptr = (void *)(long)addr + off;
switch (size) {
case sizeof(u8):
*val = (u64)*(u8 *)ptr;
break;
case sizeof(u16):
*val = (u64)*(u16 *)ptr;
break;
case sizeof(u32):
*val = (u64)*(u32 *)ptr;
break;
case sizeof(u64):
*val = *(u64 *)ptr;
break;
default:
return -EINVAL;
}
return 0;
}
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
@ -2776,9 +2811,27 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (err)
return err;
err = check_map_access(env, regno, off, size, false);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
if (!err && t == BPF_READ && value_regno >= 0) {
struct bpf_map *map = reg->map_ptr;
/* if map is read-only, track its contents as scalars */
if (tnum_is_const(reg->var_off) &&
bpf_map_is_rdonly(map) &&
map->ops->map_direct_value_addr) {
int map_off = off + reg->var_off.value;
u64 val = 0;
err = bpf_map_direct_read(map, map_off, size,
&val);
if (err)
return err;
regs[value_regno].type = SCALAR_VALUE;
__mark_reg_known(&regs[value_regno], val);
} else {
mark_reg_unknown(env, regs, value_regno);
}
}
} else if (reg->type == PTR_TO_CTX) {
enum bpf_reg_type reg_type = SCALAR_VALUE;
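A minimal sketch of what the new direct-read path enables on the program side (hypothetical example; it assumes libbpf places const globals into a read-only array map and freezes it):

#include <linux/bpf.h>
#include "bpf_helpers.h"

const volatile __u32 debug = 0;	/* lives in the frozen .rodata map */

SEC("xdp")
int xdp_prog(struct xdp_md *ctx)
{
	/* map_direct_value_addr() lets the verifier read the four bytes
	 * behind 'debug' and track them as a known scalar (0 here), so
	 * this branch is provably dead and can be pruned.
	 */
	if (debug)
		return XDP_DROP;
	return XDP_PASS;
}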


@ -114,19 +114,50 @@ int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
{
struct bpf_prog *attached;
struct net *net;
int ret = 0;
net = current->nsproxy->net_ns;
mutex_lock(&flow_dissector_mutex);
if (net == &init_net) {
/* BPF flow dissector in the root namespace overrides
* any per-net-namespace one. When attaching to root,
* make sure we don't have any BPF program attached
* to the non-root namespaces.
*/
struct net *ns;
for_each_net(ns) {
if (ns == &init_net)
continue;
if (rcu_access_pointer(ns->flow_dissector_prog)) {
ret = -EEXIST;
goto out;
}
}
} else {
/* Make sure root flow dissector is not attached
* when attaching to the non-root namespace.
*/
if (rcu_access_pointer(init_net.flow_dissector_prog)) {
ret = -EEXIST;
goto out;
}
}
attached = rcu_dereference_protected(net->flow_dissector_prog,
lockdep_is_held(&flow_dissector_mutex));
if (attached) {
/* Only one BPF program can be attached at a time */
mutex_unlock(&flow_dissector_mutex);
return -EEXIST;
if (attached == prog) {
/* The same program cannot be attached twice */
ret = -EINVAL;
goto out;
}
rcu_assign_pointer(net->flow_dissector_prog, prog);
if (attached)
bpf_prog_put(attached);
out:
mutex_unlock(&flow_dissector_mutex);
return 0;
return ret;
}
int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
@ -910,7 +941,10 @@ bool __skb_flow_dissect(const struct net *net,
WARN_ON_ONCE(!net);
if (net) {
rcu_read_lock();
attached = rcu_dereference(net->flow_dissector_prog);
attached = rcu_dereference(init_net.flow_dissector_prog);
if (!attached)
attached = rcu_dereference(net->flow_dissector_prog);
if (attached) {
struct bpf_flow_keys flow_keys;


@ -386,7 +386,7 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
/* XDP RX runs under NAPI protection, and in different delivery error
* scenarios (e.g. queue full), it is possible to return the xdp_frame
* while still leveraging this protection. The @napi_direct boolian
* while still leveraging this protection. The @napi_direct boolean
* is used for those calls sites. Thus, allowing for faster recycling
* of xdp_frames/pages in those cases.
*/


@ -4,55 +4,53 @@ BPF_SAMPLES_PATH ?= $(abspath $(srctree)/$(src))
TOOLS_PATH := $(BPF_SAMPLES_PATH)/../../tools
# List of programs to build
hostprogs-y := test_lru_dist
hostprogs-y += sock_example
hostprogs-y += fds_example
hostprogs-y += sockex1
hostprogs-y += sockex2
hostprogs-y += sockex3
hostprogs-y += tracex1
hostprogs-y += tracex2
hostprogs-y += tracex3
hostprogs-y += tracex4
hostprogs-y += tracex5
hostprogs-y += tracex6
hostprogs-y += tracex7
hostprogs-y += test_probe_write_user
hostprogs-y += trace_output
hostprogs-y += lathist
hostprogs-y += offwaketime
hostprogs-y += spintest
hostprogs-y += map_perf_test
hostprogs-y += test_overhead
hostprogs-y += test_cgrp2_array_pin
hostprogs-y += test_cgrp2_attach
hostprogs-y += test_cgrp2_sock
hostprogs-y += test_cgrp2_sock2
hostprogs-y += xdp1
hostprogs-y += xdp2
hostprogs-y += xdp_router_ipv4
hostprogs-y += test_current_task_under_cgroup
hostprogs-y += trace_event
hostprogs-y += sampleip
hostprogs-y += tc_l2_redirect
hostprogs-y += lwt_len_hist
hostprogs-y += xdp_tx_iptunnel
hostprogs-y += test_map_in_map
hostprogs-y += per_socket_stats_example
hostprogs-y += xdp_redirect
hostprogs-y += xdp_redirect_map
hostprogs-y += xdp_redirect_cpu
hostprogs-y += xdp_monitor
hostprogs-y += xdp_rxq_info
hostprogs-y += syscall_tp
hostprogs-y += cpustat
hostprogs-y += xdp_adjust_tail
hostprogs-y += xdpsock
hostprogs-y += xdp_fwd
hostprogs-y += task_fd_query
hostprogs-y += xdp_sample_pkts
hostprogs-y += ibumad
hostprogs-y += hbm
tprogs-y := test_lru_dist
tprogs-y += sock_example
tprogs-y += fds_example
tprogs-y += sockex1
tprogs-y += sockex2
tprogs-y += sockex3
tprogs-y += tracex1
tprogs-y += tracex2
tprogs-y += tracex3
tprogs-y += tracex4
tprogs-y += tracex5
tprogs-y += tracex6
tprogs-y += tracex7
tprogs-y += test_probe_write_user
tprogs-y += trace_output
tprogs-y += lathist
tprogs-y += offwaketime
tprogs-y += spintest
tprogs-y += map_perf_test
tprogs-y += test_overhead
tprogs-y += test_cgrp2_array_pin
tprogs-y += test_cgrp2_attach
tprogs-y += test_cgrp2_sock
tprogs-y += test_cgrp2_sock2
tprogs-y += xdp1
tprogs-y += xdp2
tprogs-y += xdp_router_ipv4
tprogs-y += test_current_task_under_cgroup
tprogs-y += trace_event
tprogs-y += sampleip
tprogs-y += tc_l2_redirect
tprogs-y += lwt_len_hist
tprogs-y += xdp_tx_iptunnel
tprogs-y += test_map_in_map
tprogs-y += xdp_redirect_map
tprogs-y += xdp_redirect_cpu
tprogs-y += xdp_monitor
tprogs-y += xdp_rxq_info
tprogs-y += syscall_tp
tprogs-y += cpustat
tprogs-y += xdp_adjust_tail
tprogs-y += xdpsock
tprogs-y += xdp_fwd
tprogs-y += task_fd_query
tprogs-y += xdp_sample_pkts
tprogs-y += ibumad
tprogs-y += hbm
# Libbpf dependencies
LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a
@ -111,7 +109,7 @@ ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
hbm-objs := bpf_load.o hbm.o $(CGROUP_HELPERS)
# Tell kbuild to always build the programs
always := $(hostprogs-y)
always := $(tprogs-y)
always += sockex1_kern.o
always += sockex2_kern.o
always += sockex3_kern.o
@ -145,7 +143,6 @@ always += sampleip_kern.o
always += lwt_len_hist_kern.o
always += xdp_tx_iptunnel_kern.o
always += test_map_in_map_kern.o
always += cookie_uid_helper_example.o
always += tcp_synrto_kern.o
always += tcp_rwnd_kern.o
always += tcp_bufs_kern.o
@ -171,20 +168,38 @@ always += ibumad_kern.o
always += hbm_out_kern.o
always += hbm_edt_kern.o
KBUILD_HOSTCFLAGS += -I$(objtree)/usr/include
KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/bpf/
KBUILD_HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
KBUILD_HOSTCFLAGS += -I$(srctree)/tools/perf
ifeq ($(ARCH), arm)
# Strip all except -D__LINUX_ARM_ARCH__ option needed to handle linux
# headers when arm instruction set identification is requested.
ARM_ARCH_SELECTOR := $(filter -D__LINUX_ARM_ARCH__%, $(KBUILD_CFLAGS))
BPF_EXTRA_CFLAGS := $(ARM_ARCH_SELECTOR)
TPROGS_CFLAGS += $(ARM_ARCH_SELECTOR)
endif
HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
TPROGS_CFLAGS += -Wall -O2
TPROGS_CFLAGS += -Wmissing-prototypes
TPROGS_CFLAGS += -Wstrict-prototypes
KBUILD_HOSTLDLIBS += $(LIBBPF) -lelf
HOSTLDLIBS_tracex4 += -lrt
HOSTLDLIBS_trace_output += -lrt
HOSTLDLIBS_map_perf_test += -lrt
HOSTLDLIBS_test_overhead += -lrt
HOSTLDLIBS_xdpsock += -pthread
TPROGS_CFLAGS += -I$(objtree)/usr/include
TPROGS_CFLAGS += -I$(srctree)/tools/lib/bpf/
TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
TPROGS_CFLAGS += -I$(srctree)/tools/lib/
TPROGS_CFLAGS += -I$(srctree)/tools/include
TPROGS_CFLAGS += -I$(srctree)/tools/perf
ifdef SYSROOT
TPROGS_CFLAGS += --sysroot=$(SYSROOT)
TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib
endif
TPROGCFLAGS_bpf_load.o += -Wno-unused-variable
TPROGS_LDLIBS += $(LIBBPF) -lelf
TPROGLDLIBS_tracex4 += -lrt
TPROGLDLIBS_trace_output += -lrt
TPROGLDLIBS_map_perf_test += -lrt
TPROGLDLIBS_test_overhead += -lrt
TPROGLDLIBS_xdpsock += -pthread
# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
@ -195,15 +210,14 @@ BTF_PAHOLE ?= pahole
# Detect that we're cross compiling and use the cross compiler
ifdef CROSS_COMPILE
HOSTCC = $(CROSS_COMPILE)gcc
CLANG_ARCH_ARGS = -target $(ARCH)
CLANG_ARCH_ARGS = --target=$(notdir $(CROSS_COMPILE:%-=%))
endif
# Don't evaluate probes and warnings if we need to run make recursively
ifneq ($(src),)
HDR_PROBE := $(shell echo "\#include <linux/types.h>\n struct list_head { int a; }; int main() { return 0; }" | \
$(HOSTCC) $(KBUILD_HOSTCFLAGS) -x c - -o /dev/null 2>/dev/null && \
echo okay)
HDR_PROBE := $(shell printf "\#include <linux/types.h>\n struct list_head { int a; }; int main() { return 0; }" | \
$(CC) $(TPROGS_CFLAGS) $(TPROGS_LDFLAGS) -x c - \
-o /dev/null 2>/dev/null && echo okay)
ifeq ($(HDR_PROBE),)
$(warning WARNING: Detected possible issues with include path.)
@ -219,10 +233,10 @@ BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
/bin/rm -f ./llvm_btf_verify.o)
ifneq ($(BTF_LLVM_PROBE),)
EXTRA_CFLAGS += -g
BPF_EXTRA_CFLAGS += -g
else
ifneq ($(and $(BTF_LLC_PROBE),$(BTF_PAHOLE_PROBE),$(BTF_OBJCOPY_PROBE)),)
EXTRA_CFLAGS += -g
BPF_EXTRA_CFLAGS += -g
LLC_FLAGS += -mattr=dwarfris
DWARF2BTF = y
endif
@ -239,7 +253,8 @@ clean:
$(LIBBPF): FORCE
# Fix up variables inherited from Kbuild that tools/ build system won't like
$(MAKE) -C $(dir $@) RM='rm -rf' LDFLAGS= srctree=$(BPF_SAMPLES_PATH)/../../ O=
$(MAKE) -C $(dir $@) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" \
LDFLAGS=$(TPROGS_LDFLAGS) srctree=$(BPF_SAMPLES_PATH)/../../ O=
$(obj)/syscall_nrs.h: $(obj)/syscall_nrs.s FORCE
$(call filechk,offsets,__SYSCALL_NRS_H__)
@ -276,13 +291,16 @@ $(obj)/hbm_out_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
$(obj)/hbm.o: $(src)/hbm.h
$(obj)/hbm_edt_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
-include $(BPF_SAMPLES_PATH)/Makefile.target
# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
# But, there is no easy way to fix it, so just exclude it since it is
# useless for BPF samples.
$(obj)/%.o: $(src)/%.c
@echo " CLANG-bpf " $@
$(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \
-I$(srctree)/tools/testing/selftests/bpf/ \
$(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(BPF_EXTRA_CFLAGS) \
-I$(obj) -I$(srctree)/tools/testing/selftests/bpf/ \
-I$(srctree)/tools/lib/bpf/ \
-D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \
-D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \
-Wno-gnu-variable-sized-type-not-at-end \


@ -0,0 +1,75 @@
# SPDX-License-Identifier: GPL-2.0
# ==========================================================================
# Building binaries for the target system
# These binaries are not used during the compilation of the kernel; they are
# intended to be built for the target board (which can, of course, be the
# host). Added to build binaries that run on the target rather than on the
# build host.
#
# Sample syntax
# tprogs-y := xsk_example
# Will compile xsk_example.c and create an executable named xsk_example
#
# tprogs-y := xdpsock
# xdpsock-objs := xdpsock_1.o xdpsock_2.o
# Will compile xdpsock_1.c and xdpsock_2.c, and then link the executable
# xdpsock, based on xdpsock_1.o and xdpsock_2.o
#
# Derived from scripts/Makefile.host
#
__tprogs := $(sort $(tprogs-y))
# C code
# Executables compiled from a single .c file
tprog-csingle := $(foreach m,$(__tprogs), \
$(if $($(m)-objs),,$(m)))
# C executables linked based on several .o files
tprog-cmulti := $(foreach m,$(__tprogs),\
$(if $($(m)-objs),$(m)))
# Object (.o) files compiled from .c files
tprog-cobjs := $(sort $(foreach m,$(__tprogs),$($(m)-objs)))
tprog-csingle := $(addprefix $(obj)/,$(tprog-csingle))
tprog-cmulti := $(addprefix $(obj)/,$(tprog-cmulti))
tprog-cobjs := $(addprefix $(obj)/,$(tprog-cobjs))
#####
# Handle options to gcc. Support building with separate output directory
_tprogc_flags = $(TPROGS_CFLAGS) \
$(TPROGCFLAGS_$(basetarget).o)
# $(objtree)/$(obj) for including generated headers from checkin source files
ifeq ($(KBUILD_EXTMOD),)
ifdef building_out_of_srctree
_tprogc_flags += -I $(objtree)/$(obj)
endif
endif
tprogc_flags = -Wp,-MD,$(depfile) $(_tprogc_flags)
# Create executable from a single .c file
# tprog-csingle -> Executable
quiet_cmd_tprog-csingle = CC $@
cmd_tprog-csingle = $(CC) $(tprogc_flags) $(TPROGS_LDFLAGS) -o $@ $< \
$(TPROGS_LDLIBS) $(TPROGLDLIBS_$(@F))
$(tprog-csingle): $(obj)/%: $(src)/%.c FORCE
$(call if_changed_dep,tprog-csingle)
# Link an executable based on list of .o files, all plain c
# tprog-cmulti -> executable
quiet_cmd_tprog-cmulti = LD $@
cmd_tprog-cmulti = $(CC) $(tprogc_flags) $(TPROGS_LDFLAGS) -o $@ \
$(addprefix $(obj)/,$($(@F)-objs)) \
$(TPROGS_LDLIBS) $(TPROGLDLIBS_$(@F))
$(tprog-cmulti): $(tprog-cobjs) FORCE
$(call if_changed,tprog-cmulti)
$(call multi_depend, $(tprog-cmulti), , -objs)
# Create .o file from a single .c file
# tprog-cobjs -> .o
quiet_cmd_tprog-cobjs = CC $@
cmd_tprog-cobjs = $(CC) $(tprogc_flags) -c -o $@ $<
$(tprog-cobjs): $(obj)/%.o: $(src)/%.c FORCE
$(call if_changed_dep,tprog-cobjs)


@ -14,6 +14,20 @@ Compiling requires having installed:
Note that LLVM's tool 'llc' must support target 'bpf'; list the version
and supported targets with the command: ``llc --version``
Clean and configuration
-----------------------
It may be necessary to clean the tools, samples, or kernel trees before trying
a new arch, or after some other changes (on demand)::
make -C tools clean
make -C samples/bpf clean
make clean
Configure the kernel, for instance with defconfig::
make defconfig
Kernel headers
--------------
@ -68,9 +82,26 @@ It is also possible to point make to the newly compiled 'llc' or
Cross compiling samples
-----------------------
In order to cross-compile, say for arm64 targets, export the CROSS_COMPILE and
ARCH environment variables before calling make, and do so before the clean,
configuration, and header-install steps described above. This will direct make
to build the samples for the cross target::
export ARCH=arm64
export CROSS_COMPILE="aarch64-linux-gnu-"
Headers can also be installed on the RFS of the target board if you need to
keep them in sync (this is optional; note that it also creates a local
"usr/include" directory)::
make INSTALL_HDR_PATH=~/some_sysroot/usr headers_install
Pointing make at LLC and CLANG is not necessary if the LLVM installed on the
HOST already supports the appropriate arm64 target (it usually supports
several arches).
Build samples::
make samples/bpf/
Or build the samples with SYSROOT if some header or library (say, libelf) is
absent from the toolchain, pointing make at a file system that contains the
needed headers and libs; this can be the RFS of the target board::
make samples/bpf/ SYSROOT=~/some_sysroot


@ -59,21 +59,18 @@
#define BYTES_PER_NS(delta, rate) ((((u64)(delta)) * (rate)) >> 20)
#define BYTES_TO_NS(bytes, rate) div64_u64(((u64)(bytes)) << 20, (u64)(rate))
struct bpf_map_def SEC("maps") queue_state = {
.type = BPF_MAP_TYPE_CGROUP_STORAGE,
.key_size = sizeof(struct bpf_cgroup_storage_key),
.value_size = sizeof(struct hbm_vqueue),
};
BPF_ANNOTATE_KV_PAIR(queue_state, struct bpf_cgroup_storage_key,
struct hbm_vqueue);
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, struct hbm_vqueue);
} queue_state SEC(".maps");
struct bpf_map_def SEC("maps") queue_stats = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(struct hbm_queue_stats),
.max_entries = 1,
};
BPF_ANNOTATE_KV_PAIR(queue_stats, int, struct hbm_queue_stats);
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, u32);
__type(value, struct hbm_queue_stats);
} queue_stats SEC(".maps");
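/* Editorial note, not part of the diff: the new definitions above use the
 * BTF-defined map syntax -- __uint()/__type() record the map attributes and
 * key/value types directly in BTF via the .maps section, which is why the
 * separate BPF_ANNOTATE_KV_PAIR() annotations could be dropped.
 */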
struct hbm_pkt_info {
int cwnd;


@ -9,25 +9,27 @@
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_legacy.h"
#include "bpf_tracing.h"
#define MAX_ENTRIES 1000
#define MAX_NR_CPUS 1024
struct bpf_map_def SEC("maps") hash_map = {
struct bpf_map_def_legacy SEC("maps") hash_map = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
};
struct bpf_map_def SEC("maps") lru_hash_map = {
struct bpf_map_def_legacy SEC("maps") lru_hash_map = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = 10000,
};
struct bpf_map_def SEC("maps") nocommon_lru_hash_map = {
struct bpf_map_def_legacy SEC("maps") nocommon_lru_hash_map = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
@ -35,7 +37,7 @@ struct bpf_map_def SEC("maps") nocommon_lru_hash_map = {
.map_flags = BPF_F_NO_COMMON_LRU,
};
struct bpf_map_def SEC("maps") inner_lru_hash_map = {
struct bpf_map_def_legacy SEC("maps") inner_lru_hash_map = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
@ -44,20 +46,20 @@ struct bpf_map_def SEC("maps") inner_lru_hash_map = {
.numa_node = 0,
};
struct bpf_map_def SEC("maps") array_of_lru_hashs = {
struct bpf_map_def_legacy SEC("maps") array_of_lru_hashs = {
.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
.key_size = sizeof(u32),
.max_entries = MAX_NR_CPUS,
};
struct bpf_map_def SEC("maps") percpu_hash_map = {
struct bpf_map_def_legacy SEC("maps") percpu_hash_map = {
.type = BPF_MAP_TYPE_PERCPU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
};
struct bpf_map_def SEC("maps") hash_map_alloc = {
struct bpf_map_def_legacy SEC("maps") hash_map_alloc = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
@ -65,7 +67,7 @@ struct bpf_map_def SEC("maps") hash_map_alloc = {
.map_flags = BPF_F_NO_PREALLOC,
};
struct bpf_map_def SEC("maps") percpu_hash_map_alloc = {
struct bpf_map_def_legacy SEC("maps") percpu_hash_map_alloc = {
.type = BPF_MAP_TYPE_PERCPU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),
@ -73,7 +75,7 @@ struct bpf_map_def SEC("maps") percpu_hash_map_alloc = {
.map_flags = BPF_F_NO_PREALLOC,
};
struct bpf_map_def SEC("maps") lpm_trie_map_alloc = {
struct bpf_map_def_legacy SEC("maps") lpm_trie_map_alloc = {
.type = BPF_MAP_TYPE_LPM_TRIE,
.key_size = 8,
.value_size = sizeof(long),
@ -81,14 +83,14 @@ struct bpf_map_def SEC("maps") lpm_trie_map_alloc = {
.map_flags = BPF_F_NO_PREALLOC,
};
struct bpf_map_def SEC("maps") array_map = {
struct bpf_map_def_legacy SEC("maps") array_map = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(long),
.max_entries = MAX_ENTRIES,
};
struct bpf_map_def SEC("maps") lru_hash_lookup_map = {
struct bpf_map_def_legacy SEC("maps") lru_hash_lookup_map = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(long),


@ -6,6 +6,7 @@
*/
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
#include <uapi/linux/ptrace.h>
#include <uapi/linux/perf_event.h>
#include <linux/version.h>


@ -12,6 +12,7 @@
#include <linux/udp.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_legacy.h"
#define DEFAULT_PKTGEN_UDP_PORT 9
#define IP_MF 0x2000


@ -9,6 +9,7 @@
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
#define MAX_IPS 8192


@ -3,6 +3,7 @@
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include "bpf_helpers.h"
#include "bpf_legacy.h"
struct bpf_map_def SEC("maps") my_map = {
.type = BPF_MAP_TYPE_ARRAY,


@ -1,5 +1,6 @@
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_legacy.h"
#include <uapi/linux/in.h>
#include <uapi/linux/if.h>
#include <uapi/linux/if_ether.h>


@ -6,6 +6,7 @@
*/
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_legacy.h"
#include <uapi/linux/in.h>
#include <uapi/linux/if.h>
#include <uapi/linux/if_ether.h>


@ -10,6 +10,7 @@
#include <uapi/linux/bpf.h>
#include <uapi/linux/perf_event.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
struct bpf_map_def SEC("maps") my_map = {
.type = BPF_MAP_TYPE_HASH,


@ -8,6 +8,7 @@
#include <uapi/linux/filter.h>
#include <uapi/linux/pkt_cls.h>
#include "bpf_helpers.h"
#include "bpf_legacy.h"
/* compiler workaround */
#define _htonl __builtin_bswap32


@ -11,11 +11,13 @@
#include <uapi/linux/bpf.h>
#include <uapi/linux/in6.h>
#include "bpf_helpers.h"
#include "bpf_legacy.h"
#include "bpf_tracing.h"
#define MAX_NR_PORTS 65536
/* map #0 */
struct bpf_map_def SEC("maps") port_a = {
struct bpf_map_def_legacy SEC("maps") port_a = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(u32),
.value_size = sizeof(int),
@ -23,7 +25,7 @@ struct bpf_map_def SEC("maps") port_a = {
};
/* map #1 */
struct bpf_map_def SEC("maps") port_h = {
struct bpf_map_def_legacy SEC("maps") port_h = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(int),
@ -31,7 +33,7 @@ struct bpf_map_def SEC("maps") port_h = {
};
/* map #2 */
struct bpf_map_def SEC("maps") reg_result_h = {
struct bpf_map_def_legacy SEC("maps") reg_result_h = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(int),
@ -39,7 +41,7 @@ struct bpf_map_def SEC("maps") reg_result_h = {
};
/* map #3 */
struct bpf_map_def SEC("maps") inline_result_h = {
struct bpf_map_def_legacy SEC("maps") inline_result_h = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(u32),
.value_size = sizeof(int),
@ -47,7 +49,7 @@ struct bpf_map_def SEC("maps") inline_result_h = {
};
/* map #4 */ /* Test case #0 */
struct bpf_map_def SEC("maps") a_of_port_a = {
struct bpf_map_def_legacy SEC("maps") a_of_port_a = {
.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
.key_size = sizeof(u32),
.inner_map_idx = 0, /* map_fd[0] is port_a */
@ -55,7 +57,7 @@ struct bpf_map_def SEC("maps") a_of_port_a = {
};
/* map #5 */ /* Test case #1 */
struct bpf_map_def SEC("maps") h_of_port_a = {
struct bpf_map_def_legacy SEC("maps") h_of_port_a = {
.type = BPF_MAP_TYPE_HASH_OF_MAPS,
.key_size = sizeof(u32),
.inner_map_idx = 0, /* map_fd[0] is port_a */
@ -63,7 +65,7 @@ struct bpf_map_def SEC("maps") h_of_port_a = {
};
/* map #6 */ /* Test case #2 */
struct bpf_map_def SEC("maps") h_of_port_h = {
struct bpf_map_def_legacy SEC("maps") h_of_port_h = {
.type = BPF_MAP_TYPE_HASH_OF_MAPS,
.key_size = sizeof(u32),
.inner_map_idx = 1, /* map_fd[1] is port_h */


@ -8,6 +8,7 @@
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})


@ -9,6 +9,7 @@
#include <uapi/linux/bpf.h>
#include <linux/version.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
struct bpf_map_def SEC("maps") dnat_map = {
.type = BPF_MAP_TYPE_HASH,


@ -10,6 +10,7 @@
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/perf_event.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
struct key_t {
char comm[TASK_COMM_LEN];


@ -9,6 +9,7 @@
#include <uapi/linux/bpf.h>
#include <linux/version.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})


@ -9,6 +9,7 @@
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
struct bpf_map_def SEC("maps") my_map = {
.type = BPF_MAP_TYPE_HASH,


@ -9,6 +9,7 @@
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
struct bpf_map_def SEC("maps") my_map = {
.type = BPF_MAP_TYPE_HASH,


@ -8,6 +8,7 @@
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
struct pair {
u64 val;


@ -11,6 +11,7 @@
#include <uapi/linux/unistd.h>
#include "syscall_nrs.h"
#include "bpf_helpers.h"
#include "bpf_tracing.h"
#define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F


@ -25,6 +25,9 @@
#define ICMP_TOOBIG_SIZE 98
#define ICMP_TOOBIG_PAYLOAD_SIZE 92
/* volatile to prevent compiler optimizations */
static volatile __u32 max_pcktsz = MAX_PCKT_SIZE;
struct bpf_map_def SEC("maps") icmpcnt = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
@ -92,7 +95,7 @@ static __always_inline int send_icmp4_too_big(struct xdp_md *xdp)
orig_iph = data + off;
icmp_hdr->type = ICMP_DEST_UNREACH;
icmp_hdr->code = ICMP_FRAG_NEEDED;
icmp_hdr->un.frag.mtu = htons(MAX_PCKT_SIZE-sizeof(struct ethhdr));
icmp_hdr->un.frag.mtu = htons(max_pcktsz - sizeof(struct ethhdr));
icmp_hdr->checksum = 0;
ipv4_csum(icmp_hdr, ICMP_TOOBIG_PAYLOAD_SIZE, &csum);
icmp_hdr->checksum = csum;
@ -121,7 +124,7 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp)
int pckt_size = data_end - data;
int offset;
if (pckt_size > MAX_PCKT_SIZE) {
if (pckt_size > max(max_pcktsz, ICMP_TOOBIG_SIZE)) {
offset = pckt_size - ICMP_TOOBIG_SIZE;
if (bpf_xdp_adjust_tail(xdp, 0 - offset))
return XDP_PASS;


@ -23,6 +23,7 @@
#include "libbpf.h"
#define STATS_INTERVAL_S 2U
#define MAX_PCKT_SIZE 600
static int ifindex = -1;
static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
@ -72,6 +73,7 @@ static void usage(const char *cmd)
printf("Usage: %s [...]\n", cmd);
printf(" -i <ifname|ifindex> Interface\n");
printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n");
printf(" -P <MAX_PCKT_SIZE> Default: %u\n", MAX_PCKT_SIZE);
printf(" -S use skb-mode\n");
printf(" -N enforce native mode\n");
printf(" -F force loading prog\n");
@ -85,13 +87,14 @@ int main(int argc, char **argv)
.prog_type = BPF_PROG_TYPE_XDP,
};
unsigned char opt_flags[256] = {};
const char *optstr = "i:T:SNFh";
const char *optstr = "i:T:P:SNFh";
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
unsigned int kill_after_s = 0;
int i, prog_fd, map_fd, opt;
struct bpf_object *obj;
struct bpf_map *map;
__u32 max_pckt_size = 0;
__u32 key = 0;
char filename[256];
int err;
@ -110,6 +113,9 @@ int main(int argc, char **argv)
case 'T':
kill_after_s = atoi(optarg);
break;
case 'P':
max_pckt_size = atoi(optarg);
break;
case 'S':
xdp_flags |= XDP_FLAGS_SKB_MODE;
break;
@ -150,15 +156,20 @@ int main(int argc, char **argv)
if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
return 1;
map = bpf_map__next(NULL, obj);
if (!map) {
printf("finding a map in obj file failed\n");
return 1;
/* static global var 'max_pcktsz' is accessible from .data section */
if (max_pckt_size) {
map_fd = bpf_object__find_map_fd_by_name(obj, "xdp_adju.data");
if (map_fd < 0) {
printf("finding a max_pcktsz map in obj file failed\n");
return 1;
}
bpf_map_update_elem(map_fd, &key, &max_pckt_size, BPF_ANY);
}
map_fd = bpf_map__fd(map);
if (!prog_fd) {
printf("load_bpf_file: %s\n", strerror(errno));
/* fetch icmpcnt map */
map_fd = bpf_object__find_map_fd_by_name(obj, "icmpcnt");
if (map_fd < 0) {
printf("finding a icmpcnt map in obj file failed\n");
return 1;
}
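The "xdp_adju.data" lookup works because libbpf derives internal map names from a truncated prefix of the object name plus the section suffix (roughly the first eight characters of xdp_adjust_tail_kern plus ".data"), so the result fits the kernel's map-name length limit.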


@ -378,7 +378,7 @@ static void usage(const char *prog)
" -q, --queue=n Use queue n (default 0)\n"
" -p, --poll Use poll syscall\n"
" -S, --xdp-skb=n Use XDP skb-mod\n"
" -N, --xdp-native=n Enfore XDP native mode\n"
" -N, --xdp-native=n Enforce XDP native mode\n"
" -n, --interval=n Specify statistics update interval (default 1 sec).\n"
" -z, --zero-copy Force zero-copy mode.\n"
" -c, --copy Force copy mode.\n"


@ -391,6 +391,154 @@ SEE ALSO
print('')
class PrinterHelpers(Printer):
"""
A printer for dumping collected information about helpers as C header to
be included from BPF program.
@helpers: array of Helper objects to print to standard output
"""
type_fwds = [
'struct bpf_fib_lookup',
'struct bpf_perf_event_data',
'struct bpf_perf_event_value',
'struct bpf_sock',
'struct bpf_sock_addr',
'struct bpf_sock_ops',
'struct bpf_sock_tuple',
'struct bpf_spin_lock',
'struct bpf_sysctl',
'struct bpf_tcp_sock',
'struct bpf_tunnel_key',
'struct bpf_xfrm_state',
'struct pt_regs',
'struct sk_reuseport_md',
'struct sockaddr',
'struct tcphdr',
'struct __sk_buff',
'struct sk_msg_md',
'struct xdp_md',
]
known_types = {
'...',
'void',
'const void',
'char',
'const char',
'int',
'long',
'unsigned long',
'__be16',
'__be32',
'__wsum',
'struct bpf_fib_lookup',
'struct bpf_perf_event_data',
'struct bpf_perf_event_value',
'struct bpf_sock',
'struct bpf_sock_addr',
'struct bpf_sock_ops',
'struct bpf_sock_tuple',
'struct bpf_spin_lock',
'struct bpf_sysctl',
'struct bpf_tcp_sock',
'struct bpf_tunnel_key',
'struct bpf_xfrm_state',
'struct pt_regs',
'struct sk_reuseport_md',
'struct sockaddr',
'struct tcphdr',
}
mapped_types = {
'u8': '__u8',
'u16': '__u16',
'u32': '__u32',
'u64': '__u64',
's8': '__s8',
's16': '__s16',
's32': '__s32',
's64': '__s64',
'size_t': 'unsigned long',
'struct bpf_map': 'void',
'struct sk_buff': 'struct __sk_buff',
'const struct sk_buff': 'const struct __sk_buff',
'struct sk_msg_buff': 'struct sk_msg_md',
'struct xdp_buff': 'struct xdp_md',
}
def print_header(self):
header = '''\
/* This is auto-generated file. See bpf_helpers_doc.py for details. */
/* Forward declarations of BPF structs */'''
print(header)
for fwd in self.type_fwds:
print('%s;' % fwd)
print('')
def print_footer(self):
footer = ''
print(footer)
def map_type(self, t):
if t in self.known_types:
return t
if t in self.mapped_types:
return self.mapped_types[t]
print("")
print("Unrecognized type '%s', please add it to known types!" % t)
sys.exit(1)
seen_helpers = set()
def print_one(self, helper):
proto = helper.proto_break_down()
if proto['name'] in self.seen_helpers:
return
self.seen_helpers.add(proto['name'])
print('/*')
print(" * %s" % proto['name'])
print(" *")
if (helper.desc):
# Do not strip all newline characters: formatted code at the end of
# a section must be followed by a blank line.
for line in re.sub('\n$', '', helper.desc, count=1).split('\n'):
print(' *{}{}'.format(' \t' if line else '', line))
if (helper.ret):
print(' *')
print(' * Returns')
for line in helper.ret.rstrip().split('\n'):
print(' *{}{}'.format(' \t' if line else '', line))
print(' */')
print('static %s %s(*%s)(' % (self.map_type(proto['ret_type']),
proto['ret_star'], proto['name']), end='')
comma = ''
for i, a in enumerate(proto['args']):
t = a['type']
n = a['name']
if proto['name'] == 'bpf_get_socket_cookie' and i == 0:
t = 'void'
n = 'ctx'
one_arg = '{}{}'.format(comma, self.map_type(t))
if n:
if a['star']:
one_arg += ' {}'.format(a['star'])
else:
one_arg += ' '
one_arg += '{}'.format(n)
comma = ', '
print(one_arg, end='')
print(') = (void *) %d;' % len(self.seen_helpers))
print('')
###############################################################################
# If script is launched from scripts/ from kernel tree and can access
@ -405,6 +553,8 @@ Parse eBPF header file and generate documentation for eBPF helper functions.
The RST-formatted output produced can be turned into a manual page with the
rst2man utility.
""")
argParser.add_argument('--header', action='store_true',
help='generate C header file')
if (os.path.isfile(bpfh)):
argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h',
default=bpfh)
@ -417,5 +567,8 @@ headerParser = HeaderParser(args.filename)
headerParser.run()
# Print formatted output to standard output.
printer = PrinterRST(headerParser.helpers)
if args.header:
printer = PrinterHelpers(headerParser.helpers)
else:
printer = PrinterRST(headerParser.helpers)
printer.print_all()
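For illustration, an entry emitted into the generated bpf_helper_defs.h comes out roughly like this (a sketch following the printing logic above, with the generated doc comment elided; the trailing integer is the helper ID, i.e. the running count of helpers seen):

/* ... generated doc comment ... */
static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1;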


@ -27,7 +27,7 @@ bool json_output;
bool show_pinned;
bool block_mount;
bool verifier_logs;
int bpf_flags;
bool relaxed_maps;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
@ -396,7 +396,7 @@ int main(int argc, char **argv)
show_pinned = true;
break;
case 'm':
bpf_flags = MAPS_RELAX_COMPAT;
relaxed_maps = true;
break;
case 'n':
block_mount = true;


@ -94,7 +94,7 @@ extern bool json_output;
extern bool show_pinned;
extern bool block_mount;
extern bool verifier_logs;
extern int bpf_flags;
extern bool relaxed_maps;
extern struct pinned_obj_table prog_table;
extern struct pinned_obj_table map_table;


@ -1092,9 +1092,7 @@ free_data_in:
static int load_with_options(int argc, char **argv, bool first_prog_only)
{
struct bpf_object_load_attr load_attr = { 0 };
struct bpf_object_open_attr open_attr = {
.prog_type = BPF_PROG_TYPE_UNSPEC,
};
enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
enum bpf_attach_type expected_attach_type;
struct map_replace *map_replace = NULL;
struct bpf_program *prog = NULL, *pos;
@ -1105,11 +1103,16 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
const char *pinfile;
unsigned int i, j;
__u32 ifindex = 0;
const char *file;
int idx, err;
LIBBPF_OPTS(bpf_object_open_opts, open_opts,
.relaxed_maps = relaxed_maps,
);
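/* Editorial note, not part of the diff: LIBBPF_OPTS() declares the opts
 * struct zero-initialized with its .sz field set to
 * sizeof(struct bpf_object_open_opts), which is how the opts-based APIs
 * stay forward/backward compatible as new option fields are appended.
 */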
if (!REQ_ARGS(2))
return -1;
open_attr.file = GET_ARG();
file = GET_ARG();
pinfile = GET_ARG();
while (argc) {
@ -1118,7 +1121,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
NEXT_ARG();
if (open_attr.prog_type != BPF_PROG_TYPE_UNSPEC) {
if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
p_err("program type already specified");
goto err_free_reuse_maps;
}
@ -1135,8 +1138,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
strcat(type, *argv);
strcat(type, "/");
err = libbpf_prog_type_by_name(type,
&open_attr.prog_type,
err = libbpf_prog_type_by_name(type, &common_prog_type,
&expected_attach_type);
free(type);
if (err < 0)
@ -1224,16 +1226,16 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
set_max_rlimit();
obj = __bpf_object__open_xattr(&open_attr, bpf_flags);
obj = bpf_object__open_file(file, &open_opts);
if (IS_ERR_OR_NULL(obj)) {
p_err("failed to open object file");
goto err_free_reuse_maps;
}
bpf_object__for_each_program(pos, obj) {
enum bpf_prog_type prog_type = open_attr.prog_type;
enum bpf_prog_type prog_type = common_prog_type;
if (open_attr.prog_type == BPF_PROG_TYPE_UNSPEC) {
if (prog_type == BPF_PROG_TYPE_UNSPEC) {
const char *sec_name = bpf_program__title(pos, false);
err = libbpf_prog_type_by_name(sec_name, &prog_type,


@ -794,7 +794,7 @@ union bpf_attr {
* A 64-bit integer containing the current GID and UID, and
* created as such: *current_gid* **<< 32 \|** *current_uid*.
*
* int bpf_get_current_comm(char *buf, u32 size_of_buf)
* int bpf_get_current_comm(void *buf, u32 size_of_buf)
* Description
* Copy the **comm** attribute of the current task into *buf* of
* *size_of_buf*. The **comm** attribute contains the name of
@ -1023,7 +1023,7 @@ union bpf_attr {
* The realm of the route for the packet associated to *skb*, or 0
* if none was found.
*
* int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@ -1068,7 +1068,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
* int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
* Description
* This helper was provided as an easy way to load data from a
* packet. It can be used to load *len* bytes from *offset* from
@ -1085,7 +1085,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags)
* int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
* Description
* Walk a user or a kernel stack and return its id. To achieve
* this, the helper needs *ctx*, which is a pointer to the context
@ -1154,7 +1154,7 @@ union bpf_attr {
* The checksum result, or a negative error code in case of
* failure.
*
* int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
* int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Retrieve tunnel options metadata for the packet associated to
* *skb*, and store the raw tunnel option data to the buffer *opt*
@ -1172,7 +1172,7 @@ union bpf_attr {
* Return
* The size of the option data retrieved.
*
* int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
* int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Set tunnel options metadata for the packet associated to *skb*
* to the option data contained in the raw buffer *opt* of *size*.
@ -1511,7 +1511,7 @@ union bpf_attr {
* Return
* 0
*
* int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
* int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **setsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@ -1595,7 +1595,7 @@ union bpf_attr {
* Return
* **XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
*
* int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags)
* int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
* Description
* Redirect the packet to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
@ -1715,7 +1715,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
* int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **getsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@ -1947,7 +1947,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
* int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* To achieve this, the helper needs *ctx*, which is a pointer
@ -1980,7 +1980,7 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
* int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
* int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@ -2033,7 +2033,7 @@ union bpf_attr {
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
* packet is not forwarded or needs assist from full stack
*
* int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
* int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
* Description
* Add an entry to, or update a sockhash *map* referencing sockets.
* The *skops* is used as a new value for the entry associated to
@ -2392,7 +2392,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags)
* int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* For socket policies, insert *len* bytes into *msg* at offset
* *start*.
@ -2408,9 +2408,9 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags)
* int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* Will remove *pop* bytes from a *msg* starting at byte *start*.
* Will remove *len* bytes from a *msg* starting at byte *start*.
* This may result in **ENOMEM** errors under certain situations if
* an allocation and copy are required due to a full ring buffer.
* However, the helper will try to avoid doing the allocation
@ -2505,7 +2505,7 @@ union bpf_attr {
* A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
* int bpf_skb_ecn_set_ce(struct sk_buf *skb)
* int bpf_skb_ecn_set_ce(struct sk_buff *skb)
* Description
* Set ECN (Explicit Congestion Notification) field of IP header
* to **CE** (Congestion Encountered) if current value is **ECT**


@ -3,3 +3,7 @@ libbpf.pc
FEATURE-DUMP.libbpf
test_libbpf
libbpf.so.*
TAGS
tags
cscope.*
/bpf_helper_defs.h


@ -52,7 +52,7 @@ ifndef VERBOSE
endif
FEATURE_USER = .libbpf
FEATURE_TESTS = libelf libelf-mmap bpf reallocarray cxx
FEATURE_TESTS = libelf libelf-mmap bpf reallocarray
FEATURE_DISPLAY = libelf bpf
INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
@ -133,6 +133,8 @@ LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
PC_FILE := $(addprefix $(OUTPUT),$(PC_FILE))
TAGS_PROG := $(if $(shell which etags 2>/dev/null),etags,ctags)
GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \
@ -140,22 +142,14 @@ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
CMD_TARGETS = $(LIB_TARGET) $(PC_FILE)
CXX_TEST_TARGET = $(OUTPUT)test_libbpf
ifeq ($(feature-cxx), 1)
CMD_TARGETS += $(CXX_TEST_TARGET)
endif
TARGETS = $(CMD_TARGETS)
CMD_TARGETS = $(LIB_TARGET) $(PC_FILE) $(OUTPUT)test_libbpf
all: fixdep
$(Q)$(MAKE) all_cmd
all_cmd: $(CMD_TARGETS) check
$(BPF_IN): force elfdep bpfdep
$(BPF_IN): force elfdep bpfdep bpf_helper_defs.h
@(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
(diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
@ -173,19 +167,24 @@ $(BPF_IN): force elfdep bpfdep
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
$(Q)$(MAKE) $(build)=libbpf
bpf_helper_defs.h: $(srctree)/include/uapi/linux/bpf.h
$(Q)$(srctree)/scripts/bpf_helpers_doc.py --header \
--file $(srctree)/include/uapi/linux/bpf.h > bpf_helper_defs.h
$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
$(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
-Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
$(QUIET_LINK)$(CC) $(LDFLAGS) \
--shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
-Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
@ln -sf $(@F) $(OUTPUT)libbpf.so
@ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION)
$(OUTPUT)libbpf.a: $(BPF_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
$(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a
$(QUIET_LINK)$(CXX) $(INCLUDES) $^ -lelf -o $@
$(OUTPUT)test_libbpf: test_libbpf.c $(OUTPUT)libbpf.a
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(INCLUDES) $^ -lelf -o $@
$(OUTPUT)libbpf.pc:
$(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
@ -234,13 +233,18 @@ install_lib: all_cmd
$(call do_install_mkdir,$(libdir_SQ)); \
cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
install_headers:
install_headers: bpf_helper_defs.h
$(call QUIET_INSTALL, headers) \
$(call do_install,bpf.h,$(prefix)/include/bpf,644); \
$(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
$(call do_install,btf.h,$(prefix)/include/bpf,644); \
$(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \
$(call do_install,xsk.h,$(prefix)/include/bpf,644);
$(call do_install,xsk.h,$(prefix)/include/bpf,644); \
$(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \
$(call do_install,bpf_helper_defs.h,$(prefix)/include/bpf,644); \
$(call do_install,bpf_tracing.h,$(prefix)/include/bpf,644); \
$(call do_install,bpf_endian.h,$(prefix)/include/bpf,644); \
$(call do_install,bpf_core_read.h,$(prefix)/include/bpf,644);
install_pkgconfig: $(PC_FILE)
$(call QUIET_INSTALL, $(PC_FILE)) \
@ -255,14 +259,14 @@ config-clean:
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
clean:
$(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
$(call QUIET_CLEAN, libbpf) $(RM) $(CMD_TARGETS) \
*.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
*.pc LIBBPF-CFLAGS
*.pc LIBBPF-CFLAGS bpf_helper_defs.h
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
PHONY += force elfdep bpfdep
PHONY += force elfdep bpfdep cscope tags
force:
elfdep:
@ -271,6 +275,14 @@ elfdep:
bpfdep:
@if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit 1 ; fi
cscope:
ls *.c *.h > cscope.files
cscope -b -q -I $(srctree)/include -f cscope.out
tags:
rm -f TAGS tags
ls *.c *.h | xargs $(TAGS_PROG) -a
# Declare the contents of the .PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
.PHONY: $(PHONY)


@ -0,0 +1,167 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_CORE_READ_H__
#define __BPF_CORE_READ_H__
/*
* bpf_core_read() abstracts away bpf_probe_read() call and captures offset
* relocation for source address using __builtin_preserve_access_index()
* built-in, provided by Clang.
*
* __builtin_preserve_access_index() takes as an argument an expression that
* takes the address of a field within a struct/union. It makes the compiler emit
* a relocation, which records BTF type ID describing root struct/union and an
* accessor string which describes exact embedded field that was used to take
* an address. See detailed description of this relocation format and
* semantics in comments to struct bpf_offset_reloc in libbpf_internal.h.
*
* This relocation allows libbpf to adjust BPF instruction to use correct
* actual field offset, based on target kernel BTF type that matches original
* (local) BTF, used to record relocation.
*/
#define bpf_core_read(dst, sz, src) \
bpf_probe_read(dst, sz, \
(const void *)__builtin_preserve_access_index(src))
/*
* bpf_core_read_str() is a thin wrapper around bpf_probe_read_str()
* additionally emitting BPF CO-RE field relocation for specified source
* argument.
*/
#define bpf_core_read_str(dst, sz, src) \
bpf_probe_read_str(dst, sz, \
(const void *)__builtin_preserve_access_index(src))
#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N
/*
* return number of provided arguments; used for switch-based variadic macro
* definitions (see ___last, ___arrow, etc below)
*/
#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
/*
* return 0 if no arguments are passed, N - otherwise; used for
* recursively-defined macros to specify termination (0) case, and generic
* (N) case (e.g., ___read_ptrs, ___core_read)
*/
#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
#define ___last1(x) x
#define ___last2(a, x) x
#define ___last3(a, b, x) x
#define ___last4(a, b, c, x) x
#define ___last5(a, b, c, d, x) x
#define ___last6(a, b, c, d, e, x) x
#define ___last7(a, b, c, d, e, f, x) x
#define ___last8(a, b, c, d, e, f, g, x) x
#define ___last9(a, b, c, d, e, f, g, h, x) x
#define ___last10(a, b, c, d, e, f, g, h, i, x) x
#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__)
#define ___nolast2(a, _) a
#define ___nolast3(a, b, _) a, b
#define ___nolast4(a, b, c, _) a, b, c
#define ___nolast5(a, b, c, d, _) a, b, c, d
#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e
#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f
#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g
#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h
#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i
#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__)
#define ___arrow1(a) a
#define ___arrow2(a, b) a->b
#define ___arrow3(a, b, c) a->b->c
#define ___arrow4(a, b, c, d) a->b->c->d
#define ___arrow5(a, b, c, d, e) a->b->c->d->e
#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f
#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g
#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h
#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i
#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)
#define ___type(...) typeof(___arrow(__VA_ARGS__))
#define ___read(read_fn, dst, src_type, src, accessor) \
read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
/* "recursively" read a sequence of inner pointers using local __t var */
#define ___rd_first(src, a) ___read(bpf_core_read, &__t, ___type(src), src, a);
#define ___rd_last(...) \
___read(bpf_core_read, &__t, \
___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
#define ___rd_p1(...) const void *__t; ___rd_first(__VA_ARGS__)
#define ___rd_p2(...) ___rd_p1(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p3(...) ___rd_p2(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p4(...) ___rd_p3(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p5(...) ___rd_p4(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p6(...) ___rd_p5(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p7(...) ___rd_p6(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p8(...) ___rd_p7(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___rd_p9(...) ___rd_p8(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
#define ___read_ptrs(src, ...) \
___apply(___rd_p, ___narg(__VA_ARGS__))(src, __VA_ARGS__)
#define ___core_read0(fn, dst, src, a) \
___read(fn, dst, ___type(src), src, a);
#define ___core_readN(fn, dst, src, ...) \
___read_ptrs(src, ___nolast(__VA_ARGS__)) \
___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t, \
___last(__VA_ARGS__));
#define ___core_read(fn, dst, src, a, ...) \
___apply(___core_read, ___empty(__VA_ARGS__))(fn, dst, \
src, a, ##__VA_ARGS__)
/*
* BPF_CORE_READ_INTO() is a more performance-conscious variant of
* BPF_CORE_READ(), in which the final field is read into user-provided storage.
* See BPF_CORE_READ() below for more details on general usage.
*/
#define BPF_CORE_READ_INTO(dst, src, a, ...) \
({ \
___core_read(bpf_core_read, dst, src, a, ##__VA_ARGS__) \
})
/*
* BPF_CORE_READ_STR_INTO() does the same "pointer chasing" as
* BPF_CORE_READ() for intermediate pointers, but then executes (and returns
* the corresponding error code of) bpf_core_read_str() for the final string read.
*/
#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) \
({ \
___core_read(bpf_core_read_str, dst, src, a, ##__VA_ARGS__) \
})
/*
* BPF_CORE_READ() is used to simplify a BPF CO-RE relocatable read, especially
* when there are a few pointer-chasing steps.
* E.g., what in non-BPF world (or in BPF w/ BCC) would be something like:
* int x = s->a.b.c->d.e->f->g;
* can be succinctly achieved using BPF_CORE_READ as:
* int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
*
* BPF_CORE_READ will decompose the above statement into 4 bpf_core_read() (a
* BPF CO-RE relocatable bpf_probe_read() wrapper) calls, logically equivalent to:
* 1. const void *__t = s->a.b.c;
* 2. __t = __t->d.e;
* 3. __t = __t->f;
* 4. return __t->g;
*
* The equivalence is logical, because heavy type casting/preservation is
* involved, and all the reads happen through bpf_probe_read() calls using
* __builtin_preserve_access_index() to emit CO-RE relocations.
*
* N.B. Only up to 9 "field accessors" are supported, which should be more
* than enough for any practical purpose.
*/
#define BPF_CORE_READ(src, a, ...) \
({ \
___type(src, a, ##__VA_ARGS__) __r; \
BPF_CORE_READ_INTO(&__r, src, a, ##__VA_ARGS__); \
__r; \
})
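/*
 * Illustrative sketch (not part of this header; 'task' is a hypothetical
 * struct task_struct pointer from a tracing context):
 *
 *     unsigned long start_stack = BPF_CORE_READ(task, mm, start_stack);
 *
 *     char comm[16];
 *     BPF_CORE_READ_STR_INTO(&comm, task, comm);
 */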
#endif

View File

@ -0,0 +1,41 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_HELPERS__
#define __BPF_HELPERS__
#include "bpf_helper_defs.h"
#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
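/*
 * Illustrative sketch of a BTF-defined map declared with these macros (the
 * map name is hypothetical; the ".maps" section convention is libbpf's):
 *
 *     struct {
 *         __uint(type, BPF_MAP_TYPE_ARRAY);
 *         __uint(max_entries, 1);
 *         __type(key, __u32);
 *         __type(value, __u64);
 *     } my_map SEC(".maps");
 */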
/* Helper macro to print out debug messages */
#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})
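/*
 * Illustrative usage sketch: the formatted message lands in the kernel trace
 * buffer and can be read from /sys/kernel/debug/tracing/trace_pipe:
 *
 *     bpf_printk("pid %d\n", (int)(bpf_get_current_pid_tgid() >> 32));
 */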
/*
* Helper macro to place programs, maps, and license in
* different sections of the elf_bpf file. Section names
* are interpreted by the elf_bpf loader.
*/
#define SEC(NAME) __attribute__((section(NAME), used))
#ifndef __always_inline
#define __always_inline __attribute__((always_inline))
#endif
/*
* Helper structure used by eBPF C program
* to describe BPF map attributes to libbpf loader
*/
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
unsigned int map_flags;
};
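/*
 * Illustrative sketch of a legacy-style map definition consumed by the
 * libbpf loader (map name hypothetical):
 *
 *     struct bpf_map_def SEC("maps") my_map = {
 *         .type        = BPF_MAP_TYPE_HASH,
 *         .key_size    = sizeof(__u32),
 *         .value_size  = sizeof(__u64),
 *         .max_entries = 1024,
 *     };
 */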
#endif

tools/lib/bpf/bpf_tracing.h Normal file
View File

@ -0,0 +1,195 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_TRACING_H__
#define __BPF_TRACING_H__
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s390)
#define bpf_target_s390
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm)
#define bpf_target_arm
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
#define bpf_target_mips
#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
#define bpf_target_powerpc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_sparc)
#define bpf_target_sparc
#define bpf_target_defined
#else
#undef bpf_target_defined
#endif
/* Fall back to what the compiler says */
#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
#elif defined(__s390__)
#define bpf_target_s390
#elif defined(__arm__)
#define bpf_target_arm
#elif defined(__aarch64__)
#define bpf_target_arm64
#elif defined(__mips__)
#define bpf_target_mips
#elif defined(__powerpc__)
#define bpf_target_powerpc
#elif defined(__sparc__)
#define bpf_target_sparc
#endif
#endif
#if defined(bpf_target_x86)
#ifdef __KERNEL__
#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
#define PT_REGS_PARM4(x) ((x)->cx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->sp)
#define PT_REGS_FP(x) ((x)->bp)
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)
#else
#ifdef __i386__
/* i386 kernel is built with -mregparm=3 */
#define PT_REGS_PARM1(x) ((x)->eax)
#define PT_REGS_PARM2(x) ((x)->edx)
#define PT_REGS_PARM3(x) ((x)->ecx)
#define PT_REGS_PARM4(x) 0
#define PT_REGS_PARM5(x) 0
#define PT_REGS_RET(x) ((x)->esp)
#define PT_REGS_FP(x) ((x)->ebp)
#define PT_REGS_RC(x) ((x)->eax)
#define PT_REGS_SP(x) ((x)->esp)
#define PT_REGS_IP(x) ((x)->eip)
#else
#define PT_REGS_PARM1(x) ((x)->rdi)
#define PT_REGS_PARM2(x) ((x)->rsi)
#define PT_REGS_PARM3(x) ((x)->rdx)
#define PT_REGS_PARM4(x) ((x)->rcx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->rsp)
#define PT_REGS_FP(x) ((x)->rbp)
#define PT_REGS_RC(x) ((x)->rax)
#define PT_REGS_SP(x) ((x)->rsp)
#define PT_REGS_IP(x) ((x)->rip)
#endif
#endif
#elif defined(bpf_target_s390)
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
struct pt_regs;
#define PT_REGS_S390 const volatile user_pt_regs
#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
/* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
#elif defined(bpf_target_arm)
#define PT_REGS_PARM1(x) ((x)->uregs[0])
#define PT_REGS_PARM2(x) ((x)->uregs[1])
#define PT_REGS_PARM3(x) ((x)->uregs[2])
#define PT_REGS_PARM4(x) ((x)->uregs[3])
#define PT_REGS_PARM5(x) ((x)->uregs[4])
#define PT_REGS_RET(x) ((x)->uregs[14])
#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->uregs[0])
#define PT_REGS_SP(x) ((x)->uregs[13])
#define PT_REGS_IP(x) ((x)->uregs[12])
#elif defined(bpf_target_arm64)
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
struct pt_regs;
#define PT_REGS_ARM64 const volatile struct user_pt_regs
#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
/* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
#elif defined(bpf_target_mips)
#define PT_REGS_PARM1(x) ((x)->regs[4])
#define PT_REGS_PARM2(x) ((x)->regs[5])
#define PT_REGS_PARM3(x) ((x)->regs[6])
#define PT_REGS_PARM4(x) ((x)->regs[7])
#define PT_REGS_PARM5(x) ((x)->regs[8])
#define PT_REGS_RET(x) ((x)->regs[31])
#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[1])
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)
#elif defined(bpf_target_powerpc)
#define PT_REGS_PARM1(x) ((x)->gpr[3])
#define PT_REGS_PARM2(x) ((x)->gpr[4])
#define PT_REGS_PARM3(x) ((x)->gpr[5])
#define PT_REGS_PARM4(x) ((x)->gpr[6])
#define PT_REGS_PARM5(x) ((x)->gpr[7])
#define PT_REGS_RC(x) ((x)->gpr[3])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->nip)
#elif defined(bpf_target_sparc)
#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
#define PT_REGS_IP(x) ((x)->tpc)
#else
#define PT_REGS_IP(x) ((x)->pc)
#endif
#endif
#if defined(bpf_target_powerpc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#elif defined(bpf_target_sparc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#else
#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
({ bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
({ bpf_probe_read(&(ip), sizeof(ip), \
(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
#endif
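/*
 * Illustrative sketch (not part of this header): a kprobe program using the
 * PT_REGS_* accessors; the traced function and argument use are hypothetical:
 *
 *     SEC("kprobe/ip_rcv")
 *     int trace_ip_rcv(struct pt_regs *ctx)
 *     {
 *         void *first_arg = (void *)PT_REGS_PARM1(ctx);
 *
 *         // read through first_arg with bpf_probe_read()
 *         return 0;
 *     }
 */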
#endif

View File

@ -876,7 +876,6 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
__u16 vlen = btf_vlen(t);
packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
align = packed ? 1 : btf_align_of(d->btf, id);
btf_dump_printf(d, "%s%s%s {",
is_struct ? "struct" : "union",
@ -906,6 +905,13 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
btf_dump_printf(d, ";");
}
/* pad at the end, if necessary */
if (is_struct) {
align = packed ? 1 : btf_align_of(d->btf, id);
btf_dump_emit_bit_padding(d, off, t->size * 8, 0, align,
lvl + 1);
}
if (vlen)
btf_dump_printf(d, "\n");
btf_dump_printf(d, "%s}", pfx(lvl));
@ -969,6 +975,17 @@ static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
{
const char *name = btf_dump_ident_name(d, id);
/*
* Old GCC versions emit an invalid typedef for __gnuc_va_list
* pointing to VOID. This generates warnings from btf_dump() and
* results in an uncompilable header file, so we fix it up here
* with a valid typedef to __builtin_va_list.
*/
if (t->type == 0 && strcmp(name, "__gnuc_va_list") == 0) {
btf_dump_printf(d, "typedef __builtin_va_list __gnuc_va_list");
return;
}
btf_dump_printf(d, "typedef ");
btf_dump_emit_type_decl(d, t->type, name, lvl);
}

View File

@ -33,6 +33,7 @@
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
@ -255,7 +256,7 @@ struct bpf_object {
*/
struct {
int fd;
void *obj_buf;
const void *obj_buf;
size_t obj_buf_sz;
Elf *elf;
GElf_Ehdr ehdr;
@ -491,9 +492,21 @@ bpf_object__init_prog_names(struct bpf_object *obj)
return 0;
}
static __u32 get_kernel_version(void)
{
__u32 major, minor, patch;
struct utsname info;
uname(&info);
if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
return 0;
return KERNEL_VERSION(major, minor, patch);
}
static struct bpf_object *bpf_object__new(const char *path,
void *obj_buf,
size_t obj_buf_sz)
const void *obj_buf,
size_t obj_buf_sz,
const char *obj_name)
{
struct bpf_object *obj;
char *end;
@ -505,11 +518,17 @@ static struct bpf_object *bpf_object__new(const char *path,
}
strcpy(obj->path, path);
/* Using basename() GNU version which doesn't modify arg. */
strncpy(obj->name, basename((void *)path), sizeof(obj->name) - 1);
end = strchr(obj->name, '.');
if (end)
*end = 0;
if (obj_name) {
strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
obj->name[sizeof(obj->name) - 1] = 0;
} else {
/* Using basename() GNU version which doesn't modify arg. */
strncpy(obj->name, basename((void *)path),
sizeof(obj->name) - 1);
end = strchr(obj->name, '.');
if (end)
*end = 0;
}
obj->efile.fd = -1;
/*
@ -526,6 +545,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.rodata_shndx = -1;
obj->efile.bss_shndx = -1;
obj->kern_version = get_kernel_version();
obj->loaded = false;
INIT_LIST_HEAD(&obj->list);
@ -569,7 +589,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
* obj_buf should have been validated by
* bpf_object__open_buffer().
*/
obj->efile.elf = elf_memory(obj->efile.obj_buf,
obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
obj->efile.obj_buf_sz);
} else {
obj->efile.fd = open(obj->path, O_RDONLY);
@ -636,21 +656,6 @@ bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
return 0;
}
static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
__u32 kver;
if (size != sizeof(kver)) {
pr_warning("invalid kver section in %s\n", obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
memcpy(&kver, data, sizeof(kver));
obj->kern_version = kver;
pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
return 0;
}
static int compare_bpf_map(const void *_a, const void *_b)
{
const struct bpf_map *a = _a;
@ -1568,11 +1573,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
if (err)
return err;
} else if (strcmp(name, "version") == 0) {
err = bpf_object__init_kversion(obj,
data->d_buf,
data->d_size);
if (err)
return err;
/* skip, we don't need it anymore */
} else if (strcmp(name, "maps") == 0) {
obj->efile.maps_shndx = idx;
} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
@ -3551,54 +3552,9 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
return 0;
}
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
switch (type) {
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
case BPF_PROG_TYPE_XDP:
case BPF_PROG_TYPE_CGROUP_SKB:
case BPF_PROG_TYPE_CGROUP_SOCK:
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_XMIT:
case BPF_PROG_TYPE_LWT_SEG6LOCAL:
case BPF_PROG_TYPE_SOCK_OPS:
case BPF_PROG_TYPE_SK_SKB:
case BPF_PROG_TYPE_CGROUP_DEVICE:
case BPF_PROG_TYPE_SK_MSG:
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
case BPF_PROG_TYPE_LIRC_MODE2:
case BPF_PROG_TYPE_SK_REUSEPORT:
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_UNSPEC:
case BPF_PROG_TYPE_TRACEPOINT:
case BPF_PROG_TYPE_RAW_TRACEPOINT:
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
case BPF_PROG_TYPE_PERF_EVENT:
case BPF_PROG_TYPE_CGROUP_SYSCTL:
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
return false;
case BPF_PROG_TYPE_KPROBE:
default:
return true;
}
}
static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
{
if (needs_kver && obj->kern_version == 0) {
pr_warning("%s doesn't provide kernel version\n",
obj->path);
return -LIBBPF_ERRNO__KVERSION;
}
return 0;
}
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
bool needs_kver, int flags)
__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
const char *obj_name, int flags)
{
struct bpf_object *obj;
int err;
@ -3608,7 +3564,7 @@ __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
}
obj = bpf_object__new(path, obj_buf, obj_buf_sz);
obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
if (IS_ERR(obj))
return obj;
@ -3617,7 +3573,6 @@ __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
CHECK_ERR(bpf_object__probe_caps(obj), err, out);
CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
bpf_object__elf_finish(obj);
return obj;
@ -3626,8 +3581,8 @@ out:
return ERR_PTR(err);
}
struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
int flags)
static struct bpf_object *
__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
{
/* param validation */
if (!attr->file)
@ -3635,9 +3590,7 @@ struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
pr_debug("loading %s\n", attr->file);
return __bpf_object__open(attr->file, NULL, 0,
bpf_prog_type__needs_kver(attr->prog_type),
flags);
return __bpf_object__open(attr->file, NULL, 0, NULL, flags);
}
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
@ -3655,25 +3608,67 @@ struct bpf_object *bpf_object__open(const char *path)
return bpf_object__open_xattr(&attr);
}
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
size_t obj_buf_sz,
const char *name)
struct bpf_object *
bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts)
{
const char *obj_name;
bool relaxed_maps;
if (!OPTS_VALID(opts, bpf_object_open_opts))
return ERR_PTR(-EINVAL);
if (!path)
return ERR_PTR(-EINVAL);
pr_debug("loading %s\n", path);
obj_name = OPTS_GET(opts, object_name, path);
relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
return __bpf_object__open(path, NULL, 0, obj_name,
relaxed_maps ? MAPS_RELAX_COMPAT : 0);
}
struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
struct bpf_object_open_opts *opts)
{
char tmp_name[64];
const char *obj_name;
bool relaxed_maps;
/* param validation */
if (!obj_buf || obj_buf_sz <= 0)
return NULL;
if (!OPTS_VALID(opts, bpf_object_open_opts))
return ERR_PTR(-EINVAL);
if (!obj_buf || obj_buf_sz == 0)
return ERR_PTR(-EINVAL);
if (!name) {
obj_name = OPTS_GET(opts, object_name, NULL);
if (!obj_name) {
snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
(unsigned long)obj_buf,
(unsigned long)obj_buf_sz);
name = tmp_name;
obj_name = tmp_name;
}
pr_debug("loading object '%s' from buffer\n", name);
pr_debug("loading object '%s' from buffer\n", obj_name);
return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
return __bpf_object__open(obj_name, obj_buf, obj_buf_sz, obj_name,
relaxed_maps ? MAPS_RELAX_COMPAT : 0);
}
struct bpf_object *
bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
const char *name)
{
LIBBPF_OPTS(bpf_object_open_opts, opts,
.object_name = name,
/* wrong default, but backwards-compatible */
.relaxed_maps = true,
);
/* returning NULL is wrong, but backwards-compatible */
if (!obj_buf || obj_buf_sz == 0)
return NULL;
return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
}
int bpf_object__unload(struct bpf_object *obj)
@ -4236,7 +4231,7 @@ bpf_object__next(struct bpf_object *prev)
const char *bpf_object__name(const struct bpf_object *obj)
{
return obj ? obj->path : ERR_PTR(-EINVAL);
return obj ? obj->name : ERR_PTR(-EINVAL);
}
unsigned int bpf_object__kversion(const struct bpf_object *obj)

View File

@ -67,14 +67,52 @@ struct bpf_object_open_attr {
enum bpf_prog_type prog_type;
};
/* Helper macro to declare and initialize libbpf options struct
*
* This dance with an uninitialized declaration, followed by a memset to zero,
* followed by an assignment using compound literal syntax, is done to preserve
* the ability to use a nice struct field initialization syntax and
* **hopefully** have all the padding bytes initialized to zero. It's not
* guaranteed, though, that the compiler won't copy garbage into the literal's
* padding bytes when copying the literal, but that's the best way I've found
* and it seems to work in practice.
*/
#define LIBBPF_OPTS(TYPE, NAME, ...) \
struct TYPE NAME; \
memset(&NAME, 0, sizeof(struct TYPE)); \
NAME = (struct TYPE) { \
.sz = sizeof(struct TYPE), \
__VA_ARGS__ \
}
struct bpf_object_open_opts {
/* size of this struct, for forward/backward compatibility */
size_t sz;
/* object name override, if provided:
* - for object open from file, this will override setting object
* name from file path's base name;
* - for object open from memory buffer, this will specify an object
* name and will override default "<addr>-<buf-size>" name;
*/
const char *object_name;
/* parse map definitions non-strictly, allowing extra attributes/data */
bool relaxed_maps;
};
#define bpf_object_open_opts__last_field relaxed_maps
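/*
 * Illustrative usage sketch (object file name is hypothetical):
 *
 *     LIBBPF_OPTS(bpf_object_open_opts, opts,
 *         .object_name = "my_obj",
 *     );
 *     struct bpf_object *obj = bpf_object__open_file("prog.o", &opts);
 *
 *     if (IS_ERR(obj))
 *         // handle the error
 */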
LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
LIBBPF_API struct bpf_object *
bpf_object__open_file(const char *path, struct bpf_object_open_opts *opts);
LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
struct bpf_object_open_opts *opts);
/* deprecated bpf_object__open variants */
LIBBPF_API struct bpf_object *
bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
const char *name);
LIBBPF_API struct bpf_object *
bpf_object__open_xattr(struct bpf_object_open_attr *attr);
struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
int flags);
LIBBPF_API struct bpf_object *bpf_object__open_buffer(void *obj_buf,
size_t obj_buf_sz,
const char *name);
int bpf_object__section_size(const struct bpf_object *obj, const char *name,
__u32 *size);
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,

View File

@ -190,3 +190,9 @@ LIBBPF_0.0.5 {
global:
bpf_btf_get_next_id;
} LIBBPF_0.0.4;
LIBBPF_0.0.6 {
global:
bpf_object__open_file;
bpf_object__open_mem;
} LIBBPF_0.0.5;

View File

@ -47,6 +47,38 @@ do { \
#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
static inline bool libbpf_validate_opts(const char *opts,
size_t opts_sz, size_t user_sz,
const char *type_name)
{
if (user_sz < sizeof(size_t)) {
pr_warning("%s size (%zu) is too small\n", type_name, user_sz);
return false;
}
if (user_sz > opts_sz) {
size_t i;
for (i = opts_sz; i < user_sz; i++) {
if (opts[i]) {
pr_warning("%s has non-zero extra bytes",
type_name);
return false;
}
}
}
return true;
}
#define OPTS_VALID(opts, type) \
(!(opts) || libbpf_validate_opts((const char *)opts, \
offsetofend(struct type, \
type##__last_field), \
(opts)->sz, #type))
#define OPTS_HAS(opts, field) \
((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
#define OPTS_GET(opts, field, fallback_value) \
(OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
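/*
 * Illustrative sketch of the intended compatibility behavior: a caller built
 * against an older libbpf passes a smaller opts struct, and OPTS_GET() falls
 * back to the default for fields that struct is too small to contain; a
 * larger struct is accepted by OPTS_VALID() as long as the extra bytes are
 * zero. For example:
 *
 *     bool relaxed = OPTS_GET(opts, relaxed_maps, false);
 */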
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len);

View File

@ -7,12 +7,14 @@
int main(int argc, char *argv[])
{
/* libbpf.h */
libbpf_set_print(NULL);
/* libbpf.h */
libbpf_set_print(NULL);
/* bpf.h */
bpf_prog_get_fd_by_id(0);
/* bpf.h */
bpf_prog_get_fd_by_id(0);
/* btf.h */
btf__new(NULL, 0);
/* btf.h */
btf__new(NULL, 0);
return 0;
}

View File

@ -163,6 +163,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
umem->umem_area = umem_area;
xsk_set_umem_config(&umem->config, usr_config);
memset(&mr, 0, sizeof(mr));
mr.addr = (uintptr_t)umem_area;
mr.len = size;
mr.chunk_size = umem->config.frame_size;

View File

@ -15,8 +15,6 @@ endif
CLANG ?= clang
LLC ?= llc
LLVM_OBJCOPY ?= llvm-objcopy
LLVM_READELF ?= llvm-readelf
BTF_PAHOLE ?= pahole
BPF_GCC ?= $(shell command -v bpf-gcc;)
CFLAGS += -g -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include \
-Dbpf_prog_load=bpf_prog_test_load \
@ -29,7 +27,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage test_select_reuseport test_section_names \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \
test_btf_dump test_cgroup_attach xdping
test_cgroup_attach xdping
BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
TEST_GEN_FILES = $(BPF_OBJ_FILES)
@ -126,16 +124,6 @@ force:
$(BPFOBJ): force
$(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
# Let newer LLVM versions transparently probe the kernel for availability
# of full BPF instruction set.
ifeq ($(PROBE),)
CPU ?= probe
else
CPU ?= generic
endif
# Get Clang's default includes on this system, as opposed to those seen by
# '-target bpf'. This fixes "missing" files on some architectures/distros,
# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
@ -147,8 +135,9 @@ $(shell $(1) -v -E - </dev/null 2>&1 \
| sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }')
endef
CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG))
BPF_CFLAGS = -I. -I./include/uapi -I../../../include/uapi \
-I$(OUTPUT)/../usr/include -D__TARGET_ARCH_$(SRCARCH)
BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \
-I. -I./include/uapi -I../../../include/uapi \
-I$(BPFDIR) -I$(OUTPUT)/../usr/include
CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \
-Wno-compare-distinct-pointer-types
@ -162,28 +151,6 @@ $(OUTPUT)/test_stack_map.o: test_queue_stack_map.h
$(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
$(OUTPUT)/test_progs.o: flow_dissector_load.h
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
$(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
$(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \
/bin/rm -f ./llvm_btf_verify.o)
ifneq ($(BTF_LLVM_PROBE),)
BPF_CFLAGS += -g
else
ifneq ($(BTF_LLC_PROBE),)
ifneq ($(BTF_PAHOLE_PROBE),)
ifneq ($(BTF_OBJCOPY_PROBE),)
BPF_CFLAGS += -g
LLC_FLAGS += -mattr=dwarfris
DWARF2BTF = y
endif
endif
endif
endif
TEST_PROGS_CFLAGS := -I. -I$(OUTPUT)
TEST_MAPS_CFLAGS := -I. -I$(OUTPUT)
TEST_VERIFIER_CFLAGS := -I. -I$(OUTPUT) -Iverifier
@ -212,11 +179,8 @@ $(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR)/test_progs_32 \
| $(ALU32_BUILD_DIR)
($(CLANG) $(BPF_CFLAGS) $(CLANG_CFLAGS) -O2 -target bpf -emit-llvm \
-c $< -o - || echo "clang failed") | \
$(LLC) -march=bpf -mattr=+alu32 -mcpu=$(CPU) $(LLC_FLAGS) \
$(LLC) -march=bpf -mcpu=probe -mattr=+alu32 $(LLC_FLAGS) \
-filetype=obj -o $@
ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
endif
ifneq ($(BPF_GCC),)
@ -251,18 +215,13 @@ endif
$(OUTPUT)/test_xdp.o: progs/test_xdp.c
($(CLANG) $(BPF_CFLAGS) $(CLANG_CFLAGS) -O2 -emit-llvm -c $< -o - || \
echo "clang failed") | \
$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
$(LLC) -march=bpf -mcpu=probe $(LLC_FLAGS) -filetype=obj -o $@
$(OUTPUT)/%.o: progs/%.c
# libbpf has to be built before BPF programs due to bpf_helper_defs.h
$(OUTPUT)/%.o: progs/%.c | $(BPFOBJ)
($(CLANG) $(BPF_CFLAGS) $(CLANG_CFLAGS) -O2 -target bpf -emit-llvm \
-c $< -o - || echo "clang failed") | \
$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
$(LLC) -march=bpf -mcpu=probe $(LLC_FLAGS) -filetype=obj -o $@
PROG_TESTS_DIR = $(OUTPUT)/prog_tests
$(PROG_TESTS_DIR):
@ -271,7 +230,7 @@ PROG_TESTS_H := $(PROG_TESTS_DIR)/tests.h
PROG_TESTS_FILES := $(wildcard prog_tests/*.c)
test_progs.c: $(PROG_TESTS_H)
$(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS)
$(OUTPUT)/test_progs: test_progs.c $(PROG_TESTS_FILES) | $(PROG_TESTS_H)
$(OUTPUT)/test_progs: test_progs.c $(PROG_TESTS_FILES) | $(OUTPUT)/test_attach_probe.o $(PROG_TESTS_H)
$(PROG_TESTS_H): $(PROG_TESTS_FILES) | $(PROG_TESTS_DIR)
$(shell ( cd prog_tests/; \
echo '/* Generated header, do not edit */'; \

View File

@ -1,535 +0,0 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_HELPERS__
#define __BPF_HELPERS__
#define __uint(name, val) int (*name)[val]
#define __type(name, val) val *name
/* helper macro to print out debug messages */
#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})
#ifdef __clang__
/* helper macro to place programs, maps, license in
* different sections in elf_bpf file. Section names
* are interpreted by elf_bpf loader
*/
#define SEC(NAME) __attribute__((section(NAME), used))
/* helper functions called from eBPF programs written in C */
static void *(*bpf_map_lookup_elem)(void *map, const void *key) =
(void *) BPF_FUNC_map_lookup_elem;
static int (*bpf_map_update_elem)(void *map, const void *key, const void *value,
unsigned long long flags) =
(void *) BPF_FUNC_map_update_elem;
static int (*bpf_map_delete_elem)(void *map, const void *key) =
(void *) BPF_FUNC_map_delete_elem;
static int (*bpf_map_push_elem)(void *map, const void *value,
unsigned long long flags) =
(void *) BPF_FUNC_map_push_elem;
static int (*bpf_map_pop_elem)(void *map, void *value) =
(void *) BPF_FUNC_map_pop_elem;
static int (*bpf_map_peek_elem)(void *map, void *value) =
(void *) BPF_FUNC_map_peek_elem;
static int (*bpf_probe_read)(void *dst, int size, const void *unsafe_ptr) =
(void *) BPF_FUNC_probe_read;
static unsigned long long (*bpf_ktime_get_ns)(void) =
(void *) BPF_FUNC_ktime_get_ns;
static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
(void *) BPF_FUNC_trace_printk;
static void (*bpf_tail_call)(void *ctx, void *map, int index) =
(void *) BPF_FUNC_tail_call;
static unsigned long long (*bpf_get_smp_processor_id)(void) =
(void *) BPF_FUNC_get_smp_processor_id;
static unsigned long long (*bpf_get_current_pid_tgid)(void) =
(void *) BPF_FUNC_get_current_pid_tgid;
static unsigned long long (*bpf_get_current_uid_gid)(void) =
(void *) BPF_FUNC_get_current_uid_gid;
static int (*bpf_get_current_comm)(void *buf, int buf_size) =
(void *) BPF_FUNC_get_current_comm;
static unsigned long long (*bpf_perf_event_read)(void *map,
unsigned long long flags) =
(void *) BPF_FUNC_perf_event_read;
static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
(void *) BPF_FUNC_clone_redirect;
static int (*bpf_redirect)(int ifindex, int flags) =
(void *) BPF_FUNC_redirect;
static int (*bpf_redirect_map)(void *map, int key, int flags) =
(void *) BPF_FUNC_redirect_map;
static int (*bpf_perf_event_output)(void *ctx, void *map,
unsigned long long flags, void *data,
int size) =
(void *) BPF_FUNC_perf_event_output;
static int (*bpf_get_stackid)(void *ctx, void *map, int flags) =
(void *) BPF_FUNC_get_stackid;
static int (*bpf_probe_write_user)(void *dst, const void *src, int size) =
(void *) BPF_FUNC_probe_write_user;
static int (*bpf_current_task_under_cgroup)(void *map, int index) =
(void *) BPF_FUNC_current_task_under_cgroup;
static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) =
(void *) BPF_FUNC_skb_get_tunnel_key;
static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) =
(void *) BPF_FUNC_skb_set_tunnel_key;
static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) =
(void *) BPF_FUNC_skb_get_tunnel_opt;
static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) =
(void *) BPF_FUNC_skb_set_tunnel_opt;
static unsigned long long (*bpf_get_prandom_u32)(void) =
(void *) BPF_FUNC_get_prandom_u32;
static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
(void *) BPF_FUNC_xdp_adjust_head;
static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
(void *) BPF_FUNC_xdp_adjust_meta;
static int (*bpf_get_socket_cookie)(void *ctx) =
(void *) BPF_FUNC_get_socket_cookie;
static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
int optlen) =
(void *) BPF_FUNC_setsockopt;
static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
int optlen) =
(void *) BPF_FUNC_getsockopt;
static int (*bpf_sock_ops_cb_flags_set)(void *ctx, int flags) =
(void *) BPF_FUNC_sock_ops_cb_flags_set;
static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
(void *) BPF_FUNC_sk_redirect_map;
static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, int flags) =
(void *) BPF_FUNC_sk_redirect_hash;
static int (*bpf_sock_map_update)(void *map, void *key, void *value,
unsigned long long flags) =
(void *) BPF_FUNC_sock_map_update;
static int (*bpf_sock_hash_update)(void *map, void *key, void *value,
unsigned long long flags) =
(void *) BPF_FUNC_sock_hash_update;
static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
void *buf, unsigned int buf_size) =
(void *) BPF_FUNC_perf_event_read_value;
static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
unsigned int buf_size) =
(void *) BPF_FUNC_perf_prog_read_value;
static int (*bpf_override_return)(void *ctx, unsigned long rc) =
(void *) BPF_FUNC_override_return;
static int (*bpf_msg_redirect_map)(void *ctx, void *map, int key, int flags) =
(void *) BPF_FUNC_msg_redirect_map;
static int (*bpf_msg_redirect_hash)(void *ctx,
void *map, void *key, int flags) =
(void *) BPF_FUNC_msg_redirect_hash;
static int (*bpf_msg_apply_bytes)(void *ctx, int len) =
(void *) BPF_FUNC_msg_apply_bytes;
static int (*bpf_msg_cork_bytes)(void *ctx, int len) =
(void *) BPF_FUNC_msg_cork_bytes;
static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) =
(void *) BPF_FUNC_msg_pull_data;
static int (*bpf_msg_push_data)(void *ctx, int start, int end, int flags) =
(void *) BPF_FUNC_msg_push_data;
static int (*bpf_msg_pop_data)(void *ctx, int start, int cut, int flags) =
(void *) BPF_FUNC_msg_pop_data;
static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
(void *) BPF_FUNC_bind;
static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
(void *) BPF_FUNC_xdp_adjust_tail;
static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state,
int size, int flags) =
(void *) BPF_FUNC_skb_get_xfrm_state;
static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) =
(void *) BPF_FUNC_sk_select_reuseport;
static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) =
(void *) BPF_FUNC_get_stack;
static int (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params,
int plen, __u32 flags) =
(void *) BPF_FUNC_fib_lookup;
static int (*bpf_lwt_push_encap)(void *ctx, unsigned int type, void *hdr,
unsigned int len) =
(void *) BPF_FUNC_lwt_push_encap;
static int (*bpf_lwt_seg6_store_bytes)(void *ctx, unsigned int offset,
void *from, unsigned int len) =
(void *) BPF_FUNC_lwt_seg6_store_bytes;
static int (*bpf_lwt_seg6_action)(void *ctx, unsigned int action, void *param,
unsigned int param_len) =
(void *) BPF_FUNC_lwt_seg6_action;
static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, unsigned int offset,
unsigned int len) =
(void *) BPF_FUNC_lwt_seg6_adjust_srh;
static int (*bpf_rc_repeat)(void *ctx) =
(void *) BPF_FUNC_rc_repeat;
static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol,
unsigned long long scancode, unsigned int toggle) =
(void *) BPF_FUNC_rc_keydown;
static unsigned long long (*bpf_get_current_cgroup_id)(void) =
(void *) BPF_FUNC_get_current_cgroup_id;
static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) =
(void *) BPF_FUNC_get_local_storage;
static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) =
(void *) BPF_FUNC_skb_cgroup_id;
static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
(void *) BPF_FUNC_skb_ancestor_cgroup_id;
static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
struct bpf_sock_tuple *tuple,
int size, unsigned long long netns_id,
unsigned long long flags) =
(void *) BPF_FUNC_sk_lookup_tcp;
static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx,
struct bpf_sock_tuple *tuple,
int size, unsigned long long netns_id,
unsigned long long flags) =
(void *) BPF_FUNC_skc_lookup_tcp;
static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
struct bpf_sock_tuple *tuple,
int size, unsigned long long netns_id,
unsigned long long flags) =
(void *) BPF_FUNC_sk_lookup_udp;
static int (*bpf_sk_release)(struct bpf_sock *sk) =
(void *) BPF_FUNC_sk_release;
static int (*bpf_skb_vlan_push)(void *ctx, __be16 vlan_proto, __u16 vlan_tci) =
(void *) BPF_FUNC_skb_vlan_push;
static int (*bpf_skb_vlan_pop)(void *ctx) =
(void *) BPF_FUNC_skb_vlan_pop;
static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) =
(void *) BPF_FUNC_rc_pointer_rel;
static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
(void *) BPF_FUNC_spin_lock;
static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
(void *) BPF_FUNC_spin_unlock;
static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
(void *) BPF_FUNC_sk_fullsock;
static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
(void *) BPF_FUNC_tcp_sock;
static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
(void *) BPF_FUNC_get_listener_sock;
static int (*bpf_skb_ecn_set_ce)(void *ctx) =
(void *) BPF_FUNC_skb_ecn_set_ce;
static int (*bpf_tcp_check_syncookie)(struct bpf_sock *sk,
void *ip, int ip_len, void *tcp, int tcp_len) =
(void *) BPF_FUNC_tcp_check_syncookie;
static int (*bpf_sysctl_get_name)(void *ctx, char *buf,
unsigned long long buf_len,
unsigned long long flags) =
(void *) BPF_FUNC_sysctl_get_name;
static int (*bpf_sysctl_get_current_value)(void *ctx, char *buf,
unsigned long long buf_len) =
(void *) BPF_FUNC_sysctl_get_current_value;
static int (*bpf_sysctl_get_new_value)(void *ctx, char *buf,
unsigned long long buf_len) =
(void *) BPF_FUNC_sysctl_get_new_value;
static int (*bpf_sysctl_set_new_value)(void *ctx, const char *buf,
unsigned long long buf_len) =
(void *) BPF_FUNC_sysctl_set_new_value;
static int (*bpf_strtol)(const char *buf, unsigned long long buf_len,
unsigned long long flags, long *res) =
(void *) BPF_FUNC_strtol;
static int (*bpf_strtoul)(const char *buf, unsigned long long buf_len,
unsigned long long flags, unsigned long *res) =
(void *) BPF_FUNC_strtoul;
static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk,
void *value, __u64 flags) =
(void *) BPF_FUNC_sk_storage_get;
static int (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) =
(void *)BPF_FUNC_sk_storage_delete;
static int (*bpf_send_signal)(unsigned sig) = (void *)BPF_FUNC_send_signal;
static long long (*bpf_tcp_gen_syncookie)(struct bpf_sock *sk, void *ip,
int ip_len, void *tcp, int tcp_len) =
(void *) BPF_FUNC_tcp_gen_syncookie;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
*/
struct sk_buff;
unsigned long long load_byte(void *skb,
unsigned long long off) asm("llvm.bpf.load.byte");
unsigned long long load_half(void *skb,
unsigned long long off) asm("llvm.bpf.load.half");
unsigned long long load_word(void *skb,
unsigned long long off) asm("llvm.bpf.load.word");
/* a helper structure used by eBPF C program
* to describe map attributes to elf_bpf loader
*/
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
unsigned int map_flags;
unsigned int inner_map_idx;
unsigned int numa_node;
};
#else
#include <bpf-helpers.h>
#endif
#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
struct ____btf_map_##name { \
type_key key; \
type_val value; \
}; \
struct ____btf_map_##name \
__attribute__ ((section(".maps." #name), used)) \
____btf_map_##name = { }
static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
(void *) BPF_FUNC_skb_load_bytes;
static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) =
(void *) BPF_FUNC_skb_load_bytes_relative;
static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
(void *) BPF_FUNC_skb_store_bytes;
static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
(void *) BPF_FUNC_l3_csum_replace;
static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
(void *) BPF_FUNC_l4_csum_replace;
static int (*bpf_csum_diff)(void *from, int from_size, void *to, int to_size, int seed) =
(void *) BPF_FUNC_csum_diff;
static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
(void *) BPF_FUNC_skb_under_cgroup;
static int (*bpf_skb_change_head)(void *, int len, int flags) =
(void *) BPF_FUNC_skb_change_head;
static int (*bpf_skb_pull_data)(void *, int len) =
(void *) BPF_FUNC_skb_pull_data;
static unsigned int (*bpf_get_cgroup_classid)(void *ctx) =
(void *) BPF_FUNC_get_cgroup_classid;
static unsigned int (*bpf_get_route_realm)(void *ctx) =
(void *) BPF_FUNC_get_route_realm;
static int (*bpf_skb_change_proto)(void *ctx, __be16 proto, __u64 flags) =
(void *) BPF_FUNC_skb_change_proto;
static int (*bpf_skb_change_type)(void *ctx, __u32 type) =
(void *) BPF_FUNC_skb_change_type;
static unsigned int (*bpf_get_hash_recalc)(void *ctx) =
(void *) BPF_FUNC_get_hash_recalc;
static unsigned long long (*bpf_get_current_task)(void) =
(void *) BPF_FUNC_get_current_task;
static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) =
(void *) BPF_FUNC_skb_change_tail;
static long long (*bpf_csum_update)(void *ctx, __u32 csum) =
(void *) BPF_FUNC_csum_update;
static void (*bpf_set_hash_invalid)(void *ctx) =
(void *) BPF_FUNC_set_hash_invalid;
static int (*bpf_get_numa_node_id)(void) =
(void *) BPF_FUNC_get_numa_node_id;
static int (*bpf_probe_read_str)(void *ctx, __u32 size,
const void *unsafe_ptr) =
(void *) BPF_FUNC_probe_read_str;
static unsigned int (*bpf_get_socket_uid)(void *ctx) =
(void *) BPF_FUNC_get_socket_uid;
static unsigned int (*bpf_set_hash)(void *ctx, __u32 hash) =
(void *) BPF_FUNC_set_hash;
static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
unsigned long long flags) =
(void *) BPF_FUNC_skb_adjust_room;
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s390)
#define bpf_target_s390
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm)
#define bpf_target_arm
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
#define bpf_target_mips
#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
#define bpf_target_powerpc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_sparc)
#define bpf_target_sparc
#define bpf_target_defined
#else
#undef bpf_target_defined
#endif
/* Fall back to what the compiler says */
#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
#elif defined(__s390__)
#define bpf_target_s390
#elif defined(__arm__)
#define bpf_target_arm
#elif defined(__aarch64__)
#define bpf_target_arm64
#elif defined(__mips__)
#define bpf_target_mips
#elif defined(__powerpc__)
#define bpf_target_powerpc
#elif defined(__sparc__)
#define bpf_target_sparc
#endif
#endif
#if defined(bpf_target_x86)
#ifdef __KERNEL__
#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
#define PT_REGS_PARM4(x) ((x)->cx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->sp)
#define PT_REGS_FP(x) ((x)->bp)
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)
#else
#ifdef __i386__
/* i386 kernel is built with -mregparm=3 */
#define PT_REGS_PARM1(x) ((x)->eax)
#define PT_REGS_PARM2(x) ((x)->edx)
#define PT_REGS_PARM3(x) ((x)->ecx)
#define PT_REGS_PARM4(x) 0
#define PT_REGS_PARM5(x) 0
#define PT_REGS_RET(x) ((x)->esp)
#define PT_REGS_FP(x) ((x)->ebp)
#define PT_REGS_RC(x) ((x)->eax)
#define PT_REGS_SP(x) ((x)->esp)
#define PT_REGS_IP(x) ((x)->eip)
#else
#define PT_REGS_PARM1(x) ((x)->rdi)
#define PT_REGS_PARM2(x) ((x)->rsi)
#define PT_REGS_PARM3(x) ((x)->rdx)
#define PT_REGS_PARM4(x) ((x)->rcx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->rsp)
#define PT_REGS_FP(x) ((x)->rbp)
#define PT_REGS_RC(x) ((x)->rax)
#define PT_REGS_SP(x) ((x)->rsp)
#define PT_REGS_IP(x) ((x)->rip)
#endif
#endif
#elif defined(bpf_target_s390)
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
struct pt_regs;
#define PT_REGS_S390 const volatile user_pt_regs
#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
/* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
#elif defined(bpf_target_arm)
#define PT_REGS_PARM1(x) ((x)->uregs[0])
#define PT_REGS_PARM2(x) ((x)->uregs[1])
#define PT_REGS_PARM3(x) ((x)->uregs[2])
#define PT_REGS_PARM4(x) ((x)->uregs[3])
#define PT_REGS_PARM5(x) ((x)->uregs[4])
#define PT_REGS_RET(x) ((x)->uregs[14])
#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->uregs[0])
#define PT_REGS_SP(x) ((x)->uregs[13])
#define PT_REGS_IP(x) ((x)->uregs[12])
#elif defined(bpf_target_arm64)
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
struct pt_regs;
#define PT_REGS_ARM64 const volatile struct user_pt_regs
#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
/* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
#elif defined(bpf_target_mips)
#define PT_REGS_PARM1(x) ((x)->regs[4])
#define PT_REGS_PARM2(x) ((x)->regs[5])
#define PT_REGS_PARM3(x) ((x)->regs[6])
#define PT_REGS_PARM4(x) ((x)->regs[7])
#define PT_REGS_PARM5(x) ((x)->regs[8])
#define PT_REGS_RET(x) ((x)->regs[31])
#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[1])
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)
#elif defined(bpf_target_powerpc)
#define PT_REGS_PARM1(x) ((x)->gpr[3])
#define PT_REGS_PARM2(x) ((x)->gpr[4])
#define PT_REGS_PARM3(x) ((x)->gpr[5])
#define PT_REGS_PARM4(x) ((x)->gpr[6])
#define PT_REGS_PARM5(x) ((x)->gpr[7])
#define PT_REGS_RC(x) ((x)->gpr[3])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->nip)
#elif defined(bpf_target_sparc)
#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
#define PT_REGS_IP(x) ((x)->tpc)
#else
#define PT_REGS_IP(x) ((x)->pc)
#endif
#endif
#if defined(bpf_target_powerpc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#elif defined(bpf_target_sparc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#else
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \
bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \
bpf_probe_read(&(ip), sizeof(ip), \
(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
#endif
/*
* BPF_CORE_READ abstracts away bpf_probe_read() call and captures offset
* relocation for source address using __builtin_preserve_access_index()
* built-in, provided by Clang.
*
* __builtin_preserve_access_index() takes as an argument an expression of
* taking an address of a field within struct/union. It makes compiler emit
* a relocation, which records BTF type ID describing root struct/union and an
* accessor string which describes exact embedded field that was used to take
* an address. See detailed description of this relocation format and
* semantics in comments to struct bpf_offset_reloc in libbpf_internal.h.
*
* This relocation allows libbpf to adjust BPF instruction to use correct
* actual field offset, based on target kernel BTF type that matches original
* (local) BTF, used to record relocation.
*/
#define BPF_CORE_READ(dst, src) \
bpf_probe_read((dst), sizeof(*(src)), \
__builtin_preserve_access_index(src))
#endif

View File

@ -0,0 +1,39 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_LEGACY__
#define __BPF_LEGACY__
/*
* legacy bpf_map_def with extra fields supported only by bpf_load(), do not
* use outside of samples/bpf
*/
struct bpf_map_def_legacy {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
unsigned int map_flags;
unsigned int inner_map_idx;
unsigned int numa_node;
};
#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
struct ____btf_map_##name { \
type_key key; \
type_val value; \
}; \
struct ____btf_map_##name \
__attribute__ ((section(".maps." #name), used)) \
____btf_map_##name = { }
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
*/
unsigned long long load_byte(void *skb,
unsigned long long off) asm("llvm.bpf.load.byte");
unsigned long long load_half(void *skb,
unsigned long long off) asm("llvm.bpf.load.half");
unsigned long long load_word(void *skb,
unsigned long long off) asm("llvm.bpf.load.word");
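/*
 * Illustrative sketch (socket filter context; the 14 + 9 offset assumes an
 * untagged Ethernet frame and points at the IPv4 protocol field):
 *
 *     __u8 ip_proto = load_byte(skb, 14 + 9);
 */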
#endif

View File

@ -41,7 +41,7 @@
*
* If successful, 0 is returned.
*/
int enable_all_controllers(char *cgroup_path)
static int enable_all_controllers(char *cgroup_path)
{
char path[PATH_MAX + 1];
char buf[PATH_MAX];
@ -98,7 +98,7 @@ int enable_all_controllers(char *cgroup_path)
*/
int setup_cgroup_environment(void)
{
char cgroup_workdir[PATH_MAX + 1];
char cgroup_workdir[PATH_MAX - 24];
format_cgroup_path(cgroup_workdir, "");

View File

@ -1,6 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#define EMBED_FILE(NAME, PATH) \
asm ( \
" .pushsection \".rodata\", \"a\", @progbits \n" \
" .global "#NAME"_data \n" \
#NAME"_data: \n" \
" .incbin \"" PATH "\" \n" \
#NAME"_data_end: \n" \
" .global "#NAME"_size \n" \
" .type "#NAME"_size, @object \n" \
" .size "#NAME"_size, 4 \n" \
" .align 4, \n" \
#NAME"_size: \n" \
" .int "#NAME"_data_end - "#NAME"_data \n" \
" .popsection \n" \
); \
extern char NAME##_data[]; \
extern int NAME##_size;
ssize_t get_base_addr() {
size_t start;
char buf[256];
@ -21,6 +39,8 @@ ssize_t get_base_addr() {
return -EINVAL;
}
EMBED_FILE(probe, "test_attach_probe.o");
void test_attach_probe(void)
{
const char *kprobe_name = "kprobe/sys_nanosleep";
@ -29,11 +49,15 @@ void test_attach_probe(void)
const char *uretprobe_name = "uretprobe/trigger_func";
const int kprobe_idx = 0, kretprobe_idx = 1;
const int uprobe_idx = 2, uretprobe_idx = 3;
const char *file = "./test_attach_probe.o";
const char *obj_name = "attach_probe";
LIBBPF_OPTS(bpf_object_open_opts, open_opts,
.object_name = obj_name,
.relaxed_maps = true,
);
struct bpf_program *kprobe_prog, *kretprobe_prog;
struct bpf_program *uprobe_prog, *uretprobe_prog;
struct bpf_object *obj;
int err, prog_fd, duration = 0, res;
int err, duration = 0, res;
struct bpf_link *kprobe_link = NULL;
struct bpf_link *kretprobe_link = NULL;
struct bpf_link *uprobe_link = NULL;
@ -48,11 +72,16 @@ void test_attach_probe(void)
return;
uprobe_offset = (size_t)&get_base_addr - base_addr;
/* load programs */
err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
/* open object */
obj = bpf_object__open_mem(probe_data, probe_size, &open_opts);
if (CHECK(IS_ERR(obj), "obj_open_mem", "err %ld\n", PTR_ERR(obj)))
return;
if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name",
"wrong obj name '%s', expected '%s'\n",
bpf_object__name(obj), obj_name))
goto cleanup;
kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name);
if (CHECK(!kprobe_prog, "find_probe",
"prog '%s' not found\n", kprobe_name))
@ -70,6 +99,16 @@ void test_attach_probe(void)
"prog '%s' not found\n", uretprobe_name))
goto cleanup;
bpf_program__set_kprobe(kprobe_prog);
bpf_program__set_kprobe(kretprobe_prog);
bpf_program__set_kprobe(uprobe_prog);
bpf_program__set_kprobe(uretprobe_prog);
/* create maps && load programs */
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
goto cleanup;
/* load maps */
results_map_fd = bpf_find_map(__func__, obj, "results_map");
if (CHECK(results_map_fd < 0, "find_results_map",

View File

@ -1,36 +1,26 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <linux/err.h>
#include <btf.h>
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <bpf/btf.h>
#define CHECK(condition, format...) ({ \
int __ret = !!(condition); \
if (__ret) { \
fprintf(stderr, "%s:%d:FAIL ", __func__, __LINE__); \
fprintf(stderr, format); \
} \
__ret; \
})
static int duration = 0;
void btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
vfprintf(ctx, fmt, args);
}
struct btf_dump_test_case {
static struct btf_dump_test_case {
const char *name;
const char *file;
struct btf_dump_opts opts;
} btf_dump_test_cases[] = {
{.name = "btf_dump_test_case_syntax", .opts = {}},
{.name = "btf_dump_test_case_ordering", .opts = {}},
{.name = "btf_dump_test_case_padding", .opts = {}},
{.name = "btf_dump_test_case_packing", .opts = {}},
{.name = "btf_dump_test_case_bitfields", .opts = {}},
{.name = "btf_dump_test_case_multidim", .opts = {}},
{.name = "btf_dump_test_case_namespacing", .opts = {}},
{"btf_dump: syntax", "btf_dump_test_case_syntax", {}},
{"btf_dump: ordering", "btf_dump_test_case_ordering", {}},
{"btf_dump: padding", "btf_dump_test_case_padding", {}},
{"btf_dump: packing", "btf_dump_test_case_packing", {}},
{"btf_dump: bitfields", "btf_dump_test_case_bitfields", {}},
{"btf_dump: multidim", "btf_dump_test_case_multidim", {}},
{"btf_dump: namespacing", "btf_dump_test_case_namespacing", {}},
};
static int btf_dump_all_types(const struct btf *btf,
@ -55,55 +45,51 @@ done:
return err;
}
int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
{
char test_file[256], out_file[256], diff_cmd[1024];
struct btf *btf = NULL;
int err = 0, fd = -1;
FILE *f = NULL;
fprintf(stderr, "Test case #%d (%s): ", n, test_case->name);
snprintf(test_file, sizeof(test_file), "%s.o", test_case->name);
snprintf(test_file, sizeof(test_file), "%s.o", t->file);
btf = btf__parse_elf(test_file, NULL);
if (CHECK(IS_ERR(btf),
if (CHECK(IS_ERR(btf), "btf_parse_elf",
"failed to load test BTF: %ld\n", PTR_ERR(btf))) {
err = -PTR_ERR(btf);
btf = NULL;
goto done;
}
snprintf(out_file, sizeof(out_file),
"/tmp/%s.output.XXXXXX", test_case->name);
snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
fd = mkstemp(out_file);
if (CHECK(fd < 0, "failed to create temp output file: %d\n", fd)) {
if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) {
err = fd;
goto done;
}
f = fdopen(fd, "w");
if (CHECK(f == NULL, "failed to open temp output file: %s(%d)\n",
if (CHECK(f == NULL, "open_tmp", "failed to open file: %s(%d)\n",
strerror(errno), errno)) {
close(fd);
goto done;
}
test_case->opts.ctx = f;
err = btf_dump_all_types(btf, &test_case->opts);
t->opts.ctx = f;
err = btf_dump_all_types(btf, &t->opts);
fclose(f);
close(fd);
if (CHECK(err, "failure during C dumping: %d\n", err)) {
if (CHECK(err, "btf_dump", "failure during C dumping: %d\n", err)) {
goto done;
}
snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name);
snprintf(test_file, sizeof(test_file), "progs/%s.c", t->file);
if (access(test_file, R_OK) == -1)
/*
* When the test is run with O=, kselftest copies TEST_FILES
* without preserving the directory structure.
*/
snprintf(test_file, sizeof(test_file), "%s.c",
test_case->name);
snprintf(test_file, sizeof(test_file), "%s.c", t->file);
/*
* Diff test output and expected test output, contained between
* START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
@ -118,33 +104,27 @@ int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
"out {sub(/^[ \\t]*\\*/, \"\"); print}' '%s' | diff -u - '%s'",
test_file, out_file);
err = system(diff_cmd);
if (CHECK(err,
if (CHECK(err, "diff",
"differing test output, output=%s, err=%d, diff cmd:\n%s\n",
out_file, err, diff_cmd))
goto done;
remove(out_file);
fprintf(stderr, "OK\n");
done:
btf__free(btf);
return err;
}
int main() {
int test_case_cnt, i, err, failed = 0;
void test_btf_dump() {
int i;
test_case_cnt = sizeof(btf_dump_test_cases) /
sizeof(btf_dump_test_cases[0]);
for (i = 0; i < ARRAY_SIZE(btf_dump_test_cases); i++) {
struct btf_dump_test_case *t = &btf_dump_test_cases[i];
for (i = 0; i < test_case_cnt; i++) {
err = test_btf_dump_case(i, &btf_dump_test_cases[i]);
if (err)
failed++;
if (!test__start_subtest(t->name))
continue;
test_btf_dump_case(i, &btf_dump_test_cases[i]);
}
fprintf(stderr, "%d tests succeeded, %d tests failed.\n",
test_case_cnt - failed, failed);
return failed;
}
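For reference, the loop above follows the generic prog_tests subtest pattern; a sketch with hypothetical cases[] and run_case() names:
for (i = 0; i < ARRAY_SIZE(cases); i++) {
	/* skip subtests deselected by the test_progs name filter */
	if (!test__start_subtest(cases[i].name))
		continue;
	run_case(&cases[i]);
}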

View File

@ -193,8 +193,12 @@ static struct core_reloc_test_case test_cases[] = {
.btf_src_file = NULL, /* load from /lib/modules/$(uname -r) */
.input = "",
.input_len = 0,
.output = "\1", /* true */
.output_len = 1,
.output = STRUCT_TO_CHAR_PTR(core_reloc_kernel_output) {
.valid = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
.comm = "test_progs\0\0\0\0\0",
.comm_len = 11,
},
.output_len = sizeof(struct core_reloc_kernel_output),
},
/* validate BPF program can use multiple flavors to match against

View File

@ -0,0 +1,127 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Test that the flow_dissector program can be updated with a single
* syscall by attaching a new program that replaces the existing one.
*
* Corner case - the same program cannot be attached twice.
*/
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <stdbool.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include "test_progs.h"
static bool is_attached(int netns)
{
__u32 cnt;
int err;
err = bpf_prog_query(netns, BPF_FLOW_DISSECTOR, 0, NULL, NULL, &cnt);
if (CHECK_FAIL(err)) {
perror("bpf_prog_query");
return true; /* fail-safe */
}
return cnt > 0;
}
static int load_prog(void)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_0, BPF_OK),
BPF_EXIT_INSN(),
};
int fd;
fd = bpf_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,
ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
if (CHECK_FAIL(fd < 0))
perror("bpf_load_program");
return fd;
}
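/*
 * Sketch: the two raw instructions above are the hand-assembled
 * equivalent of this minimal C flow dissector (function name hypothetical):
 *
 *	SEC("flow_dissector")
 *	int dissect(struct __sk_buff *skb)
 *	{
 *		return BPF_OK;
 *	}
 */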
static void do_flow_dissector_reattach(void)
{
int prog_fd[2] = { -1, -1 };
int err;
prog_fd[0] = load_prog();
if (prog_fd[0] < 0)
return;
prog_fd[1] = load_prog();
if (prog_fd[1] < 0)
goto out_close;
err = bpf_prog_attach(prog_fd[0], 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK_FAIL(err)) {
perror("bpf_prog_attach-0");
goto out_close;
}
/* Expect success when attaching a different program */
err = bpf_prog_attach(prog_fd[1], 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK_FAIL(err)) {
perror("bpf_prog_attach-1");
goto out_detach;
}
/* Expect failure when attaching the same program twice */
err = bpf_prog_attach(prog_fd[1], 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK_FAIL(!err || errno != EINVAL))
perror("bpf_prog_attach-2");
out_detach:
err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
if (CHECK_FAIL(err))
perror("bpf_prog_detach");
out_close:
close(prog_fd[1]);
close(prog_fd[0]);
}
void test_flow_dissector_reattach(void)
{
int init_net, err;
init_net = open("/proc/1/ns/net", O_RDONLY);
if (CHECK_FAIL(init_net < 0)) {
perror("open(/proc/1/ns/net)");
return;
}
err = setns(init_net, CLONE_NEWNET);
if (CHECK_FAIL(err)) {
perror("setns(/proc/1/ns/net)");
goto out_close;
}
if (is_attached(init_net)) {
test__skip();
printf("Can't test with flow dissector attached to init_net\n");
goto out_close;
}
/* First run tests in root network namespace */
do_flow_dissector_reattach();
/* Then repeat tests in a non-root namespace */
err = unshare(CLONE_NEWNET);
if (CHECK_FAIL(err)) {
perror("unshare(CLONE_NEWNET)");
goto out_close;
}
do_flow_dissector_reattach();
out_close:
close(init_net);
}
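The attach semantics exercised above, summarized as a sketch (fd_a/fd_b are hypothetical flow dissector program fds):
bpf_prog_attach(fd_a, 0, BPF_FLOW_DISSECTOR, 0); /* 0: attached */
bpf_prog_attach(fd_b, 0, BPF_FLOW_DISSECTOR, 0); /* 0: replaced in one syscall */
bpf_prog_attach(fd_b, 0, BPF_FLOW_DISSECTOR, 0); /* fails, errno == EINVAL */
bpf_prog_detach(0, BPF_FLOW_DISSECTOR);          /* 0: detached */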

View File

@ -0,0 +1,99 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
struct bss {
unsigned did_run;
unsigned iters;
unsigned sum;
};
struct rdonly_map_subtest {
const char *subtest_name;
const char *prog_name;
unsigned exp_iters;
unsigned exp_sum;
};
void test_rdonly_maps(void)
{
const char *prog_name_skip_loop = "raw_tracepoint/sys_enter:skip_loop";
const char *prog_name_part_loop = "raw_tracepoint/sys_enter:part_loop";
const char *prog_name_full_loop = "raw_tracepoint/sys_enter:full_loop";
const char *file = "test_rdonly_maps.o";
struct rdonly_map_subtest subtests[] = {
{ "skip loop", prog_name_skip_loop, 0, 0 },
{ "part loop", prog_name_part_loop, 3, 2 + 3 + 4 },
{ "full loop", prog_name_full_loop, 4, 2 + 3 + 4 + 5 },
};
int i, err, zero = 0, duration = 0;
struct bpf_link *link = NULL;
struct bpf_program *prog;
struct bpf_map *bss_map;
struct bpf_object *obj;
struct bss bss;
obj = bpf_object__open_file(file, NULL);
if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
return;
bpf_object__for_each_program(prog, obj) {
bpf_program__set_raw_tracepoint(prog);
}
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
goto cleanup;
bss_map = bpf_object__find_map_by_name(obj, "test_rdo.bss");
if (CHECK(!bss_map, "find_bss_map", "failed\n"))
goto cleanup;
for (i = 0; i < ARRAY_SIZE(subtests); i++) {
const struct rdonly_map_subtest *t = &subtests[i];
if (!test__start_subtest(t->subtest_name))
continue;
prog = bpf_object__find_program_by_title(obj, t->prog_name);
if (CHECK(!prog, "find_prog", "prog '%s' not found\n",
t->prog_name))
goto cleanup;
memset(&bss, 0, sizeof(bss));
err = bpf_map_update_elem(bpf_map__fd(bss_map), &zero, &bss, 0);
if (CHECK(err, "set_bss", "failed to set bss data: %d\n", err))
goto cleanup;
link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
if (CHECK(IS_ERR(link), "attach_prog", "prog '%s', err %ld\n",
t->prog_name, PTR_ERR(link))) {
link = NULL;
goto cleanup;
}
/* trigger probe */
usleep(1);
bpf_link__destroy(link);
link = NULL;
err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, &bss);
if (CHECK(err, "get_bss", "failed to get bss data: %d\n", err))
goto cleanup;
if (CHECK(bss.did_run == 0, "check_run",
"prog '%s' didn't run?\n", t->prog_name))
goto cleanup;
if (CHECK(bss.iters != t->exp_iters, "check_iters",
"prog '%s' iters: %d, expected: %d\n",
t->prog_name, bss.iters, t->exp_iters))
goto cleanup;
if (CHECK(bss.sum != t->exp_sum, "check_sum",
"prog '%s' sum: %d, expected: %d\n",
t->prog_name, bss.sum, t->exp_sum))
goto cleanup;
}
cleanup:
bpf_link__destroy(link);
bpf_object__close(obj);
}

View File

@ -3,16 +3,26 @@
void test_reference_tracking(void)
{
const char *file = "./test_sk_lookup_kern.o";
const char *file = "test_sk_lookup_kern.o";
const char *obj_name = "ref_track";
LIBBPF_OPTS(bpf_object_open_opts, open_opts,
.object_name = obj_name,
.relaxed_maps = true,
);
struct bpf_object *obj;
struct bpf_program *prog;
__u32 duration = 0;
int err = 0;
obj = bpf_object__open(file);
obj = bpf_object__open_file(file, &open_opts);
if (CHECK_FAIL(IS_ERR(obj)))
return;
if (CHECK(strcmp(bpf_object__name(obj), obj_name), "obj_name",
"wrong obj name '%s', expected '%s'\n",
bpf_object__name(obj), obj_name))
goto cleanup;
bpf_object__for_each_program(prog, obj) {
const char *title;
@ -35,5 +45,7 @@ void test_reference_tracking(void)
}
CHECK(err, title, "\n");
}
cleanup:
bpf_object__close(obj);
}

View File

@ -62,6 +62,10 @@ struct padded_a_lot {
* long: 64;
* long: 64;
* int b;
* long: 32;
* long: 64;
* long: 64;
* long: 64;
*};
*
*/
@ -95,7 +99,6 @@ struct zone_padding {
struct zone {
int a;
short b;
short: 16;
struct zone_padding __pad__;
};

View File

@ -1,5 +1,14 @@
#include <stdint.h>
#include <stdbool.h>
/*
* KERNEL
*/
struct core_reloc_kernel_output {
int valid[10];
char comm[16];
int comm_len;
};
/*
* FLAVORS

View File

@ -7,6 +7,7 @@
#include <stdbool.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
char _license[] SEC("license") = "GPL";

View File

@ -7,6 +7,7 @@
#include <stdbool.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
char _license[] SEC("license") = "GPL";

View File

@ -7,6 +7,7 @@
#include <stdbool.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
char _license[] SEC("license") = "GPL";

View File

@ -14,13 +14,12 @@ struct sockopt_sk {
__u8 val;
};
struct bpf_map_def SEC("maps") socket_storage_map = {
.type = BPF_MAP_TYPE_SK_STORAGE,
.key_size = sizeof(int),
.value_size = sizeof(struct sockopt_sk),
.map_flags = BPF_F_NO_PREALLOC,
};
BPF_ANNOTATE_KV_PAIR(socket_storage_map, int, struct sockopt_sk);
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct sockopt_sk);
} socket_storage_map SEC(".maps");
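/*
 * Sketch: __uint()/__type() come from bpf_helpers.h and encode map
 * attributes as BTF type info, roughly:
 *
 *	#define __uint(name, val) int (*name)[val]
 *	#define __type(name, val) typeof(val) *name
 *
 * libbpf reads them back from the object's BTF when creating the map,
 * which is what replaces bpf_map_def plus BPF_ANNOTATE_KV_PAIR() here.
 */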
SEC("cgroup/getsockopt")
int _getsockopt(struct bpf_sockopt *ctx)

View File

@ -13,13 +13,12 @@ struct tcp_rtt_storage {
__u32 icsk_retransmits;
};
struct bpf_map_def SEC("maps") socket_storage_map = {
.type = BPF_MAP_TYPE_SK_STORAGE,
.key_size = sizeof(int),
.value_size = sizeof(struct tcp_rtt_storage),
.map_flags = BPF_F_NO_PREALLOC,
};
BPF_ANNOTATE_KV_PAIR(socket_storage_map, int, struct tcp_rtt_storage);
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct tcp_rtt_storage);
} socket_storage_map SEC(".maps");
SEC("sockops")
int _sockops(struct bpf_sock_ops *ctx)

View File

@ -49,4 +49,3 @@ int handle_uprobe_return(struct pt_regs *ctx)
}
char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = 1;

View File

@ -2,6 +2,7 @@
/* Copyright (c) 2018 Facebook */
#include <linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_legacy.h"
int _version SEC("version") = 1;

View File

@ -2,6 +2,7 @@
/* Copyright (c) 2018 Facebook */
#include <linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_legacy.h"
int _version SEC("version") = 1;

View File

@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
@ -31,6 +32,8 @@ struct core_reloc_arrays {
struct core_reloc_arrays_substruct d[1][2];
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
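/*
 * bpf_core_read() (from bpf_core_read.h) wraps bpf_probe_read() with
 * __builtin_preserve_access_index(), so the source field offset is
 * recorded as a CO-RE relocation and fixed up for the running kernel.
 */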
SEC("raw_tracepoint/sys_enter")
int test_core_arrays(void *ctx)
{
@ -38,16 +41,16 @@ int test_core_arrays(void *ctx)
struct core_reloc_arrays_output *out = (void *)&data.out;
/* in->a[2] */
if (BPF_CORE_READ(&out->a2, &in->a[2]))
if (CORE_READ(&out->a2, &in->a[2]))
return 1;
/* in->b[1][2][3] */
if (BPF_CORE_READ(&out->b123, &in->b[1][2][3]))
if (CORE_READ(&out->b123, &in->b[1][2][3]))
return 1;
/* in->c[1].c */
if (BPF_CORE_READ(&out->c1c, &in->c[1].c))
if (CORE_READ(&out->c1c, &in->c[1].c))
return 1;
/* in->d[0][0].d */
if (BPF_CORE_READ(&out->d00d, &in->d[0][0].d))
if (CORE_READ(&out->d00d, &in->d[0][0].d))
return 1;
return 0;

View File

@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
@ -39,6 +40,8 @@ struct core_reloc_flavors___weird {
};
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_flavors(void *ctx)
{
@ -48,13 +51,13 @@ int test_core_flavors(void *ctx)
struct core_reloc_flavors *out = (void *)&data.out;
/* read a using weird layout */
if (BPF_CORE_READ(&out->a, &in_weird->a))
if (CORE_READ(&out->a, &in_weird->a))
return 1;
/* read b using reversed layout */
if (BPF_CORE_READ(&out->b, &in_rev->b))
if (CORE_READ(&out->b, &in_rev->b))
return 1;
/* read c using original layout */
if (BPF_CORE_READ(&out->c, &in_orig->c))
if (CORE_READ(&out->c, &in_orig->c))
return 1;
return 0;

View File

@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
@ -23,20 +24,22 @@ struct core_reloc_ints {
int64_t s64_field;
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_ints(void *ctx)
{
struct core_reloc_ints *in = (void *)&data.in;
struct core_reloc_ints *out = (void *)&data.out;
if (BPF_CORE_READ(&out->u8_field, &in->u8_field) ||
BPF_CORE_READ(&out->s8_field, &in->s8_field) ||
BPF_CORE_READ(&out->u16_field, &in->u16_field) ||
BPF_CORE_READ(&out->s16_field, &in->s16_field) ||
BPF_CORE_READ(&out->u32_field, &in->u32_field) ||
BPF_CORE_READ(&out->s32_field, &in->s32_field) ||
BPF_CORE_READ(&out->u64_field, &in->u64_field) ||
BPF_CORE_READ(&out->s64_field, &in->s64_field))
if (CORE_READ(&out->u8_field, &in->u8_field) ||
CORE_READ(&out->s8_field, &in->s8_field) ||
CORE_READ(&out->u16_field, &in->u16_field) ||
CORE_READ(&out->s16_field, &in->s16_field) ||
CORE_READ(&out->u32_field, &in->u32_field) ||
CORE_READ(&out->s32_field, &in->s32_field) ||
CORE_READ(&out->u64_field, &in->u64_field) ||
CORE_READ(&out->s64_field, &in->s64_field))
return 1;
return 0;

View File

@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
@ -12,24 +13,78 @@ static volatile struct data {
char out[256];
} data;
struct core_reloc_kernel_output {
int valid[10];
char comm[16];
int comm_len;
};
struct task_struct {
int pid;
int tgid;
char comm[16];
struct task_struct *group_leader;
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_kernel(void *ctx)
{
struct task_struct *task = (void *)bpf_get_current_task();
struct core_reloc_kernel_output *out = (void *)&data.out;
uint64_t pid_tgid = bpf_get_current_pid_tgid();
uint32_t real_tgid = (uint32_t)pid_tgid;
int pid, tgid;
if (BPF_CORE_READ(&pid, &task->pid) ||
BPF_CORE_READ(&tgid, &task->tgid))
if (CORE_READ(&pid, &task->pid) ||
CORE_READ(&tgid, &task->tgid))
return 1;
/* validate pid + tgid matches */
data.out[0] = (((uint64_t)pid << 32) | tgid) == pid_tgid;
out->valid[0] = (((uint64_t)pid << 32) | tgid) == pid_tgid;
/* test variadic BPF_CORE_READ macros */
out->valid[1] = BPF_CORE_READ(task,
tgid) == real_tgid;
out->valid[2] = BPF_CORE_READ(task,
group_leader,
tgid) == real_tgid;
out->valid[3] = BPF_CORE_READ(task,
group_leader, group_leader,
tgid) == real_tgid;
out->valid[4] = BPF_CORE_READ(task,
group_leader, group_leader, group_leader,
tgid) == real_tgid;
out->valid[5] = BPF_CORE_READ(task,
group_leader, group_leader, group_leader,
group_leader,
tgid) == real_tgid;
out->valid[6] = BPF_CORE_READ(task,
group_leader, group_leader, group_leader,
group_leader, group_leader,
tgid) == real_tgid;
out->valid[7] = BPF_CORE_READ(task,
group_leader, group_leader, group_leader,
group_leader, group_leader, group_leader,
tgid) == real_tgid;
out->valid[8] = BPF_CORE_READ(task,
group_leader, group_leader, group_leader,
group_leader, group_leader, group_leader,
group_leader,
tgid) == real_tgid;
out->valid[9] = BPF_CORE_READ(task,
group_leader, group_leader, group_leader,
group_leader, group_leader, group_leader,
group_leader, group_leader,
tgid) == real_tgid;
/* test BPF_CORE_READ_STR_INTO() returns correct code and contents */
out->comm_len = BPF_CORE_READ_STR_INTO(
&out->comm, task,
group_leader, group_leader, group_leader, group_leader,
group_leader, group_leader, group_leader, group_leader,
comm);
return 0;
}
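Conceptually, each variadic BPF_CORE_READ() above lowers to a chain of relocatable probe reads; a sketch of what BPF_CORE_READ(task, group_leader, tgid) amounts to (temporary names hypothetical):
struct task_struct *__gl;
int __tgid;

bpf_core_read(&__gl, sizeof(__gl), &task->group_leader); /* offset relocated */
bpf_core_read(&__tgid, sizeof(__tgid), &__gl->tgid);     /* offset relocated */
/* __tgid holds the result */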

View File

@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
@ -32,6 +33,8 @@ struct core_reloc_misc_extensible {
int b;
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_misc(void *ctx)
{
@ -41,15 +44,15 @@ int test_core_misc(void *ctx)
struct core_reloc_misc_output *out = (void *)&data.out;
/* record two different relocations with the same accessor string */
if (BPF_CORE_READ(&out->a, &in_a->a1) || /* accessor: 0:0 */
BPF_CORE_READ(&out->b, &in_b->b1)) /* accessor: 0:0 */
if (CORE_READ(&out->a, &in_a->a1) || /* accessor: 0:0 */
CORE_READ(&out->b, &in_b->b1)) /* accessor: 0:0 */
return 1;
/* Validate relocations capture array-only accesses for structs with
* fixed header, but with potentially extendable tail. This will read
* first 4 bytes of 2nd element of in_ext array of potentially
* variably sized struct core_reloc_misc_extensible. */
if (BPF_CORE_READ(&out->c, &in_ext[2])) /* accessor: 2 */
if (CORE_READ(&out->c, &in_ext[2])) /* accessor: 2 */
return 1;
return 0;

View File

@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
@ -41,20 +42,22 @@ struct core_reloc_mods {
core_reloc_mods_substruct_t h;
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_mods(void *ctx)
{
struct core_reloc_mods *in = (void *)&data.in;
struct core_reloc_mods_output *out = (void *)&data.out;
if (BPF_CORE_READ(&out->a, &in->a) ||
BPF_CORE_READ(&out->b, &in->b) ||
BPF_CORE_READ(&out->c, &in->c) ||
BPF_CORE_READ(&out->d, &in->d) ||
BPF_CORE_READ(&out->e, &in->e[2]) ||
BPF_CORE_READ(&out->f, &in->f[1]) ||
BPF_CORE_READ(&out->g, &in->g.x) ||
BPF_CORE_READ(&out->h, &in->h.y))
if (CORE_READ(&out->a, &in->a) ||
CORE_READ(&out->b, &in->b) ||
CORE_READ(&out->c, &in->c) ||
CORE_READ(&out->d, &in->d) ||
CORE_READ(&out->e, &in->e[2]) ||
CORE_READ(&out->f, &in->f[1]) ||
CORE_READ(&out->g, &in->g.x) ||
CORE_READ(&out->h, &in->h.y))
return 1;
return 0;

View File

@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
@ -30,15 +31,17 @@ struct core_reloc_nesting {
} b;
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_nesting(void *ctx)
{
struct core_reloc_nesting *in = (void *)&data.in;
struct core_reloc_nesting *out = (void *)&data.out;
if (BPF_CORE_READ(&out->a.a.a, &in->a.a.a))
if (CORE_READ(&out->a.a.a, &in->a.a.a))
return 1;
if (BPF_CORE_READ(&out->b.b.b, &in->b.b.b))
if (CORE_READ(&out->b.b.b, &in->b.b.b))
return 1;
return 0;

View File

@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
@ -25,17 +26,19 @@ struct core_reloc_primitives {
int (*f)(const char *);
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_primitives(void *ctx)
{
struct core_reloc_primitives *in = (void *)&data.in;
struct core_reloc_primitives *out = (void *)&data.out;
if (BPF_CORE_READ(&out->a, &in->a) ||
BPF_CORE_READ(&out->b, &in->b) ||
BPF_CORE_READ(&out->c, &in->c) ||
BPF_CORE_READ(&out->d, &in->d) ||
BPF_CORE_READ(&out->f, &in->f))
if (CORE_READ(&out->a, &in->a) ||
CORE_READ(&out->b, &in->b) ||
CORE_READ(&out->c, &in->c) ||
CORE_READ(&out->d, &in->d) ||
CORE_READ(&out->f, &in->f))
return 1;
return 0;

View File

@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
@ -16,13 +17,15 @@ struct core_reloc_ptr_as_arr {
int a;
};
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_ptr_as_arr(void *ctx)
{
struct core_reloc_ptr_as_arr *in = (void *)&data.in;
struct core_reloc_ptr_as_arr *out = (void *)&data.out;
if (BPF_CORE_READ(&out->a, &in[2].a))
if (CORE_READ(&out->a, &in[2].a))
return 1;
return 0;

View File

@ -47,12 +47,11 @@ struct {
* issue and avoid complicated C programming massaging.
* This is an acceptable workaround since there is one entry here.
*/
typedef __u64 raw_stack_trace_t[2 * MAX_STACK_RAWTP];
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, raw_stack_trace_t);
__type(value, __u64[2 * MAX_STACK_RAWTP]);
} rawdata_map SEC(".maps");
SEC("raw_tracepoint/sys_enter")
@ -100,4 +99,3 @@ int bpf_prog1(void *ctx)
}
char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */

View File

@ -22,4 +22,3 @@ int handle_sys_nanosleep_entry(struct pt_regs *ctx)
}
char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = 1;

View File

@ -0,0 +1,83 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
static volatile const struct {
unsigned a[4];
/*
 * if the struct's size is a multiple of 16, the compiler will put it
 * into the .rodata.cst16 section, which is not recognized by libbpf;
 * work around this by ensuring the struct is not 16-byte aligned
 */
char _y;
} rdonly_values = { .a = {2, 3, 4, 5} };
static volatile struct {
unsigned did_run;
unsigned iters;
unsigned sum;
} res;
SEC("raw_tracepoint/sys_enter:skip_loop")
int skip_loop(struct pt_regs *ctx)
{
/* prevent the compiler from optimizing everything out */
unsigned * volatile p = (void *)&rdonly_values.a;
unsigned iters = 0, sum = 0;
/* we should never enter this loop */
while (*p & 1) {
iters++;
sum += *p;
p++;
}
res.did_run = 1;
res.iters = iters;
res.sum = sum;
return 0;
}
SEC("raw_tracepoint/sys_enter:part_loop")
int part_loop(struct pt_regs *ctx)
{
/* prevent the compiler from optimizing everything out */
unsigned * volatile p = (void *)&rdonly_values.a;
unsigned iters = 0, sum = 0;
/* validate that the verifier can derive loop termination */
while (*p < 5) {
iters++;
sum += *p;
p++;
}
res.did_run = 1;
res.iters = iters;
res.sum = sum;
return 0;
}
SEC("raw_tracepoint/sys_enter:full_loop")
int full_loop(struct pt_regs *ctx)
{
/* prevent the compiler from optimizing everything out */
unsigned * volatile p = (void *)&rdonly_values.a;
int i = sizeof(rdonly_values.a) / sizeof(rdonly_values.a[0]);
unsigned iters = 0, sum = 0;
/* validate that the verifier allows the full loop as well */
while (i > 0) {
iters++;
sum += *p;
p++;
i--;
}
res.did_run = 1;
res.iters = iters;
res.sum = sum;
return 0;
}
char _license[] SEC("license") = "GPL";
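With rdonly_values.a visible to the verifier as {2, 3, 4, 5}, the expected results can be worked out by hand, matching the user-space test's expectations:
/* skip_loop: *p == 2 and (2 & 1) == 0, body never entered: iters = 0, sum = 0 */
/* part_loop: 2, 3, 4 pass (*p < 5), 5 stops the loop: iters = 3, sum = 2 + 3 + 4 = 9 */
/* full_loop: i counts all 4 elements: iters = 4, sum = 2 + 3 + 4 + 5 = 14 */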

View File

@ -74,4 +74,3 @@ int oncpu(struct sched_switch_args *ctx)
}
char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */

View File

@ -18,19 +18,55 @@ fi
# this is the case and run it with in_netns.sh if it is being run in the root
# namespace.
if [[ -z $(ip netns identify $$) ]]; then
../net/in_netns.sh "$0" "$@"
exit $?
fi
err=0
if bpftool="$(which bpftool)"; then
echo "Testing global flow dissector..."
# Determine selftest success via shell exit code
exit_handler()
{
if (( $? == 0 )); then
$bpftool prog loadall ./bpf_flow.o /sys/fs/bpf/flow \
type flow_dissector
if ! unshare --net $bpftool prog attach pinned \
/sys/fs/bpf/flow/flow_dissector flow_dissector; then
echo "Unexpected unsuccessful attach in namespace" >&2
err=1
fi
$bpftool prog attach pinned /sys/fs/bpf/flow/flow_dissector \
flow_dissector
if unshare --net $bpftool prog attach pinned \
/sys/fs/bpf/flow/flow_dissector flow_dissector; then
echo "Unexpected successful attach in namespace" >&2
err=1
fi
if ! $bpftool prog detach pinned \
/sys/fs/bpf/flow/flow_dissector flow_dissector; then
echo "Failed to detach flow dissector" >&2
err=1
fi
rm -rf /sys/fs/bpf/flow
else
echo "Skipping root flow dissector test, bpftool not found" >&2
fi
# Run the rest of the tests in a net namespace.
../net/in_netns.sh "$0" "$@"
err=$(( $err + $? ))
if (( $err == 0 )); then
echo "selftests: $TESTNAME [PASS]";
else
echo "selftests: $TESTNAME [FAILED]";
fi
exit $err
fi
# Determine selftest success via shell exit code
exit_handler()
{
set +e
# Cleanup

View File

@ -187,3 +187,20 @@
.prog_type = BPF_PROG_TYPE_XDP,
.retval = 55,
},
{
"taken loop with back jump to 1st insn, 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, -3),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
.retval = 55,
},
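A quick check of the expected value: the callee at insn 4 accumulates R2 += R1 while R1 counts down from 10 to 1 (the BPF_JMP32 branch loops back while R1 != 0), then returns R0 = R2 = 10 + 9 + ... + 1 = 55, matching .retval.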