Alexei Starovoitov says:

====================
pull-request: bpf-next 2021-09-17

We've added 63 non-merge commits during the last 12 day(s) which contain
a total of 65 files changed, 2653 insertions(+), 751 deletions(-).

The main changes are:

1) Streamline internal BPF program sections handling and
   bpf_program__set_attach_target() in libbpf, from Andrii.

2) Add support for new btf kind BTF_KIND_TAG, from Yonghong.

3) Introduce bpf_get_branch_snapshot() to capture LBR, from Song.

4) IMUL optimization for x86-64 JIT, from Jie.

5) xsk selftest improvements, from Magnus.

6) Introduce legacy kprobe events support in libbpf, from Rafael.

7) Access hw timestamp through BPF's __sk_buff, from Vadim.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (63 commits)
  selftests/bpf: Fix a few compiler warnings
  libbpf: Constify all high-level program attach APIs
  libbpf: Schedule open_opts.attach_prog_fd deprecation since v0.7
  selftests/bpf: Switch fexit_bpf2bpf selftest to set_attach_target() API
  libbpf: Allow skipping attach_func_name in bpf_program__set_attach_target()
  libbpf: Deprecated bpf_object_open_opts.relaxed_core_relocs
  selftests/bpf: Stop using relaxed_core_relocs which has no effect
  libbpf: Use pre-setup sec_def in libbpf_find_attach_btf_id()
  bpf: Update bpf_get_smp_processor_id() documentation
  libbpf: Add sphinx code documentation comments
  selftests/bpf: Skip btf_tag test if btf_tag attribute not supported
  docs/bpf: Add documentation for BTF_KIND_TAG
  selftests/bpf: Add a test with a bpf program with btf_tag attributes
  selftests/bpf: Test BTF_KIND_TAG for deduplication
  selftests/bpf: Add BTF_KIND_TAG unit tests
  selftests/bpf: Change NAME_NTH/IS_NAME_NTH for BTF_KIND_TAG format
  selftests/bpf: Test libbpf API function btf__add_tag()
  bpftool: Add support for BTF_KIND_TAG
  libbpf: Add support for BTF_KIND_TAG
  libbpf: Rename btf_{hash,equal}_int to btf_{hash,equal}_int_tag
  ...
====================

Link: https://lore.kernel.org/r/20210917173738.3397064-1-ast@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit af54faab84
@@ -85,6 +85,7 @@ sequentially and type id is assigned to each recognized type starting from id
    #define BTF_KIND_VAR            14      /* Variable      */
    #define BTF_KIND_DATASEC        15      /* Section       */
    #define BTF_KIND_FLOAT          16      /* Floating point */
    #define BTF_KIND_TAG            17      /* Tag */

Note that the type section encodes debug info, not just pure types.
``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram.

@@ -106,7 +107,7 @@ Each type contains the following common data::
     * "size" tells the size of the type it is describing.
     *
     * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
     * FUNC and FUNC_PROTO.
     * FUNC, FUNC_PROTO and TAG.
     * "type" is a type_id referring to another type.
     */
    union {

@@ -465,6 +466,32 @@ map definition.

No additional type data follow ``btf_type``.

2.2.17 BTF_KIND_TAG
~~~~~~~~~~~~~~~~~~~

``struct btf_type`` encoding requirement:
 * ``name_off``: offset to a non-empty string
 * ``info.kind_flag``: 0
 * ``info.kind``: BTF_KIND_TAG
 * ``info.vlen``: 0
 * ``type``: ``struct``, ``union``, ``func`` or ``var``

``btf_type`` is followed by ``struct btf_tag``::

    struct btf_tag {
        __s32   component_idx;
    };

The ``name_off`` encodes the btf_tag attribute string.
The ``type`` should be ``struct``, ``union``, ``func`` or ``var``.
For ``var`` type, ``btf_tag.component_idx`` must be ``-1``.
For the other three types, if the btf_tag attribute is
applied to the ``struct``, ``union`` or ``func`` itself,
``btf_tag.component_idx`` must be ``-1``. Otherwise,
the attribute is applied to a ``struct``/``union`` member or
a ``func`` argument, and ``btf_tag.component_idx`` should be a
valid index (starting from 0) pointing to a member or an argument.

3. BTF Kernel API
*****************
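For readers of the new section above, a hedged illustration (mine, not part of the diff) of the C source that typically produces BTF_KIND_TAG entries, assuming the clang btf_tag attribute spelling exercised by the selftests in this series:

    struct pkt_ctx {
            int ifindex;                                      /* member 0 */
            long rx_ts __attribute__((btf_tag("user:ts")));   /* member 1 -> component_idx == 1 */
    } __attribute__((btf_tag("ctx:net")));                    /* on the struct itself -> component_idx == -1 */

    /* on a function -> component_idx == -1 */
    int __attribute__((btf_tag("may_sleep"))) parse_pkt(struct pkt_ctx *c);

Each attribute becomes one BTF_KIND_TAG type whose name_off holds the tag string and whose type points at the tagged struct or function.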
@ -2143,19 +2143,19 @@ static __initconst const u64 knl_hw_cache_extra_regs
|
||||
* However, there are some cases which may change PEBS status, e.g. PMI
|
||||
* throttle. The PEBS_ENABLE should be updated where the status changes.
|
||||
*/
|
||||
static void __intel_pmu_disable_all(void)
|
||||
static __always_inline void __intel_pmu_disable_all(bool bts)
|
||||
{
|
||||
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||
|
||||
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
|
||||
|
||||
if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
|
||||
if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
|
||||
intel_pmu_disable_bts();
|
||||
}
|
||||
|
||||
static void intel_pmu_disable_all(void)
|
||||
static __always_inline void intel_pmu_disable_all(void)
|
||||
{
|
||||
__intel_pmu_disable_all();
|
||||
__intel_pmu_disable_all(true);
|
||||
intel_pmu_pebs_disable_all();
|
||||
intel_pmu_lbr_disable_all();
|
||||
}
|
||||
@ -2186,6 +2186,49 @@ static void intel_pmu_enable_all(int added)
|
||||
__intel_pmu_enable_all(added, false);
|
||||
}
|
||||
|
||||
static noinline int
|
||||
__intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
|
||||
unsigned int cnt, unsigned long flags)
|
||||
{
|
||||
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||
|
||||
intel_pmu_lbr_read();
|
||||
cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);
|
||||
|
||||
memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
|
||||
intel_pmu_enable_all(0);
|
||||
local_irq_restore(flags);
|
||||
return cnt;
|
||||
}
|
||||
|
||||
static int
|
||||
intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/* must not have branches... */
|
||||
local_irq_save(flags);
|
||||
__intel_pmu_disable_all(false); /* we don't care about BTS */
|
||||
__intel_pmu_pebs_disable_all();
|
||||
__intel_pmu_lbr_disable();
|
||||
/* ... until here */
|
||||
return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
|
||||
}
|
||||
|
||||
static int
|
||||
intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/* must not have branches... */
|
||||
local_irq_save(flags);
|
||||
__intel_pmu_disable_all(false); /* we don't care about BTS */
|
||||
__intel_pmu_pebs_disable_all();
|
||||
__intel_pmu_arch_lbr_disable();
|
||||
/* ... until here */
|
||||
return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Workaround for:
|
||||
* Intel Errata AAK100 (model 26)
|
||||
@ -2929,7 +2972,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
|
||||
apic_write(APIC_LVTPC, APIC_DM_NMI);
|
||||
intel_bts_disable_local();
|
||||
cpuc->enabled = 0;
|
||||
__intel_pmu_disable_all();
|
||||
__intel_pmu_disable_all(true);
|
||||
handled = intel_pmu_drain_bts_buffer();
|
||||
handled += intel_bts_interrupt();
|
||||
status = intel_pmu_get_status();
|
||||
@ -6283,9 +6326,21 @@ __init int intel_pmu_init(void)
|
||||
x86_pmu.lbr_nr = 0;
|
||||
}
|
||||
|
||||
if (x86_pmu.lbr_nr)
|
||||
if (x86_pmu.lbr_nr) {
|
||||
pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
|
||||
|
||||
/* only support branch_stack snapshot for perfmon >= v2 */
|
||||
if (x86_pmu.disable_all == intel_pmu_disable_all) {
|
||||
if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
|
||||
static_call_update(perf_snapshot_branch_stack,
|
||||
intel_pmu_snapshot_arch_branch_stack);
|
||||
} else {
|
||||
static_call_update(perf_snapshot_branch_stack,
|
||||
intel_pmu_snapshot_branch_stack);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
intel_pmu_check_extra_regs(x86_pmu.extra_regs);
|
||||
|
||||
/* Support full width counters using alternative MSR range */
|
||||
|
@ -1301,7 +1301,7 @@ void intel_pmu_pebs_disable_all(void)
|
||||
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||
|
||||
if (cpuc->pebs_enabled)
|
||||
wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
|
||||
__intel_pmu_pebs_disable_all();
|
||||
}
|
||||
|
||||
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
|
||||
|
@ -228,20 +228,6 @@ static void __intel_pmu_lbr_enable(bool pmi)
|
||||
wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN);
|
||||
}
|
||||
|
||||
static void __intel_pmu_lbr_disable(void)
|
||||
{
|
||||
u64 debugctl;
|
||||
|
||||
if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
|
||||
wrmsrl(MSR_ARCH_LBR_CTL, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
|
||||
debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
|
||||
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
|
||||
}
|
||||
|
||||
void intel_pmu_lbr_reset_32(void)
|
||||
{
|
||||
int i;
|
||||
@ -779,8 +765,12 @@ void intel_pmu_lbr_disable_all(void)
|
||||
{
|
||||
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||
|
||||
if (cpuc->lbr_users && !vlbr_exclude_host())
|
||||
if (cpuc->lbr_users && !vlbr_exclude_host()) {
|
||||
if (static_cpu_has(X86_FEATURE_ARCH_LBR))
|
||||
return __intel_pmu_arch_lbr_disable();
|
||||
|
||||
__intel_pmu_lbr_disable();
|
||||
}
|
||||
}
|
||||
|
||||
void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
|
||||
|
@ -1240,6 +1240,25 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
|
||||
return intel_pmu_has_bts_period(event, hwc->sample_period);
|
||||
}
|
||||
|
||||
static __always_inline void __intel_pmu_pebs_disable_all(void)
|
||||
{
|
||||
wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
|
||||
}
|
||||
|
||||
static __always_inline void __intel_pmu_arch_lbr_disable(void)
|
||||
{
|
||||
wrmsrl(MSR_ARCH_LBR_CTL, 0);
|
||||
}
|
||||
|
||||
static __always_inline void __intel_pmu_lbr_disable(void)
|
||||
{
|
||||
u64 debugctl;
|
||||
|
||||
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
|
||||
debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
|
||||
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
|
||||
}
|
||||
|
||||
int intel_pmu_save_and_restart(struct perf_event *event);
|
||||
|
||||
struct event_constraint *
|
||||
|
@ -1070,41 +1070,34 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
|
||||
break;
|
||||
|
||||
case BPF_ALU | BPF_MUL | BPF_K:
|
||||
case BPF_ALU | BPF_MUL | BPF_X:
|
||||
case BPF_ALU64 | BPF_MUL | BPF_K:
|
||||
case BPF_ALU64 | BPF_MUL | BPF_X:
|
||||
{
|
||||
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
|
||||
if (BPF_CLASS(insn->code) == BPF_ALU64)
|
||||
EMIT1(add_2mod(0x48, dst_reg, dst_reg));
|
||||
else if (is_ereg(dst_reg))
|
||||
EMIT1(add_2mod(0x40, dst_reg, dst_reg));
|
||||
|
||||
if (dst_reg != BPF_REG_0)
|
||||
EMIT1(0x50); /* push rax */
|
||||
if (dst_reg != BPF_REG_3)
|
||||
EMIT1(0x52); /* push rdx */
|
||||
|
||||
/* mov r11, dst_reg */
|
||||
EMIT_mov(AUX_REG, dst_reg);
|
||||
|
||||
if (BPF_SRC(insn->code) == BPF_X)
|
||||
emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
|
||||
if (is_imm8(imm32))
|
||||
/* imul dst_reg, dst_reg, imm8 */
|
||||
EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
|
||||
imm32);
|
||||
else
|
||||
emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
|
||||
|
||||
if (is64)
|
||||
EMIT1(add_1mod(0x48, AUX_REG));
|
||||
else if (is_ereg(AUX_REG))
|
||||
EMIT1(add_1mod(0x40, AUX_REG));
|
||||
/* mul(q) r11 */
|
||||
EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
|
||||
|
||||
if (dst_reg != BPF_REG_3)
|
||||
EMIT1(0x5A); /* pop rdx */
|
||||
if (dst_reg != BPF_REG_0) {
|
||||
/* mov dst_reg, rax */
|
||||
EMIT_mov(dst_reg, BPF_REG_0);
|
||||
EMIT1(0x58); /* pop rax */
|
||||
}
|
||||
/* imul dst_reg, dst_reg, imm32 */
|
||||
EMIT2_off32(0x69,
|
||||
add_2reg(0xC0, dst_reg, dst_reg),
|
||||
imm32);
|
||||
break;
|
||||
}
|
||||
|
||||
case BPF_ALU | BPF_MUL | BPF_X:
|
||||
case BPF_ALU64 | BPF_MUL | BPF_X:
|
||||
if (BPF_CLASS(insn->code) == BPF_ALU64)
|
||||
EMIT1(add_2mod(0x48, src_reg, dst_reg));
|
||||
else if (is_ereg(dst_reg) || is_ereg(src_reg))
|
||||
EMIT1(add_2mod(0x40, src_reg, dst_reg));
|
||||
|
||||
/* imul dst_reg, src_reg */
|
||||
EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
|
||||
break;
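For orientation, a hedged before/after sketch (mine, not from the patch) of what this hunk changes for a constant multiply, using the JIT's usual register mapping where BPF r6 lives in rbx:

    BPF:      r6 *= 13                    /* BPF_ALU64 | BPF_MUL | BPF_K */
    old JIT:  push rax; push rdx; mov r11, rbx; mov rax, 13
              mul r11; mov rbx, rax; pop rdx; pop rax
    new JIT:  imul rbx, rbx, 13           /* 0x6B form, since 13 fits in an imm8 */

Register-to-register multiplies take the two-byte 0x0F 0xAF imul form shown just above.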
|
||||
|
||||
/* Shifts */
|
||||
case BPF_ALU | BPF_LSH | BPF_K:
|
||||
case BPF_ALU | BPF_RSH | BPF_K:
|
||||
|
@@ -57,6 +57,7 @@ struct perf_guest_info_callbacks {
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <linux/security.h>
#include <linux/static_call.h>
#include <asm/local.h>

struct perf_callchain_entry {

@@ -1612,4 +1613,26 @@ extern void __weak arch_perf_update_userpage(struct perf_event *event,
extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
#endif

/*
 * Snapshot branch stack on software events.
 *
 * Branch stack can be very useful in understanding software events. For
 * example, when a long function, e.g. sys_perf_event_open, returns an
 * errno, it is not obvious why the function failed. Branch stack could
 * provide very helpful information in this type of scenario.
 *
 * On software events, it is necessary to stop the hardware branch recorder
 * quickly. Otherwise, the hardware register/buffer will be flushed with
 * entries of the triggering event. Therefore, a static call is used to
 * stop the hardware recorder.
 */

/*
 * cnt is the number of entries allocated for *entries*.
 * Return number of entries copied into *entries*.
 */
typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries,
					   unsigned int cnt);
DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);

#endif /* _LINUX_PERF_EVENT_H */
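The hunk above only declares the static call. As a hedged sketch (the my_pmu_* names are placeholders; the pattern is the one this series wires up for Intel PMUs later in the diff), a branch-stack provider plugs in like this:

    /* generic code provides a default that reports zero entries */
    DEFINE_STATIC_CALL_RET0(perf_snapshot_branch_stack,
                            perf_snapshot_branch_stack_t);

    /* placeholder PMU handler: stop branch recording, copy up to cnt
     * entries into 'entries', re-enable recording, return the count */
    static int my_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
                                            unsigned int cnt)
    {
            return 0;
    }

    static void my_pmu_init(void)
    {
            /* replace the ret0 default with the real handler */
            static_call_update(perf_snapshot_branch_stack,
                               my_pmu_snapshot_branch_stack);
    }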
@@ -1629,7 +1629,7 @@ union bpf_attr {
 * u32 bpf_get_smp_processor_id(void)
 *	Description
 *		Get the SMP (symmetric multiprocessing) processor id. Note that
 *		all programs run with preemption disabled, which means that the
 *		all programs run with migration disabled, which means that the
 *		SMP processor id is stable during all the execution of the
 *		program.
 *	Return

@@ -4877,6 +4877,27 @@ union bpf_attr {
 *		Get the struct pt_regs associated with **task**.
 *	Return
 *		A pointer to struct pt_regs.
 *
 * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
 *	Description
 *		Get branch trace from hardware engines like Intel LBR. The
 *		hardware engine is stopped shortly after the helper is
 *		called. Therefore, the user needs to filter branch entries
 *		based on the actual use case. To capture branch trace
 *		before the trigger point of the BPF program, the helper
 *		should be called at the beginning of the BPF program.
 *
 *		The data is stored as struct perf_branch_entry into the output
 *		buffer *entries*. *size* is the size of *entries* in bytes.
 *		*flags* is reserved for now and must be zero.
 *
 *	Return
 *		On success, number of bytes written to *entries*. On error, a
 *		negative value.
 *
 *		**-EINVAL** if *flags* is not zero.
 *
 *		**-ENOENT** if architecture does not support branch records.
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\

@@ -5055,6 +5076,7 @@ union bpf_attr {
	FN(get_func_ip),		\
	FN(get_attach_cookie),		\
	FN(task_pt_regs),		\
	FN(get_branch_snapshot),	\
	/* */

/* integer value in 'imm' field of BPF_CALL instruction selects which helper

@@ -5284,6 +5306,8 @@ struct __sk_buff {
	__u32 gso_segs;
	__bpf_md_ptr(struct bpf_sock *, sk);
	__u32 gso_size;
	__u32 :32;		/* Padding, future use. */
	__u64 hwtstamp;
};

struct bpf_tunnel_key {
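To make the helper contract above concrete, a hedged BPF-side sketch (attach point, global layout and includes are illustrative, not taken from this series):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    #define MAX_LBR_ENTRIES 32

    struct perf_branch_entry entries[MAX_LBR_ENTRIES] = {};
    long snapshot_bytes = 0;

    SEC("fentry/ksys_dup3")        /* attach point chosen only for illustration */
    int BPF_PROG(snapshot_branches)
    {
            /* call first, so the captured branches still describe how we
             * got here, as the Description above recommends */
            snapshot_bytes = bpf_get_branch_snapshot(entries,
                                                     sizeof(entries), 0);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";   /* the helper is gpl_only */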
@ -43,7 +43,7 @@ struct btf_type {
|
||||
* "size" tells the size of the type it is describing.
|
||||
*
|
||||
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
|
||||
* FUNC, FUNC_PROTO and VAR.
|
||||
* FUNC, FUNC_PROTO, VAR and TAG.
|
||||
* "type" is a type_id referring to another type.
|
||||
*/
|
||||
union {
|
||||
@ -56,25 +56,29 @@ struct btf_type {
|
||||
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
|
||||
#define BTF_INFO_KFLAG(info) ((info) >> 31)
|
||||
|
||||
#define BTF_KIND_UNKN 0 /* Unknown */
|
||||
#define BTF_KIND_INT 1 /* Integer */
|
||||
#define BTF_KIND_PTR 2 /* Pointer */
|
||||
#define BTF_KIND_ARRAY 3 /* Array */
|
||||
#define BTF_KIND_STRUCT 4 /* Struct */
|
||||
#define BTF_KIND_UNION 5 /* Union */
|
||||
#define BTF_KIND_ENUM 6 /* Enumeration */
|
||||
#define BTF_KIND_FWD 7 /* Forward */
|
||||
#define BTF_KIND_TYPEDEF 8 /* Typedef */
|
||||
#define BTF_KIND_VOLATILE 9 /* Volatile */
|
||||
#define BTF_KIND_CONST 10 /* Const */
|
||||
#define BTF_KIND_RESTRICT 11 /* Restrict */
|
||||
#define BTF_KIND_FUNC 12 /* Function */
|
||||
#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
|
||||
#define BTF_KIND_VAR 14 /* Variable */
|
||||
#define BTF_KIND_DATASEC 15 /* Section */
|
||||
#define BTF_KIND_FLOAT 16 /* Floating point */
|
||||
#define BTF_KIND_MAX BTF_KIND_FLOAT
|
||||
#define NR_BTF_KINDS (BTF_KIND_MAX + 1)
|
||||
enum {
|
||||
BTF_KIND_UNKN = 0, /* Unknown */
|
||||
BTF_KIND_INT = 1, /* Integer */
|
||||
BTF_KIND_PTR = 2, /* Pointer */
|
||||
BTF_KIND_ARRAY = 3, /* Array */
|
||||
BTF_KIND_STRUCT = 4, /* Struct */
|
||||
BTF_KIND_UNION = 5, /* Union */
|
||||
BTF_KIND_ENUM = 6, /* Enumeration */
|
||||
BTF_KIND_FWD = 7, /* Forward */
|
||||
BTF_KIND_TYPEDEF = 8, /* Typedef */
|
||||
BTF_KIND_VOLATILE = 9, /* Volatile */
|
||||
BTF_KIND_CONST = 10, /* Const */
|
||||
BTF_KIND_RESTRICT = 11, /* Restrict */
|
||||
BTF_KIND_FUNC = 12, /* Function */
|
||||
BTF_KIND_FUNC_PROTO = 13, /* Function Proto */
|
||||
BTF_KIND_VAR = 14, /* Variable */
|
||||
BTF_KIND_DATASEC = 15, /* Section */
|
||||
BTF_KIND_FLOAT = 16, /* Floating point */
|
||||
BTF_KIND_TAG = 17, /* Tag */
|
||||
|
||||
NR_BTF_KINDS,
|
||||
BTF_KIND_MAX = NR_BTF_KINDS - 1,
|
||||
};
|
||||
|
||||
/* For some specific BTF_KIND, "struct btf_type" is immediately
|
||||
* followed by extra data.
|
||||
@ -170,4 +174,15 @@ struct btf_var_secinfo {
|
||||
__u32 size;
|
||||
};
|
||||
|
||||
/* BTF_KIND_TAG is followed by a single "struct btf_tag" to describe
|
||||
* additional information related to the tag applied location.
|
||||
* If component_idx == -1, the tag is applied to a struct, union,
|
||||
* variable or function. Otherwise, it is applied to a struct/union
|
||||
* member or a func argument, and component_idx indicates which member
|
||||
* or argument (0 ... vlen-1).
|
||||
*/
|
||||
struct btf_tag {
|
||||
__s32 component_idx;
|
||||
};
|
||||
|
||||
#endif /* _UAPI__LINUX_BTF_H__ */
|
||||
|
kernel/bpf/btf.c
@ -281,6 +281,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
|
||||
[BTF_KIND_VAR] = "VAR",
|
||||
[BTF_KIND_DATASEC] = "DATASEC",
|
||||
[BTF_KIND_FLOAT] = "FLOAT",
|
||||
[BTF_KIND_TAG] = "TAG",
|
||||
};
|
||||
|
||||
const char *btf_type_str(const struct btf_type *t)
|
||||
@ -459,6 +460,17 @@ static bool btf_type_is_datasec(const struct btf_type *t)
|
||||
return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
|
||||
}
|
||||
|
||||
static bool btf_type_is_tag(const struct btf_type *t)
|
||||
{
|
||||
return BTF_INFO_KIND(t->info) == BTF_KIND_TAG;
|
||||
}
|
||||
|
||||
static bool btf_type_is_tag_target(const struct btf_type *t)
|
||||
{
|
||||
return btf_type_is_func(t) || btf_type_is_struct(t) ||
|
||||
btf_type_is_var(t);
|
||||
}
|
||||
|
||||
u32 btf_nr_types(const struct btf *btf)
|
||||
{
|
||||
u32 total = 0;
|
||||
@ -537,6 +549,7 @@ const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
|
||||
static bool btf_type_is_resolve_source_only(const struct btf_type *t)
|
||||
{
|
||||
return btf_type_is_var(t) ||
|
||||
btf_type_is_tag(t) ||
|
||||
btf_type_is_datasec(t);
|
||||
}
|
||||
|
||||
@ -563,6 +576,7 @@ static bool btf_type_needs_resolve(const struct btf_type *t)
|
||||
btf_type_is_struct(t) ||
|
||||
btf_type_is_array(t) ||
|
||||
btf_type_is_var(t) ||
|
||||
btf_type_is_tag(t) ||
|
||||
btf_type_is_datasec(t);
|
||||
}
|
||||
|
||||
@ -616,6 +630,11 @@ static const struct btf_var *btf_type_var(const struct btf_type *t)
|
||||
return (const struct btf_var *)(t + 1);
|
||||
}
|
||||
|
||||
static const struct btf_tag *btf_type_tag(const struct btf_type *t)
|
||||
{
|
||||
return (const struct btf_tag *)(t + 1);
|
||||
}
|
||||
|
||||
static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
|
||||
{
|
||||
return kind_ops[BTF_INFO_KIND(t->info)];
|
||||
@ -3801,6 +3820,110 @@ static const struct btf_kind_operations float_ops = {
|
||||
.show = btf_df_show,
|
||||
};
|
||||
|
||||
static s32 btf_tag_check_meta(struct btf_verifier_env *env,
|
||||
const struct btf_type *t,
|
||||
u32 meta_left)
|
||||
{
|
||||
const struct btf_tag *tag;
|
||||
u32 meta_needed = sizeof(*tag);
|
||||
s32 component_idx;
|
||||
const char *value;
|
||||
|
||||
if (meta_left < meta_needed) {
|
||||
btf_verifier_log_basic(env, t,
|
||||
"meta_left:%u meta_needed:%u",
|
||||
meta_left, meta_needed);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
value = btf_name_by_offset(env->btf, t->name_off);
|
||||
if (!value || !value[0]) {
|
||||
btf_verifier_log_type(env, t, "Invalid value");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (btf_type_vlen(t)) {
|
||||
btf_verifier_log_type(env, t, "vlen != 0");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (btf_type_kflag(t)) {
|
||||
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
component_idx = btf_type_tag(t)->component_idx;
|
||||
if (component_idx < -1) {
|
||||
btf_verifier_log_type(env, t, "Invalid component_idx");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
btf_verifier_log_type(env, t, NULL);
|
||||
|
||||
return meta_needed;
|
||||
}
|
||||
|
||||
static int btf_tag_resolve(struct btf_verifier_env *env,
|
||||
const struct resolve_vertex *v)
|
||||
{
|
||||
const struct btf_type *next_type;
|
||||
const struct btf_type *t = v->t;
|
||||
u32 next_type_id = t->type;
|
||||
struct btf *btf = env->btf;
|
||||
s32 component_idx;
|
||||
u32 vlen;
|
||||
|
||||
next_type = btf_type_by_id(btf, next_type_id);
|
||||
if (!next_type || !btf_type_is_tag_target(next_type)) {
|
||||
btf_verifier_log_type(env, v->t, "Invalid type_id");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!env_type_is_resolve_sink(env, next_type) &&
|
||||
!env_type_is_resolved(env, next_type_id))
|
||||
return env_stack_push(env, next_type, next_type_id);
|
||||
|
||||
component_idx = btf_type_tag(t)->component_idx;
|
||||
if (component_idx != -1) {
|
||||
if (btf_type_is_var(next_type)) {
|
||||
btf_verifier_log_type(env, v->t, "Invalid component_idx");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (btf_type_is_struct(next_type)) {
|
||||
vlen = btf_type_vlen(next_type);
|
||||
} else {
|
||||
/* next_type should be a function */
|
||||
next_type = btf_type_by_id(btf, next_type->type);
|
||||
vlen = btf_type_vlen(next_type);
|
||||
}
|
||||
|
||||
if ((u32)component_idx >= vlen) {
|
||||
btf_verifier_log_type(env, v->t, "Invalid component_idx");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
env_stack_pop_resolved(env, next_type_id, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void btf_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
|
||||
{
|
||||
btf_verifier_log(env, "type=%u component_idx=%d", t->type,
|
||||
btf_type_tag(t)->component_idx);
|
||||
}
|
||||
|
||||
static const struct btf_kind_operations tag_ops = {
|
||||
.check_meta = btf_tag_check_meta,
|
||||
.resolve = btf_tag_resolve,
|
||||
.check_member = btf_df_check_member,
|
||||
.check_kflag_member = btf_df_check_kflag_member,
|
||||
.log_details = btf_tag_log,
|
||||
.show = btf_df_show,
|
||||
};
|
||||
|
||||
static int btf_func_proto_check(struct btf_verifier_env *env,
|
||||
const struct btf_type *t)
|
||||
{
|
||||
@ -3935,6 +4058,7 @@ static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
|
||||
[BTF_KIND_VAR] = &var_ops,
|
||||
[BTF_KIND_DATASEC] = &datasec_ops,
|
||||
[BTF_KIND_FLOAT] = &float_ops,
|
||||
[BTF_KIND_TAG] = &tag_ops,
|
||||
};
|
||||
|
||||
static s32 btf_check_meta(struct btf_verifier_env *env,
|
||||
@ -4019,6 +4143,10 @@ static bool btf_resolve_valid(struct btf_verifier_env *env,
|
||||
return !btf_resolved_type_id(btf, type_id) &&
|
||||
!btf_resolved_type_size(btf, type_id);
|
||||
|
||||
if (btf_type_is_tag(t))
|
||||
return btf_resolved_type_id(btf, type_id) &&
|
||||
!btf_resolved_type_size(btf, type_id);
|
||||
|
||||
if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
|
||||
btf_type_is_var(t)) {
|
||||
t = btf_type_id_resolve(btf, &type_id);
|
||||
|
@ -10,6 +10,7 @@
|
||||
#include <linux/rcupdate_trace.h>
|
||||
#include <linux/rcupdate_wait.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/static_call.h>
|
||||
|
||||
/* dummy _ops. The verifier will operate on target program's ops. */
|
||||
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
|
||||
@ -526,7 +527,7 @@ out:
|
||||
}
|
||||
|
||||
#define NO_START_TIME 1
|
||||
static u64 notrace bpf_prog_start_time(void)
|
||||
static __always_inline u64 notrace bpf_prog_start_time(void)
|
||||
{
|
||||
u64 start = NO_START_TIME;
|
||||
|
||||
|
@ -13435,3 +13435,5 @@ struct cgroup_subsys perf_event_cgrp_subsys = {
|
||||
.threaded = true,
|
||||
};
|
||||
#endif /* CONFIG_CGROUP_PERF */
|
||||
|
||||
DEFINE_STATIC_CALL_RET0(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
|
||||
|
@ -1017,6 +1017,34 @@ static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
|
||||
.arg1_type = ARG_PTR_TO_CTX,
|
||||
};
|
||||
|
||||
BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
|
||||
{
|
||||
#ifndef CONFIG_X86
|
||||
return -ENOENT;
|
||||
#else
|
||||
static const u32 br_entry_size = sizeof(struct perf_branch_entry);
|
||||
u32 entry_cnt = size / br_entry_size;
|
||||
|
||||
entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
|
||||
|
||||
if (unlikely(flags))
|
||||
return -EINVAL;
|
||||
|
||||
if (!entry_cnt)
|
||||
return -ENOENT;
|
||||
|
||||
return entry_cnt * br_entry_size;
|
||||
#endif
|
||||
}
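As a worked example of the size/return contract implemented above (my arithmetic, assuming the usual 24-byte struct perf_branch_entry): a 768-byte buffer gives entry_cnt = 768 / 24 = 32; if the perf_snapshot_branch_stack static call fills only 16 entries, the helper returns 16 * 24 = 384 bytes, and zero filled entries yields -ENOENT.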
|
||||
|
||||
static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
|
||||
.func = bpf_get_branch_snapshot,
|
||||
.gpl_only = true,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_UNINIT_MEM,
|
||||
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
|
||||
};
|
||||
|
||||
static const struct bpf_func_proto *
|
||||
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
{
|
||||
@ -1132,6 +1160,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
return &bpf_snprintf_proto;
|
||||
case BPF_FUNC_get_func_ip:
|
||||
return &bpf_get_func_ip_proto_tracing;
|
||||
case BPF_FUNC_get_branch_snapshot:
|
||||
return &bpf_get_branch_snapshot_proto;
|
||||
default:
|
||||
return bpf_base_func_proto(func_id);
|
||||
}
|
||||
|
@ -8800,6 +8800,7 @@ static __init struct sk_buff *build_test_skb(void)
|
||||
skb_shinfo(skb[0])->gso_type |= SKB_GSO_DODGY;
|
||||
skb_shinfo(skb[0])->gso_segs = 0;
|
||||
skb_shinfo(skb[0])->frag_list = skb[1];
|
||||
skb_shinfo(skb[0])->hwtstamps.hwtstamp = 1000;
|
||||
|
||||
/* adjust skb[0]'s len */
|
||||
skb[0]->len += skb[1]->len;
|
||||
|
@ -483,11 +483,7 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
|
||||
return -EINVAL;
|
||||
|
||||
/* priority is allowed */
|
||||
|
||||
if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
|
||||
offsetof(struct __sk_buff, ifindex)))
|
||||
return -EINVAL;
|
||||
|
||||
/* ingress_ifindex is allowed */
|
||||
/* ifindex is allowed */
|
||||
|
||||
if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
|
||||
@ -511,11 +507,18 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
|
||||
/* gso_size is allowed */
|
||||
|
||||
if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
|
||||
offsetof(struct __sk_buff, hwtstamp)))
|
||||
return -EINVAL;
|
||||
|
||||
/* hwtstamp is allowed */
|
||||
|
||||
if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
|
||||
sizeof(struct __sk_buff)))
|
||||
return -EINVAL;
|
||||
|
||||
skb->mark = __skb->mark;
|
||||
skb->priority = __skb->priority;
|
||||
skb->skb_iif = __skb->ingress_ifindex;
|
||||
skb->tstamp = __skb->tstamp;
|
||||
memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
|
||||
|
||||
@ -532,6 +535,7 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
|
||||
return -EINVAL;
|
||||
skb_shinfo(skb)->gso_segs = __skb->gso_segs;
|
||||
skb_shinfo(skb)->gso_size = __skb->gso_size;
|
||||
skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -545,11 +549,13 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
|
||||
|
||||
__skb->mark = skb->mark;
|
||||
__skb->priority = skb->priority;
|
||||
__skb->ingress_ifindex = skb->skb_iif;
|
||||
__skb->ifindex = skb->dev->ifindex;
|
||||
__skb->tstamp = skb->tstamp;
|
||||
memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
|
||||
__skb->wire_len = cb->pkt_len;
|
||||
__skb->gso_segs = skb_shinfo(skb)->gso_segs;
|
||||
__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
|
||||
}
|
||||
|
||||
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
|
||||
|
@ -7765,6 +7765,10 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
|
||||
break;
|
||||
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
|
||||
return false;
|
||||
case bpf_ctx_range(struct __sk_buff, hwtstamp):
|
||||
if (type == BPF_WRITE || size != sizeof(__u64))
|
||||
return false;
|
||||
break;
|
||||
case bpf_ctx_range(struct __sk_buff, tstamp):
|
||||
if (size != sizeof(__u64))
|
||||
return false;
|
||||
@ -7774,6 +7778,9 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
|
||||
return false;
|
||||
info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
|
||||
break;
|
||||
case offsetofend(struct __sk_buff, gso_size) ... offsetof(struct __sk_buff, hwtstamp) - 1:
|
||||
/* Explicitly prohibit access to padding in __sk_buff. */
|
||||
return false;
|
||||
default:
|
||||
/* Only narrow read access allowed for now. */
|
||||
if (type == BPF_WRITE) {
|
||||
@ -7802,6 +7809,7 @@ static bool sk_filter_is_valid_access(int off, int size,
|
||||
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
|
||||
case bpf_ctx_range(struct __sk_buff, tstamp):
|
||||
case bpf_ctx_range(struct __sk_buff, wire_len):
|
||||
case bpf_ctx_range(struct __sk_buff, hwtstamp):
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -7872,6 +7880,7 @@ static bool lwt_is_valid_access(int off, int size,
|
||||
case bpf_ctx_range(struct __sk_buff, data_meta):
|
||||
case bpf_ctx_range(struct __sk_buff, tstamp):
|
||||
case bpf_ctx_range(struct __sk_buff, wire_len):
|
||||
case bpf_ctx_range(struct __sk_buff, hwtstamp):
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -8373,6 +8382,7 @@ static bool sk_skb_is_valid_access(int off, int size,
|
||||
case bpf_ctx_range(struct __sk_buff, data_meta):
|
||||
case bpf_ctx_range(struct __sk_buff, tstamp):
|
||||
case bpf_ctx_range(struct __sk_buff, wire_len):
|
||||
case bpf_ctx_range(struct __sk_buff, hwtstamp):
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -8884,6 +8894,17 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
|
||||
si->dst_reg, si->src_reg,
|
||||
offsetof(struct sk_buff, sk));
|
||||
break;
|
||||
case offsetof(struct __sk_buff, hwtstamp):
|
||||
BUILD_BUG_ON(sizeof_field(struct skb_shared_hwtstamps, hwtstamp) != 8);
|
||||
BUILD_BUG_ON(offsetof(struct skb_shared_hwtstamps, hwtstamp) != 0);
|
||||
|
||||
insn = bpf_convert_shinfo_access(si, insn);
|
||||
*insn++ = BPF_LDX_MEM(BPF_DW,
|
||||
si->dst_reg, si->dst_reg,
|
||||
bpf_target_off(struct skb_shared_info,
|
||||
hwtstamps, 8,
|
||||
target_size));
|
||||
break;
|
||||
}
|
||||
|
||||
return insn - insn_buf;
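The filter.c changes above expose skb_shinfo(skb)->hwtstamps.hwtstamp as the read-only, 8-byte-only hwtstamp field of __sk_buff. A hedged tc classifier sketch (section name and bpf_printk use are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("classifier")
    int read_hw_ts(struct __sk_buff *skb)
    {
            /* must be a full 8-byte read; writes are rejected */
            __u64 hw_ts = skb->hwtstamp;

            if (hw_ts)
                    bpf_printk("hw ts: %llu", hw_ts);
            return 0;   /* TC_ACT_OK */
    }

    char LICENSE[] SEC("license") = "GPL";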
|
||||
|
@ -137,7 +137,10 @@ endif
|
||||
BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool
|
||||
|
||||
BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o disasm.o)
|
||||
$(BOOTSTRAP_OBJS): $(LIBBPF_BOOTSTRAP)
|
||||
|
||||
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
|
||||
$(OBJS): $(LIBBPF)
|
||||
|
||||
VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
|
||||
$(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
|
||||
|
@ -37,6 +37,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
|
||||
[BTF_KIND_VAR] = "VAR",
|
||||
[BTF_KIND_DATASEC] = "DATASEC",
|
||||
[BTF_KIND_FLOAT] = "FLOAT",
|
||||
[BTF_KIND_TAG] = "TAG",
|
||||
};
|
||||
|
||||
struct btf_attach_table {
|
||||
@ -347,6 +348,17 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
|
||||
printf(" size=%u", t->size);
|
||||
break;
|
||||
}
|
||||
case BTF_KIND_TAG: {
|
||||
const struct btf_tag *tag = (const void *)(t + 1);
|
||||
|
||||
if (json_output) {
|
||||
jsonw_uint_field(w, "type_id", t->type);
|
||||
jsonw_int_field(w, "component_idx", tag->component_idx);
|
||||
} else {
|
||||
printf(" type_id=%u component_idx=%d", t->type, tag->component_idx);
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -238,8 +238,8 @@ static void codegen(const char *template, ...)
|
||||
} else if (c == '\n') {
|
||||
break;
|
||||
} else {
|
||||
p_err("unrecognized character at pos %td in template '%s'",
|
||||
src - template - 1, template);
|
||||
p_err("unrecognized character at pos %td in template '%s': '%c'",
|
||||
src - template - 1, template, c);
|
||||
free(s);
|
||||
exit(-1);
|
||||
}
|
||||
@ -406,7 +406,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
|
||||
}
|
||||
|
||||
bpf_object__for_each_map(map, obj) {
|
||||
const char * ident;
|
||||
const char *ident;
|
||||
|
||||
ident = get_map_ident(map);
|
||||
if (!ident)
|
||||
@ -862,6 +862,8 @@ static int do_skeleton(int argc, char **argv)
|
||||
codegen("\
|
||||
\n\
|
||||
\n\
|
||||
static inline const void *%1$s__elf_bytes(size_t *sz); \n\
|
||||
\n\
|
||||
static inline int \n\
|
||||
%1$s__create_skeleton(struct %1$s *obj) \n\
|
||||
{ \n\
|
||||
@ -943,10 +945,20 @@ static int do_skeleton(int argc, char **argv)
|
||||
codegen("\
|
||||
\n\
|
||||
\n\
|
||||
s->data_sz = %d; \n\
|
||||
s->data = (void *)\"\\ \n\
|
||||
",
|
||||
file_sz);
|
||||
s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\
|
||||
\n\
|
||||
return 0; \n\
|
||||
err: \n\
|
||||
bpf_object__destroy_skeleton(s); \n\
|
||||
return -ENOMEM; \n\
|
||||
} \n\
|
||||
\n\
|
||||
static inline const void *%2$s__elf_bytes(size_t *sz) \n\
|
||||
{ \n\
|
||||
*sz = %1$d; \n\
|
||||
return (const void *)\"\\ \n\
|
||||
"
|
||||
, file_sz, obj_name);
|
||||
|
||||
/* embed contents of BPF object file */
|
||||
print_hex(obj_data, file_sz);
|
||||
@ -954,11 +966,6 @@ static int do_skeleton(int argc, char **argv)
|
||||
codegen("\
|
||||
\n\
|
||||
\"; \n\
|
||||
\n\
|
||||
return 0; \n\
|
||||
err: \n\
|
||||
bpf_object__destroy_skeleton(s); \n\
|
||||
return -ENOMEM; \n\
|
||||
} \n\
|
||||
\n\
|
||||
#endif /* %s */ \n\
|
||||
|
@ -26,6 +26,7 @@ LIBBPF_SRC := $(srctree)/tools/lib/bpf/
|
||||
SUBCMD_SRC := $(srctree)/tools/lib/subcmd/
|
||||
|
||||
BPFOBJ := $(OUTPUT)/libbpf/libbpf.a
|
||||
LIBBPF_OUT := $(abspath $(dir $(BPFOBJ)))/
|
||||
SUBCMDOBJ := $(OUTPUT)/libsubcmd/libsubcmd.a
|
||||
|
||||
BINARY := $(OUTPUT)/resolve_btfids
|
||||
@ -41,7 +42,7 @@ $(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd
|
||||
$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
|
||||
|
||||
$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)/libbpf
|
||||
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
|
||||
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT) $(abspath $@)
|
||||
|
||||
CFLAGS := -g \
|
||||
-I$(srctree)/tools/include \
|
||||
@ -54,7 +55,7 @@ LIBS = -lelf -lz
|
||||
export srctree OUTPUT CFLAGS Q
|
||||
include $(srctree)/tools/build/Makefile.include
|
||||
|
||||
$(BINARY_IN): fixdep FORCE | $(OUTPUT)
|
||||
$(BINARY_IN): $(BPFOBJ) fixdep FORCE | $(OUTPUT)
|
||||
$(Q)$(MAKE) $(build)=resolve_btfids
|
||||
|
||||
$(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
|
||||
|
@ -1629,7 +1629,7 @@ union bpf_attr {
|
||||
* u32 bpf_get_smp_processor_id(void)
|
||||
* Description
|
||||
* Get the SMP (symmetric multiprocessing) processor id. Note that
|
||||
* all programs run with preemption disabled, which means that the
|
||||
* all programs run with migration disabled, which means that the
|
||||
* SMP processor id is stable during all the execution of the
|
||||
* program.
|
||||
* Return
|
||||
@ -4877,6 +4877,27 @@ union bpf_attr {
|
||||
* Get the struct pt_regs associated with **task**.
|
||||
* Return
|
||||
* A pointer to struct pt_regs.
|
||||
*
|
||||
* long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
|
||||
* Description
|
||||
* Get branch trace from hardware engines like Intel LBR. The
|
||||
* hardware engine is stopped shortly after the helper is
|
||||
* called. Therefore, the user need to filter branch entries
|
||||
* based on the actual use case. To capture branch trace
|
||||
* before the trigger point of the BPF program, the helper
|
||||
* should be called at the beginning of the BPF program.
|
||||
*
|
||||
* The data is stored as struct perf_branch_entry into output
|
||||
* buffer *entries*. *size* is the size of *entries* in bytes.
|
||||
* *flags* is reserved for now and must be zero.
|
||||
*
|
||||
* Return
|
||||
* On success, number of bytes written to *buf*. On error, a
|
||||
* negative value.
|
||||
*
|
||||
* **-EINVAL** if *flags* is not zero.
|
||||
*
|
||||
* **-ENOENT** if architecture does not support branch records.
|
||||
*/
|
||||
#define __BPF_FUNC_MAPPER(FN) \
|
||||
FN(unspec), \
|
||||
@ -5055,6 +5076,7 @@ union bpf_attr {
|
||||
FN(get_func_ip), \
|
||||
FN(get_attach_cookie), \
|
||||
FN(task_pt_regs), \
|
||||
FN(get_branch_snapshot), \
|
||||
/* */
|
||||
|
||||
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
|
||||
@ -5284,6 +5306,8 @@ struct __sk_buff {
|
||||
__u32 gso_segs;
|
||||
__bpf_md_ptr(struct bpf_sock *, sk);
|
||||
__u32 gso_size;
|
||||
__u32 :32; /* Padding, future use. */
|
||||
__u64 hwtstamp;
|
||||
};
|
||||
|
||||
struct bpf_tunnel_key {
|
||||
|
@ -43,7 +43,7 @@ struct btf_type {
|
||||
* "size" tells the size of the type it is describing.
|
||||
*
|
||||
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
|
||||
* FUNC, FUNC_PROTO and VAR.
|
||||
* FUNC, FUNC_PROTO, VAR and TAG.
|
||||
* "type" is a type_id referring to another type.
|
||||
*/
|
||||
union {
|
||||
@ -56,25 +56,29 @@ struct btf_type {
|
||||
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
|
||||
#define BTF_INFO_KFLAG(info) ((info) >> 31)
|
||||
|
||||
#define BTF_KIND_UNKN 0 /* Unknown */
|
||||
#define BTF_KIND_INT 1 /* Integer */
|
||||
#define BTF_KIND_PTR 2 /* Pointer */
|
||||
#define BTF_KIND_ARRAY 3 /* Array */
|
||||
#define BTF_KIND_STRUCT 4 /* Struct */
|
||||
#define BTF_KIND_UNION 5 /* Union */
|
||||
#define BTF_KIND_ENUM 6 /* Enumeration */
|
||||
#define BTF_KIND_FWD 7 /* Forward */
|
||||
#define BTF_KIND_TYPEDEF 8 /* Typedef */
|
||||
#define BTF_KIND_VOLATILE 9 /* Volatile */
|
||||
#define BTF_KIND_CONST 10 /* Const */
|
||||
#define BTF_KIND_RESTRICT 11 /* Restrict */
|
||||
#define BTF_KIND_FUNC 12 /* Function */
|
||||
#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
|
||||
#define BTF_KIND_VAR 14 /* Variable */
|
||||
#define BTF_KIND_DATASEC 15 /* Section */
|
||||
#define BTF_KIND_FLOAT 16 /* Floating point */
|
||||
#define BTF_KIND_MAX BTF_KIND_FLOAT
|
||||
#define NR_BTF_KINDS (BTF_KIND_MAX + 1)
|
||||
enum {
|
||||
BTF_KIND_UNKN = 0, /* Unknown */
|
||||
BTF_KIND_INT = 1, /* Integer */
|
||||
BTF_KIND_PTR = 2, /* Pointer */
|
||||
BTF_KIND_ARRAY = 3, /* Array */
|
||||
BTF_KIND_STRUCT = 4, /* Struct */
|
||||
BTF_KIND_UNION = 5, /* Union */
|
||||
BTF_KIND_ENUM = 6, /* Enumeration */
|
||||
BTF_KIND_FWD = 7, /* Forward */
|
||||
BTF_KIND_TYPEDEF = 8, /* Typedef */
|
||||
BTF_KIND_VOLATILE = 9, /* Volatile */
|
||||
BTF_KIND_CONST = 10, /* Const */
|
||||
BTF_KIND_RESTRICT = 11, /* Restrict */
|
||||
BTF_KIND_FUNC = 12, /* Function */
|
||||
BTF_KIND_FUNC_PROTO = 13, /* Function Proto */
|
||||
BTF_KIND_VAR = 14, /* Variable */
|
||||
BTF_KIND_DATASEC = 15, /* Section */
|
||||
BTF_KIND_FLOAT = 16, /* Floating point */
|
||||
BTF_KIND_TAG = 17, /* Tag */
|
||||
|
||||
NR_BTF_KINDS,
|
||||
BTF_KIND_MAX = NR_BTF_KINDS - 1,
|
||||
};
|
||||
|
||||
/* For some specific BTF_KIND, "struct btf_type" is immediately
|
||||
* followed by extra data.
|
||||
@ -170,4 +174,15 @@ struct btf_var_secinfo {
|
||||
__u32 size;
|
||||
};
|
||||
|
||||
/* BTF_KIND_TAG is followed by a single "struct btf_tag" to describe
|
||||
* additional information related to the tag applied location.
|
||||
* If component_idx == -1, the tag is applied to a struct, union,
|
||||
* variable or function. Otherwise, it is applied to a struct/union
|
||||
* member or a func argument, and component_idx indicates which member
|
||||
* or argument (0 ... vlen-1).
|
||||
*/
|
||||
struct btf_tag {
|
||||
__s32 component_idx;
|
||||
};
|
||||
|
||||
#endif /* _UAPI__LINUX_BTF_H__ */
|
||||
|
tools/lib/bpf/.gitignore
@ -1,5 +1,4 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
libbpf_version.h
|
||||
libbpf.pc
|
||||
libbpf.so.*
|
||||
TAGS
|
||||
|
@ -8,7 +8,8 @@ VERSION_SCRIPT := libbpf.map
|
||||
LIBBPF_VERSION := $(shell \
|
||||
grep -oE '^LIBBPF_([0-9.]+)' $(VERSION_SCRIPT) | \
|
||||
sort -rV | head -n1 | cut -d'_' -f2)
|
||||
LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION)))
|
||||
LIBBPF_MAJOR_VERSION := $(word 1,$(subst ., ,$(LIBBPF_VERSION)))
|
||||
LIBBPF_MINOR_VERSION := $(word 2,$(subst ., ,$(LIBBPF_VERSION)))
|
||||
|
||||
MAKEFLAGS += --no-print-directory
|
||||
|
||||
@ -59,7 +60,8 @@ ifndef VERBOSE
|
||||
VERBOSE = 0
|
||||
endif
|
||||
|
||||
INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
|
||||
INCLUDES = -I$(if $(OUTPUT),$(OUTPUT),.) \
|
||||
-I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
|
||||
|
||||
export prefix libdir src obj
|
||||
|
||||
@ -112,6 +114,7 @@ STATIC_OBJDIR := $(OUTPUT)staticobjs/
|
||||
BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o
|
||||
BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o
|
||||
BPF_HELPER_DEFS := $(OUTPUT)bpf_helper_defs.h
|
||||
BPF_GENERATED := $(BPF_HELPER_DEFS)
|
||||
|
||||
LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
|
||||
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
|
||||
@ -136,7 +139,7 @@ all: fixdep
|
||||
|
||||
all_cmd: $(CMD_TARGETS) check
|
||||
|
||||
$(BPF_IN_SHARED): force $(BPF_HELPER_DEFS)
|
||||
$(BPF_IN_SHARED): force $(BPF_GENERATED)
|
||||
@(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
|
||||
(diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
|
||||
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
|
||||
@ -154,7 +157,7 @@ $(BPF_IN_SHARED): force $(BPF_HELPER_DEFS)
|
||||
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
|
||||
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
|
||||
|
||||
$(BPF_IN_STATIC): force $(BPF_HELPER_DEFS)
|
||||
$(BPF_IN_STATIC): force $(BPF_GENERATED)
|
||||
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
|
||||
|
||||
$(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
|
||||
@ -179,7 +182,7 @@ $(OUTPUT)libbpf.pc:
|
||||
-e "s|@VERSION@|$(LIBBPF_VERSION)|" \
|
||||
< libbpf.pc.template > $@
|
||||
|
||||
check: check_abi
|
||||
check: check_abi check_version
|
||||
|
||||
check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT)
|
||||
@if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then \
|
||||
@ -205,6 +208,21 @@ check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT)
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
HDR_MAJ_VERSION := $(shell grep -oE '^\#define LIBBPF_MAJOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)
|
||||
HDR_MIN_VERSION := $(shell grep -oE '^\#define LIBBPF_MINOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)
|
||||
|
||||
check_version: $(VERSION_SCRIPT) libbpf_version.h
|
||||
@if [ "$(HDR_MAJ_VERSION)" != "$(LIBBPF_MAJOR_VERSION)" ]; then \
|
||||
echo "Error: libbpf major version mismatch detected: " \
|
||||
"'$(HDR_MAJ_VERSION)' != '$(LIBBPF_MAJOR_VERSION)'" >&2; \
|
||||
exit 1; \
|
||||
fi
|
||||
@if [ "$(HDR_MIN_VERSION)" != "$(LIBBPF_MINOR_VERSION)" ]; then \
|
||||
echo "Error: libbpf minor version mismatch detected: " \
|
||||
"'$(HDR_MIN_VERSION)' != '$(LIBBPF_MINOR_VERSION)'" >&2; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
define do_install_mkdir
|
||||
if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
|
||||
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
|
||||
@ -224,10 +242,11 @@ install_lib: all_cmd
|
||||
cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
|
||||
|
||||
INSTALL_HEADERS = bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \
|
||||
bpf_helpers.h $(BPF_HELPER_DEFS) bpf_tracing.h \
|
||||
bpf_endian.h bpf_core_read.h skel_internal.h
|
||||
bpf_helpers.h $(BPF_GENERATED) bpf_tracing.h \
|
||||
bpf_endian.h bpf_core_read.h skel_internal.h \
|
||||
libbpf_version.h
|
||||
|
||||
install_headers: $(BPF_HELPER_DEFS)
|
||||
install_headers: $(BPF_GENERATED)
|
||||
$(call QUIET_INSTALL, headers) \
|
||||
$(foreach hdr,$(INSTALL_HEADERS), \
|
||||
$(call do_install,$(hdr),$(prefix)/include/bpf,644);)
|
||||
@ -240,12 +259,12 @@ install: install_lib install_pkgconfig install_headers
|
||||
|
||||
clean:
|
||||
$(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \
|
||||
*~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \
|
||||
*~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_GENERATED) \
|
||||
$(SHARED_OBJDIR) $(STATIC_OBJDIR) \
|
||||
$(addprefix $(OUTPUT), \
|
||||
*.o *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) *.pc)
|
||||
|
||||
PHONY += force cscope tags
|
||||
PHONY += force cscope tags check check_abi check_version
|
||||
force:
|
||||
|
||||
cscope:
|
||||
|
@ -304,6 +304,8 @@ static int btf_type_size(const struct btf_type *t)
|
||||
return base_size + sizeof(struct btf_var);
|
||||
case BTF_KIND_DATASEC:
|
||||
return base_size + vlen * sizeof(struct btf_var_secinfo);
|
||||
case BTF_KIND_TAG:
|
||||
return base_size + sizeof(struct btf_tag);
|
||||
default:
|
||||
pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
|
||||
return -EINVAL;
|
||||
@ -376,6 +378,9 @@ static int btf_bswap_type_rest(struct btf_type *t)
|
||||
v->size = bswap_32(v->size);
|
||||
}
|
||||
return 0;
|
||||
case BTF_KIND_TAG:
|
||||
btf_tag(t)->component_idx = bswap_32(btf_tag(t)->component_idx);
|
||||
return 0;
|
||||
default:
|
||||
pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
|
||||
return -EINVAL;
|
||||
@ -586,6 +591,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
|
||||
case BTF_KIND_CONST:
|
||||
case BTF_KIND_RESTRICT:
|
||||
case BTF_KIND_VAR:
|
||||
case BTF_KIND_TAG:
|
||||
type_id = t->type;
|
||||
break;
|
||||
case BTF_KIND_ARRAY:
|
||||
@ -2440,6 +2446,48 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Append new BTF_KIND_TAG type with:
|
||||
* - *value* - non-empty/non-NULL string;
|
||||
* - *ref_type_id* - referenced type ID, it might not exist yet;
|
||||
* - *component_idx* - -1 for tagging reference type, otherwise struct/union
|
||||
* member or function argument index;
|
||||
* Returns:
|
||||
* - >0, type ID of newly added BTF type;
|
||||
* - <0, on error.
|
||||
*/
|
||||
int btf__add_tag(struct btf *btf, const char *value, int ref_type_id,
|
||||
int component_idx)
|
||||
{
|
||||
struct btf_type *t;
|
||||
int sz, value_off;
|
||||
|
||||
if (!value || !value[0] || component_idx < -1)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
if (validate_type_id(ref_type_id))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
if (btf_ensure_modifiable(btf))
|
||||
return libbpf_err(-ENOMEM);
|
||||
|
||||
sz = sizeof(struct btf_type) + sizeof(struct btf_tag);
|
||||
t = btf_add_type_mem(btf, sz);
|
||||
if (!t)
|
||||
return libbpf_err(-ENOMEM);
|
||||
|
||||
value_off = btf__add_str(btf, value);
|
||||
if (value_off < 0)
|
||||
return value_off;
|
||||
|
||||
t->name_off = value_off;
|
||||
t->info = btf_type_info(BTF_KIND_TAG, 0, false);
|
||||
t->type = ref_type_id;
|
||||
btf_tag(t)->component_idx = component_idx;
|
||||
|
||||
return btf_commit_type(btf, sz);
|
||||
}
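A hedged usage sketch of the new btf__add_tag() API (names and sizes are illustrative; error handling trimmed):

    struct btf *btf = btf__new_empty();
    int int_id, st_id, tag_id;

    int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
    st_id  = btf__add_struct(btf, "pair", 8);
    btf__add_field(btf, "a", int_id, 0, 0);     /* member 0 */
    btf__add_field(btf, "b", int_id, 32, 0);    /* member 1 */

    /* tag member "b" of struct "pair": component_idx == 1 */
    tag_id = btf__add_tag(btf, "user_data", st_id, 1);
    if (tag_id < 0)
            /* negative libbpf error code */;

    btf__free(btf);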
|
||||
|
||||
struct btf_ext_sec_setup_param {
|
||||
__u32 off;
|
||||
__u32 len;
|
||||
@ -3256,8 +3304,8 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
|
||||
t1->size == t2->size;
|
||||
}
|
||||
|
||||
/* Calculate type signature hash of INT. */
|
||||
static long btf_hash_int(struct btf_type *t)
|
||||
/* Calculate type signature hash of INT or TAG. */
|
||||
static long btf_hash_int_tag(struct btf_type *t)
|
||||
{
|
||||
__u32 info = *(__u32 *)(t + 1);
|
||||
long h;
|
||||
@ -3267,8 +3315,8 @@ static long btf_hash_int(struct btf_type *t)
|
||||
return h;
|
||||
}
|
||||
|
||||
/* Check structural equality of two INTs. */
|
||||
static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
|
||||
/* Check structural equality of two INTs or TAGs. */
|
||||
static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
|
||||
{
|
||||
__u32 info1, info2;
|
||||
|
||||
@ -3535,7 +3583,8 @@ static int btf_dedup_prep(struct btf_dedup *d)
|
||||
h = btf_hash_common(t);
|
||||
break;
|
||||
case BTF_KIND_INT:
|
||||
h = btf_hash_int(t);
|
||||
case BTF_KIND_TAG:
|
||||
h = btf_hash_int_tag(t);
|
||||
break;
|
||||
case BTF_KIND_ENUM:
|
||||
h = btf_hash_enum(t);
|
||||
@ -3590,14 +3639,15 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
|
||||
case BTF_KIND_FUNC_PROTO:
|
||||
case BTF_KIND_VAR:
|
||||
case BTF_KIND_DATASEC:
|
||||
case BTF_KIND_TAG:
|
||||
return 0;
|
||||
|
||||
case BTF_KIND_INT:
|
||||
h = btf_hash_int(t);
|
||||
h = btf_hash_int_tag(t);
|
||||
for_each_dedup_cand(d, hash_entry, h) {
|
||||
cand_id = (__u32)(long)hash_entry->value;
|
||||
cand = btf_type_by_id(d->btf, cand_id);
|
||||
if (btf_equal_int(t, cand)) {
|
||||
if (btf_equal_int_tag(t, cand)) {
|
||||
new_id = cand_id;
|
||||
break;
|
||||
}
|
||||
@ -3881,7 +3931,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
|
||||
|
||||
switch (cand_kind) {
|
||||
case BTF_KIND_INT:
|
||||
return btf_equal_int(cand_type, canon_type);
|
||||
return btf_equal_int_tag(cand_type, canon_type);
|
||||
|
||||
case BTF_KIND_ENUM:
|
||||
if (d->opts.dont_resolve_fwds)
|
||||
@ -4210,6 +4260,23 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
|
||||
}
|
||||
break;
|
||||
|
||||
case BTF_KIND_TAG:
|
||||
ref_type_id = btf_dedup_ref_type(d, t->type);
|
||||
if (ref_type_id < 0)
|
||||
return ref_type_id;
|
||||
t->type = ref_type_id;
|
||||
|
||||
h = btf_hash_int_tag(t);
|
||||
for_each_dedup_cand(d, hash_entry, h) {
|
||||
cand_id = (__u32)(long)hash_entry->value;
|
||||
cand = btf_type_by_id(d->btf, cand_id);
|
||||
if (btf_equal_int_tag(t, cand)) {
|
||||
new_id = cand_id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case BTF_KIND_ARRAY: {
|
||||
struct btf_array *info = btf_array(t);
|
||||
|
||||
@ -4482,6 +4549,7 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
|
||||
case BTF_KIND_TYPEDEF:
|
||||
case BTF_KIND_FUNC:
|
||||
case BTF_KIND_VAR:
|
||||
case BTF_KIND_TAG:
|
||||
return visit(&t->type, ctx);
|
||||
|
||||
case BTF_KIND_ARRAY: {
|
||||
|
@ -1,5 +1,6 @@
|
||||
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
|
||||
/* Copyright (c) 2018 Facebook */
|
||||
/*! \file */
|
||||
|
||||
#ifndef __LIBBPF_BTF_H
|
||||
#define __LIBBPF_BTF_H
|
||||
@ -30,11 +31,80 @@ enum btf_endianness {
|
||||
BTF_BIG_ENDIAN = 1,
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief **btf__free()** frees all data of a BTF object
|
||||
* @param btf BTF object to free
|
||||
*/
|
||||
LIBBPF_API void btf__free(struct btf *btf);
|
||||
|
||||
/**
|
||||
* @brief **btf__new()** creates a new instance of a BTF object from the raw
|
||||
* bytes of an ELF's BTF section
|
||||
* @param data raw bytes
|
||||
* @param size number of bytes passed in `data`
|
||||
* @return new BTF object instance which has to be eventually freed with
|
||||
* **btf__free()**
|
||||
*
|
||||
* On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
|
||||
* error code from such a pointer `libbpf_get_error()` should be used. If
|
||||
* `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
|
||||
* returned on error instead. In both cases thread-local `errno` variable is
|
||||
* always set to error code as well.
|
||||
*/
|
||||
LIBBPF_API struct btf *btf__new(const void *data, __u32 size);
|
||||
|
||||
/**
|
||||
* @brief **btf__new_split()** create a new instance of a BTF object from the
|
||||
* provided raw data bytes. It takes another BTF instance, **base_btf**, which
|
||||
* serves as a base BTF, which is extended by types in a newly created BTF
|
||||
* instance
|
||||
* @param data raw bytes
|
||||
* @param size length of raw bytes
|
||||
* @param base_btf the base BTF object
|
||||
* @return new BTF object instance which has to be eventually freed with
|
||||
* **btf__free()**
|
||||
*
|
||||
* If *base_btf* is NULL, `btf__new_split()` is equivalent to `btf__new()` and
|
||||
* creates non-split BTF.
|
||||
*
|
||||
* On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
|
||||
* error code from such a pointer `libbpf_get_error()` should be used. If
|
||||
* `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
|
||||
* returned on error instead. In both cases thread-local `errno` variable is
|
||||
* always set to error code as well.
|
||||
*/
|
||||
LIBBPF_API struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf);
|
||||
|
||||
/**
|
||||
* @brief **btf__new_empty()** creates an empty BTF object. Use
|
||||
* `btf__add_*()` to populate such BTF object.
|
||||
* @return new BTF object instance which has to be eventually freed with
|
||||
* **btf__free()**
|
||||
*
|
||||
* On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
|
||||
* error code from such a pointer `libbpf_get_error()` should be used. If
|
||||
* `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
|
||||
* returned on error instead. In both cases thread-local `errno` variable is
|
||||
* always set to error code as well.
|
||||
*/
|
||||
LIBBPF_API struct btf *btf__new_empty(void);
|
||||
|
||||
/**
|
||||
* @brief **btf__new_empty_split()** creates an unpopulated BTF object from an
|
||||
* ELF BTF section except with a base BTF on top of which split BTF should be
|
||||
* based
|
||||
* @return new BTF object instance which has to be eventually freed with
|
||||
* **btf__free()**
|
||||
*
|
||||
* If *base_btf* is NULL, `btf__new_empty_split()` is equivalent to
|
||||
* `btf__new_empty()` and creates non-split BTF.
|
||||
*
|
||||
* On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
|
||||
* error code from such a pointer `libbpf_get_error()` should be used. If
|
||||
* `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
|
||||
* returned on error instead. In both cases thread-local `errno` variable is
|
||||
* always set to error code as well.
|
||||
*/
|
||||
LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
|
||||
|
||||
LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);
|
||||
@ -50,9 +120,11 @@ LIBBPF_API struct btf *libbpf_find_kernel_btf(void);

LIBBPF_API struct btf *btf__load_from_kernel_by_id(__u32 id);
LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf);
LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_from_kernel_by_id instead")
LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);

LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_into_kernel instead")
LIBBPF_API int btf__load(struct btf *btf);
LIBBPF_API int btf__load_into_kernel(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
@ -141,6 +213,10 @@ LIBBPF_API int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz
LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id,
					 __u32 offset, __u32 byte_sz);

/* tag construction API */
LIBBPF_API int btf__add_tag(struct btf *btf, const char *value, int ref_type_id,
			    int component_idx);
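A short, hedged sketch of how the tag construction API above might be used; the type id and tag string are illustrative:

	/* Assume type id 2 is some struct in `btf`. component_idx == -1
	 * tags the type as a whole, >= 0 tags a member/parameter.
	 */
	int tag_id;

	tag_id = btf__add_tag(btf, "user_visible", 2, -1);
	if (tag_id < 0)
		return tag_id;	/* negative error code */

	/* tag member #0 of the same struct */
	tag_id = btf__add_tag(btf, "user_visible", 2, 0);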
struct btf_dedup_opts {
|
||||
unsigned int dedup_table_size;
|
||||
bool dont_resolve_fwds;
|
||||
@ -328,6 +404,11 @@ static inline bool btf_is_float(const struct btf_type *t)
	return btf_kind(t) == BTF_KIND_FLOAT;
}

static inline bool btf_is_tag(const struct btf_type *t)
{
	return btf_kind(t) == BTF_KIND_TAG;
}

static inline __u8 btf_int_encoding(const struct btf_type *t)
{
	return BTF_INT_ENCODING(*(__u32 *)(t + 1));
@ -396,6 +477,12 @@ btf_var_secinfos(const struct btf_type *t)
	return (struct btf_var_secinfo *)(t + 1);
}

struct btf_tag;
static inline struct btf_tag *btf_tag(const struct btf_type *t)
{
	return (struct btf_tag *)(t + 1);
}
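A minimal sketch (not from the patch) combining the helpers above to walk a BTF object and print its TAG entries; error handling is elided:

#include <stdio.h>
#include <bpf/btf.h>

static void dump_tags(const struct btf *btf)
{
	__u32 i, n = btf__get_nr_types(btf);

	for (i = 1; i <= n; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);

		if (!btf_is_tag(t))
			continue;

		printf("[%u] TAG '%s' -> type %u, component_idx %d\n",
		       i, btf__name_by_offset(btf, t->name_off),
		       t->type, btf_tag(t)->component_idx);
	}
}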
#ifdef __cplusplus
|
||||
} /* extern "C" */
|
||||
#endif
|
||||
|
@ -316,6 +316,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
|
||||
case BTF_KIND_TYPEDEF:
|
||||
case BTF_KIND_FUNC:
|
||||
case BTF_KIND_VAR:
|
||||
case BTF_KIND_TAG:
|
||||
d->type_states[t->type].referenced = 1;
|
||||
break;
|
||||
|
||||
@ -583,6 +584,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
|
||||
case BTF_KIND_FUNC:
|
||||
case BTF_KIND_VAR:
|
||||
case BTF_KIND_DATASEC:
|
||||
case BTF_KIND_TAG:
|
||||
d->type_states[id].order_state = ORDERED;
|
||||
return 0;
|
||||
|
||||
@ -2215,6 +2217,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
|
||||
case BTF_KIND_FWD:
|
||||
case BTF_KIND_FUNC:
|
||||
case BTF_KIND_FUNC_PROTO:
|
||||
case BTF_KIND_TAG:
|
||||
err = btf_dump_unsupported_data(d, t, id);
|
||||
break;
|
||||
case BTF_KIND_INT:
|
||||
|
@ -195,6 +195,8 @@ enum kern_feature_id {
|
||||
FEAT_BTF_FLOAT,
|
||||
/* BPF perf link support */
|
||||
FEAT_PERF_LINK,
|
||||
/* BTF_KIND_TAG support */
|
||||
FEAT_BTF_TAG,
|
||||
__FEAT_CNT,
|
||||
};
|
||||
|
||||
@ -218,8 +220,7 @@ struct reloc_desc {
|
||||
|
||||
struct bpf_sec_def;
|
||||
|
||||
typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog);
|
||||
typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog);
|
||||
|
||||
struct bpf_sec_def {
|
||||
const char *sec;
|
||||
@ -1987,6 +1988,7 @@ static const char *__btf_kind_str(__u16 kind)
|
||||
case BTF_KIND_VAR: return "var";
|
||||
case BTF_KIND_DATASEC: return "datasec";
|
||||
case BTF_KIND_FLOAT: return "float";
|
||||
case BTF_KIND_TAG: return "tag";
|
||||
default: return "unknown";
|
||||
}
|
||||
}
|
||||
@ -2486,8 +2488,9 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
|
||||
bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
|
||||
bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
|
||||
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
|
||||
bool has_tag = kernel_supports(obj, FEAT_BTF_TAG);
|
||||
|
||||
return !has_func || !has_datasec || !has_func_global || !has_float;
|
||||
return !has_func || !has_datasec || !has_func_global || !has_float || !has_tag;
|
||||
}
|
||||
|
||||
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
|
||||
@ -2496,14 +2499,15 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
|
||||
bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
|
||||
bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
|
||||
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
|
||||
bool has_tag = kernel_supports(obj, FEAT_BTF_TAG);
|
||||
struct btf_type *t;
|
||||
int i, j, vlen;
|
||||
|
||||
for (i = 1; i <= btf__get_nr_types(btf); i++) {
|
||||
t = (struct btf_type *)btf__type_by_id(btf, i);
|
||||
|
||||
if (!has_datasec && btf_is_var(t)) {
|
||||
/* replace VAR with INT */
|
||||
if ((!has_datasec && btf_is_var(t)) || (!has_tag && btf_is_tag(t))) {
|
||||
/* replace VAR/TAG with INT */
|
||||
t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
|
||||
/*
|
||||
* using size = 1 is the safest choice, 4 will be too
|
||||
@ -2993,6 +2997,12 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
|
||||
}
|
||||
}
|
||||
|
||||
if (!obj->efile.symbols) {
|
||||
pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
|
||||
obj->path);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
scn = NULL;
|
||||
while ((scn = elf_nextscn(elf, scn)) != NULL) {
|
||||
idx++;
|
||||
@ -4207,6 +4217,23 @@ static int probe_kern_btf_float(void)
|
||||
strs, sizeof(strs)));
|
||||
}
|
||||
|
||||
static int probe_kern_btf_tag(void)
|
||||
{
|
||||
static const char strs[] = "\0tag";
|
||||
__u32 types[] = {
|
||||
/* int */
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
/* VAR x */ /* [2] */
|
||||
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
|
||||
BTF_VAR_STATIC,
|
||||
/* attr */
|
||||
BTF_TYPE_TAG_ENC(1, 2, -1),
|
||||
};
|
||||
|
||||
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
|
||||
strs, sizeof(strs)));
|
||||
}
|
||||
|
||||
static int probe_kern_array_mmap(void)
|
||||
{
|
||||
struct bpf_create_map_attr attr = {
|
||||
@ -4423,6 +4450,9 @@ static struct kern_feature_desc {
|
||||
[FEAT_PERF_LINK] = {
|
||||
"BPF perf link support", probe_perf_link,
|
||||
},
|
||||
[FEAT_BTF_TAG] = {
|
||||
"BTF_KIND_TAG support", probe_kern_btf_tag,
|
||||
},
|
||||
};
|
||||
|
||||
static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
|
||||
@ -6367,12 +6397,40 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
|
||||
|
||||
static const struct bpf_sec_def *find_sec_def(const char *sec_name);
|
||||
|
||||
static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
|
||||
{
|
||||
struct bpf_program *prog;
|
||||
|
||||
bpf_object__for_each_program(prog, obj) {
|
||||
prog->sec_def = find_sec_def(prog->sec_name);
|
||||
if (!prog->sec_def) {
|
||||
/* couldn't guess, but user might manually specify */
|
||||
pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
|
||||
prog->name, prog->sec_name);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (prog->sec_def->is_sleepable)
|
||||
prog->prog_flags |= BPF_F_SLEEPABLE;
|
||||
bpf_program__set_type(prog, prog->sec_def->prog_type);
|
||||
bpf_program__set_expected_attach_type(prog, prog->sec_def->expected_attach_type);
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
|
||||
if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
|
||||
prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
|
||||
prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
|
||||
#pragma GCC diagnostic pop
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct bpf_object *
|
||||
__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
|
||||
const struct bpf_object_open_opts *opts)
|
||||
{
|
||||
const char *obj_name, *kconfig, *btf_tmp_path;
|
||||
struct bpf_program *prog;
|
||||
struct bpf_object *obj;
|
||||
char tmp_name[64];
|
||||
int err;
|
||||
@ -6430,31 +6488,13 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
|
||||
err = err ? : bpf_object__collect_externs(obj);
|
||||
err = err ? : bpf_object__finalize_btf(obj);
|
||||
err = err ? : bpf_object__init_maps(obj, opts);
|
||||
err = err ? : bpf_object_init_progs(obj, opts);
|
||||
err = err ? : bpf_object__collect_relos(obj);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
bpf_object__elf_finish(obj);
|
||||
|
||||
bpf_object__for_each_program(prog, obj) {
|
||||
prog->sec_def = find_sec_def(prog->sec_name);
|
||||
if (!prog->sec_def) {
|
||||
/* couldn't guess, but user might manually specify */
|
||||
pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
|
||||
prog->name, prog->sec_name);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (prog->sec_def->is_sleepable)
|
||||
prog->prog_flags |= BPF_F_SLEEPABLE;
|
||||
bpf_program__set_type(prog, prog->sec_def->prog_type);
|
||||
bpf_program__set_expected_attach_type(prog,
|
||||
prog->sec_def->expected_attach_type);
|
||||
|
||||
if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
|
||||
prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
|
||||
prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
|
||||
}
|
||||
|
||||
return obj;
|
||||
out:
|
||||
bpf_object__close(obj);
|
||||
@ -7907,18 +7947,12 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
|
||||
__VA_ARGS__ \
|
||||
}
|
||||
|
||||
static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog);
|
||||
static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog);
|
||||
static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog);
|
||||
static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog);
|
||||
static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog);
|
||||
static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog);
|
||||
static struct bpf_link *attach_kprobe(const struct bpf_program *prog);
|
||||
static struct bpf_link *attach_tp(const struct bpf_program *prog);
|
||||
static struct bpf_link *attach_raw_tp(const struct bpf_program *prog);
|
||||
static struct bpf_link *attach_trace(const struct bpf_program *prog);
|
||||
static struct bpf_link *attach_lsm(const struct bpf_program *prog);
|
||||
static struct bpf_link *attach_iter(const struct bpf_program *prog);
|
||||
|
||||
static const struct bpf_sec_def section_defs[] = {
|
||||
BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
|
||||
@ -8244,35 +8278,37 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (prog->type == BPF_PROG_TYPE_UNSPEC) {
|
||||
const struct bpf_sec_def *sec_def;
|
||||
/* prevent the use of BPF prog with invalid type */
|
||||
if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
|
||||
pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
|
||||
map->name, prog->name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
sec_def = find_sec_def(prog->sec_name);
|
||||
if (sec_def &&
|
||||
sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
|
||||
/* for pr_warn */
|
||||
prog->type = sec_def->prog_type;
|
||||
goto invalid_prog;
|
||||
}
|
||||
|
||||
prog->type = BPF_PROG_TYPE_STRUCT_OPS;
|
||||
/* if we haven't yet processed this BPF program, record proper
|
||||
* attach_btf_id and member_idx
|
||||
*/
|
||||
if (!prog->attach_btf_id) {
|
||||
prog->attach_btf_id = st_ops->type_id;
|
||||
prog->expected_attach_type = member_idx;
|
||||
} else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
|
||||
prog->attach_btf_id != st_ops->type_id ||
|
||||
prog->expected_attach_type != member_idx) {
|
||||
goto invalid_prog;
|
||||
}
|
||||
|
||||
/* struct_ops BPF prog can be re-used between multiple
|
||||
* .struct_ops as long as it's the same struct_ops struct
|
||||
* definition and the same function pointer field
|
||||
*/
|
||||
if (prog->attach_btf_id != st_ops->type_id ||
|
||||
prog->expected_attach_type != member_idx) {
|
||||
pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
|
||||
map->name, prog->name, prog->sec_name, prog->type,
|
||||
prog->attach_btf_id, prog->expected_attach_type, name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
st_ops->progs[member_idx] = prog;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
invalid_prog:
|
||||
pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
|
||||
map->name, prog->name, prog->sec_name, prog->type,
|
||||
prog->attach_btf_id, prog->expected_attach_type, name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
#define BTF_TRACE_PREFIX "btf_trace_"
|
||||
@ -8428,28 +8464,15 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd,
|
||||
{
|
||||
enum bpf_attach_type attach_type = prog->expected_attach_type;
|
||||
__u32 attach_prog_fd = prog->attach_prog_fd;
|
||||
const char *name = prog->sec_name, *attach_name;
|
||||
const struct bpf_sec_def *sec = NULL;
|
||||
int i, err = 0;
|
||||
const char *attach_name;
|
||||
int err = 0;
|
||||
|
||||
if (!name)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
|
||||
if (!section_defs[i].is_attach_btf)
|
||||
continue;
|
||||
if (strncmp(name, section_defs[i].sec, section_defs[i].len))
|
||||
continue;
|
||||
|
||||
sec = §ion_defs[i];
|
||||
break;
|
||||
}
|
||||
|
||||
if (!sec) {
|
||||
pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
|
||||
if (!prog->sec_def || !prog->sec_def->is_attach_btf) {
|
||||
pr_warn("failed to identify BTF ID based on ELF section name '%s'\n",
|
||||
prog->sec_name);
|
||||
return -ESRCH;
|
||||
}
|
||||
attach_name = name + sec->len;
|
||||
attach_name = prog->sec_name + prog->sec_def->len;
|
||||
|
||||
/* BPF program's BTF ID */
|
||||
if (attach_prog_fd) {
|
||||
@ -8483,27 +8506,28 @@ int libbpf_attach_type_by_name(const char *name,
|
||||
enum bpf_attach_type *attach_type)
|
||||
{
|
||||
char *type_names;
|
||||
int i;
|
||||
const struct bpf_sec_def *sec_def;
|
||||
|
||||
if (!name)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
|
||||
if (strncmp(name, section_defs[i].sec, section_defs[i].len))
|
||||
continue;
|
||||
if (!section_defs[i].is_attachable)
|
||||
return libbpf_err(-EINVAL);
|
||||
*attach_type = section_defs[i].expected_attach_type;
|
||||
return 0;
|
||||
}
|
||||
pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
|
||||
type_names = libbpf_get_type_names(true);
|
||||
if (type_names != NULL) {
|
||||
pr_debug("attachable section(type) names are:%s\n", type_names);
|
||||
free(type_names);
|
||||
sec_def = find_sec_def(name);
|
||||
if (!sec_def) {
|
||||
pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
|
||||
type_names = libbpf_get_type_names(true);
|
||||
if (type_names != NULL) {
|
||||
pr_debug("attachable section(type) names are:%s\n", type_names);
|
||||
free(type_names);
|
||||
}
|
||||
|
||||
return libbpf_err(-EINVAL);
|
||||
}
|
||||
|
||||
return libbpf_err(-EINVAL);
|
||||
if (!sec_def->is_attachable)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
*attach_type = sec_def->expected_attach_type;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bpf_map__fd(const struct bpf_map *map)
|
||||
@ -8987,9 +9011,57 @@ int bpf_link__unpin(struct bpf_link *link)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int poke_kprobe_events(bool add, const char *name, bool retprobe, uint64_t offset)
|
||||
{
|
||||
int fd, ret = 0;
|
||||
pid_t p = getpid();
|
||||
char cmd[260], probename[128], probefunc[128];
|
||||
const char *file = "/sys/kernel/debug/tracing/kprobe_events";
|
||||
|
||||
if (retprobe)
|
||||
snprintf(probename, sizeof(probename), "kretprobes/%s_libbpf_%u", name, p);
|
||||
else
|
||||
snprintf(probename, sizeof(probename), "kprobes/%s_libbpf_%u", name, p);
|
||||
|
||||
if (offset)
|
||||
snprintf(probefunc, sizeof(probefunc), "%s+%zu", name, (size_t)offset);
|
||||
|
||||
if (add) {
|
||||
snprintf(cmd, sizeof(cmd), "%c:%s %s",
|
||||
retprobe ? 'r' : 'p',
|
||||
probename,
|
||||
offset ? probefunc : name);
|
||||
} else {
|
||||
snprintf(cmd, sizeof(cmd), "-:%s", probename);
|
||||
}
|
||||
|
||||
fd = open(file, O_WRONLY | O_APPEND, 0);
|
||||
if (!fd)
|
||||
return -errno;
|
||||
ret = write(fd, cmd, strlen(cmd));
|
||||
if (ret < 0)
|
||||
ret = -errno;
|
||||
close(fd);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int add_kprobe_event_legacy(const char *name, bool retprobe, uint64_t offset)
|
||||
{
|
||||
return poke_kprobe_events(true, name, retprobe, offset);
|
||||
}
|
||||
|
||||
static inline int remove_kprobe_event_legacy(const char *name, bool retprobe)
|
||||
{
|
||||
return poke_kprobe_events(false, name, retprobe, 0);
|
||||
}
|
||||
|
||||
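To make the tracefs text format concrete, this is roughly what poke_kprobe_events() writes for a process with pid 1234 attaching to do_sys_open (the function name and offset are illustrative):

/*
 * add kprobe at offset 16:  "p:kprobes/do_sys_open_libbpf_1234 do_sys_open+16"
 * add kretprobe:            "r:kretprobes/do_sys_open_libbpf_1234 do_sys_open"
 * remove the kprobe again:  "-:kprobes/do_sys_open_libbpf_1234"
 * each appended as a single line to /sys/kernel/debug/tracing/kprobe_events
 */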
struct bpf_link_perf {
|
||||
struct bpf_link link;
|
||||
int perf_event_fd;
|
||||
/* legacy kprobe support: keep track of probe identifier and type */
|
||||
char *legacy_probe_name;
|
||||
bool legacy_is_retprobe;
|
||||
};
|
||||
|
||||
static int bpf_link_perf_detach(struct bpf_link *link)
|
||||
@ -9004,17 +9076,23 @@ static int bpf_link_perf_detach(struct bpf_link *link)
|
||||
close(perf_link->perf_event_fd);
|
||||
close(link->fd);
|
||||
|
||||
return libbpf_err(err);
|
||||
/* legacy kprobe needs to be removed after perf event fd closure */
|
||||
if (perf_link->legacy_probe_name)
|
||||
err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
|
||||
perf_link->legacy_is_retprobe);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void bpf_link_perf_dealloc(struct bpf_link *link)
|
||||
{
|
||||
struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
|
||||
|
||||
free(perf_link->legacy_probe_name);
|
||||
free(perf_link);
|
||||
}
|
||||
|
||||
struct bpf_link *bpf_program__attach_perf_event_opts(struct bpf_program *prog, int pfd,
|
||||
struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
|
||||
const struct bpf_perf_event_opts *opts)
|
||||
{
|
||||
char errmsg[STRERR_BUFSIZE];
|
||||
@ -9089,7 +9167,7 @@ err_out:
|
||||
return libbpf_err_ptr(err);
|
||||
}
|
||||
|
||||
struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, int pfd)
|
||||
struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
|
||||
{
|
||||
return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
|
||||
}
|
||||
@ -9124,6 +9202,18 @@ static int parse_uint_from_file(const char *file, const char *fmt)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int determine_kprobe_perf_type_legacy(const char *func_name, bool is_retprobe)
|
||||
{
|
||||
char file[192];
|
||||
|
||||
snprintf(file, sizeof(file),
|
||||
"/sys/kernel/debug/tracing/events/%s/%s_libbpf_%d/id",
|
||||
is_retprobe ? "kretprobes" : "kprobes",
|
||||
func_name, getpid());
|
||||
|
||||
return parse_uint_from_file(file, "%d\n");
|
||||
}
|
||||
|
||||
static int determine_kprobe_perf_type(void)
|
||||
{
|
||||
const char *file = "/sys/bus/event_source/devices/kprobe/type";
|
||||
@ -9206,16 +9296,52 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
|
||||
return pfd;
|
||||
}
|
||||
|
||||
static int perf_event_kprobe_open_legacy(bool retprobe, const char *name, uint64_t offset, int pid)
|
||||
{
|
||||
struct perf_event_attr attr = {};
|
||||
char errmsg[STRERR_BUFSIZE];
|
||||
int type, pfd, err;
|
||||
|
||||
err = add_kprobe_event_legacy(name, retprobe, offset);
|
||||
if (err < 0) {
|
||||
pr_warn("failed to add legacy kprobe event: %s\n",
|
||||
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
|
||||
return err;
|
||||
}
|
||||
type = determine_kprobe_perf_type_legacy(name, retprobe);
|
||||
if (type < 0) {
|
||||
pr_warn("failed to determine legacy kprobe event id: %s\n",
|
||||
libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
|
||||
return type;
|
||||
}
|
||||
attr.size = sizeof(attr);
|
||||
attr.config = type;
|
||||
attr.type = PERF_TYPE_TRACEPOINT;
|
||||
|
||||
pfd = syscall(__NR_perf_event_open, &attr,
|
||||
pid < 0 ? -1 : pid, /* pid */
|
||||
pid == -1 ? 0 : -1, /* cpu */
|
||||
-1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
|
||||
if (pfd < 0) {
|
||||
err = -errno;
|
||||
pr_warn("legacy kprobe perf_event_open() failed: %s\n",
|
||||
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
|
||||
return err;
|
||||
}
|
||||
return pfd;
|
||||
}
|
||||
|
||||
struct bpf_link *
|
||||
bpf_program__attach_kprobe_opts(struct bpf_program *prog,
|
||||
bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
|
||||
const char *func_name,
|
||||
const struct bpf_kprobe_opts *opts)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
|
||||
char errmsg[STRERR_BUFSIZE];
|
||||
char *legacy_probe = NULL;
|
||||
struct bpf_link *link;
|
||||
unsigned long offset;
|
||||
bool retprobe;
|
||||
bool retprobe, legacy;
|
||||
int pfd, err;
|
||||
|
||||
if (!OPTS_VALID(opts, bpf_kprobe_opts))
|
||||
@ -9225,8 +9351,19 @@ bpf_program__attach_kprobe_opts(struct bpf_program *prog,
|
||||
offset = OPTS_GET(opts, offset, 0);
|
||||
pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
|
||||
|
||||
pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
|
||||
offset, -1 /* pid */, 0 /* ref_ctr_off */);
|
||||
legacy = determine_kprobe_perf_type() < 0;
|
||||
if (!legacy) {
|
||||
pfd = perf_event_open_probe(false /* uprobe */, retprobe,
|
||||
func_name, offset,
|
||||
-1 /* pid */, 0 /* ref_ctr_off */);
|
||||
} else {
|
||||
legacy_probe = strdup(func_name);
|
||||
if (!legacy_probe)
|
||||
return libbpf_err_ptr(-ENOMEM);
|
||||
|
||||
pfd = perf_event_kprobe_open_legacy(retprobe, func_name,
|
||||
offset, -1 /* pid */);
|
||||
}
|
||||
if (pfd < 0) {
|
||||
pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
|
||||
prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
|
||||
@ -9242,10 +9379,17 @@ bpf_program__attach_kprobe_opts(struct bpf_program *prog,
|
||||
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
|
||||
return libbpf_err_ptr(err);
|
||||
}
|
||||
if (legacy) {
|
||||
struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
|
||||
|
||||
perf_link->legacy_probe_name = legacy_probe;
|
||||
perf_link->legacy_is_retprobe = retprobe;
|
||||
}
|
||||
|
||||
return link;
|
||||
}
|
||||
|
||||
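For completeness, a hedged usage sketch of the opts-based attach above; the function name and offset are made up. On kernels without the "kprobe" perf PMU, libbpf now transparently falls back to the legacy kprobe_events path shown earlier:

	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kopts,
		.offset = 16,
		.retprobe = false,
	);
	struct bpf_link *link;

	link = bpf_program__attach_kprobe_opts(prog, "do_sys_open", &kopts);
	if (libbpf_get_error(link)) {
		link = NULL;
		/* handle error */
	}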
struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
|
||||
struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
|
||||
bool retprobe,
|
||||
const char *func_name)
|
||||
{
|
||||
@ -9256,8 +9400,7 @@ struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
|
||||
return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
|
||||
}
|
||||
|
||||
static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog)
|
||||
static struct bpf_link *attach_kprobe(const struct bpf_program *prog)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
|
||||
unsigned long offset = 0;
|
||||
@ -9266,8 +9409,8 @@ static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
|
||||
char *func;
|
||||
int n, err;
|
||||
|
||||
func_name = prog->sec_name + sec->len;
|
||||
opts.retprobe = strcmp(sec->sec, "kretprobe/") == 0;
|
||||
func_name = prog->sec_name + prog->sec_def->len;
|
||||
opts.retprobe = strcmp(prog->sec_def->sec, "kretprobe/") == 0;
|
||||
|
||||
n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
|
||||
if (n < 1) {
|
||||
@ -9289,7 +9432,7 @@ static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
|
||||
}
|
||||
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_uprobe_opts(struct bpf_program *prog, pid_t pid,
|
||||
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
|
||||
const char *binary_path, size_t func_offset,
|
||||
const struct bpf_uprobe_opts *opts)
|
||||
{
|
||||
@ -9329,7 +9472,7 @@ bpf_program__attach_uprobe_opts(struct bpf_program *prog, pid_t pid,
|
||||
return link;
|
||||
}
|
||||
|
||||
struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
|
||||
struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
|
||||
bool retprobe, pid_t pid,
|
||||
const char *binary_path,
|
||||
size_t func_offset)
|
||||
@ -9389,7 +9532,7 @@ static int perf_event_open_tracepoint(const char *tp_category,
|
||||
return pfd;
|
||||
}
|
||||
|
||||
struct bpf_link *bpf_program__attach_tracepoint_opts(struct bpf_program *prog,
|
||||
struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
|
||||
const char *tp_category,
|
||||
const char *tp_name,
|
||||
const struct bpf_tracepoint_opts *opts)
|
||||
@ -9423,15 +9566,14 @@ struct bpf_link *bpf_program__attach_tracepoint_opts(struct bpf_program *prog,
|
||||
return link;
|
||||
}
|
||||
|
||||
struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
|
||||
struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
|
||||
const char *tp_category,
|
||||
const char *tp_name)
|
||||
{
|
||||
return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
|
||||
}
|
||||
|
||||
static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog)
|
||||
static struct bpf_link *attach_tp(const struct bpf_program *prog)
|
||||
{
|
||||
char *sec_name, *tp_cat, *tp_name;
|
||||
struct bpf_link *link;
|
||||
@ -9441,7 +9583,7 @@ static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
|
||||
return libbpf_err_ptr(-ENOMEM);
|
||||
|
||||
/* extract "tp/<category>/<name>" */
|
||||
tp_cat = sec_name + sec->len;
|
||||
tp_cat = sec_name + prog->sec_def->len;
|
||||
tp_name = strchr(tp_cat, '/');
|
||||
if (!tp_name) {
|
||||
free(sec_name);
|
||||
@ -9455,7 +9597,7 @@ static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
|
||||
return link;
|
||||
}
|
||||
|
||||
struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
|
||||
struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
|
||||
const char *tp_name)
|
||||
{
|
||||
char errmsg[STRERR_BUFSIZE];
|
||||
@ -9485,16 +9627,15 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
|
||||
return link;
|
||||
}
|
||||
|
||||
static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog)
|
||||
static struct bpf_link *attach_raw_tp(const struct bpf_program *prog)
|
||||
{
|
||||
const char *tp_name = prog->sec_name + sec->len;
|
||||
const char *tp_name = prog->sec_name + prog->sec_def->len;
|
||||
|
||||
return bpf_program__attach_raw_tracepoint(prog, tp_name);
|
||||
}
|
||||
|
||||
/* Common logic for all BPF program types that attach to a btf_id */
|
||||
static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
|
||||
static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog)
|
||||
{
|
||||
char errmsg[STRERR_BUFSIZE];
|
||||
struct bpf_link *link;
|
||||
@ -9523,30 +9664,28 @@ static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
|
||||
return (struct bpf_link *)link;
|
||||
}
|
||||
|
||||
struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
|
||||
struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
|
||||
{
|
||||
return bpf_program__attach_btf_id(prog);
|
||||
}
|
||||
|
||||
struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
|
||||
struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
|
||||
{
|
||||
return bpf_program__attach_btf_id(prog);
|
||||
}
|
||||
|
||||
static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog)
|
||||
static struct bpf_link *attach_trace(const struct bpf_program *prog)
|
||||
{
|
||||
return bpf_program__attach_trace(prog);
|
||||
}
|
||||
|
||||
static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog)
|
||||
static struct bpf_link *attach_lsm(const struct bpf_program *prog)
|
||||
{
|
||||
return bpf_program__attach_lsm(prog);
|
||||
}
|
||||
|
||||
static struct bpf_link *
|
||||
bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
|
||||
bpf_program__attach_fd(const struct bpf_program *prog, int target_fd, int btf_id,
|
||||
const char *target_name)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
|
||||
@ -9582,24 +9721,24 @@ bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
|
||||
}
|
||||
|
||||
struct bpf_link *
|
||||
bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
|
||||
bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
|
||||
{
|
||||
return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
|
||||
}
|
||||
|
||||
struct bpf_link *
|
||||
bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
|
||||
bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
|
||||
{
|
||||
return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
|
||||
}
|
||||
|
||||
struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
|
||||
struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
|
||||
{
|
||||
/* target_fd/target_ifindex use the same field in LINK_CREATE */
|
||||
return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
|
||||
}
|
||||
|
||||
struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
|
||||
struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
|
||||
int target_fd,
|
||||
const char *attach_func_name)
|
||||
{
|
||||
@ -9632,7 +9771,7 @@ struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
|
||||
}
|
||||
|
||||
struct bpf_link *
|
||||
bpf_program__attach_iter(struct bpf_program *prog,
|
||||
bpf_program__attach_iter(const struct bpf_program *prog,
|
||||
const struct bpf_iter_attach_opts *opts)
|
||||
{
|
||||
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
|
||||
@ -9671,21 +9810,17 @@ bpf_program__attach_iter(struct bpf_program *prog,
|
||||
return link;
|
||||
}
|
||||
|
||||
static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
|
||||
struct bpf_program *prog)
|
||||
static struct bpf_link *attach_iter(const struct bpf_program *prog)
|
||||
{
|
||||
return bpf_program__attach_iter(prog, NULL);
|
||||
}
|
||||
|
||||
struct bpf_link *bpf_program__attach(struct bpf_program *prog)
|
||||
struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
|
||||
{
|
||||
const struct bpf_sec_def *sec_def;
|
||||
|
||||
sec_def = find_sec_def(prog->sec_name);
|
||||
if (!sec_def || !sec_def->attach_fn)
|
||||
if (!prog->sec_def || !prog->sec_def->attach_fn)
|
||||
return libbpf_err_ptr(-ESRCH);
|
||||
|
||||
return sec_def->attach_fn(sec_def, prog);
|
||||
return prog->sec_def->attach_fn(prog);
|
||||
}
|
||||
|
||||
static int bpf_link__detach_struct_ops(struct bpf_link *link)
|
||||
@ -9698,7 +9833,7 @@ static int bpf_link__detach_struct_ops(struct bpf_link *link)
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
|
||||
struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
|
||||
{
|
||||
struct bpf_struct_ops *st_ops;
|
||||
struct bpf_link *link;
|
||||
@ -10511,18 +10646,29 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
|
||||
{
|
||||
int btf_obj_fd = 0, btf_id = 0, err;
|
||||
|
||||
if (!prog || attach_prog_fd < 0 || !attach_func_name)
|
||||
if (!prog || attach_prog_fd < 0)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
if (prog->obj->loaded)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
if (attach_prog_fd && !attach_func_name) {
|
||||
/* remember attach_prog_fd and let bpf_program__load() find
|
||||
* BTF ID during the program load
|
||||
*/
|
||||
prog->attach_prog_fd = attach_prog_fd;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (attach_prog_fd) {
|
||||
btf_id = libbpf_find_prog_btf_id(attach_func_name,
|
||||
attach_prog_fd);
|
||||
if (btf_id < 0)
|
||||
return libbpf_err(btf_id);
|
||||
} else {
|
||||
if (!attach_func_name)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
/* load btf_vmlinux, if not yet */
|
||||
err = bpf_object__load_vmlinux_btf(prog->obj, true);
|
||||
if (err)
|
||||
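A hedged sketch of the two ways callers can use bpf_program__set_attach_target() after this change (the skeleton field and fd names are illustrative); passing a NULL attach_func_name defers BTF ID resolution to load time:

	/* freplace/fexit-style program targeting another BPF program: */
	err = bpf_program__set_attach_target(skel->progs.new_connect4,
					     target_prog_fd, "connect_v4_prog");

	/* or remember only the target fd and let libbpf derive the function
	 * from the program's own SEC() name during load:
	 */
	err = bpf_program__set_attach_target(skel->progs.new_connect4,
					     target_prog_fd, NULL);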
@ -10764,16 +10910,15 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
|
||||
for (i = 0; i < s->prog_cnt; i++) {
|
||||
struct bpf_program *prog = *s->progs[i].prog;
|
||||
struct bpf_link **link = s->progs[i].link;
|
||||
const struct bpf_sec_def *sec_def;
|
||||
|
||||
if (!prog->load)
|
||||
continue;
|
||||
|
||||
sec_def = find_sec_def(prog->sec_name);
|
||||
if (!sec_def || !sec_def->attach_fn)
|
||||
/* auto-attaching not supported for this program */
|
||||
if (!prog->sec_def || !prog->sec_def->attach_fn)
|
||||
continue;
|
||||
|
||||
*link = sec_def->attach_fn(sec_def, prog);
|
||||
*link = bpf_program__attach(prog);
|
||||
err = libbpf_get_error(*link);
|
||||
if (err) {
|
||||
pr_warn("failed to auto-attach program '%s': %d\n",
|
||||
|
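Seen from the user side, the skeleton auto-attach path above reduces to the usual generated helpers; a sketch with a hypothetical skeleton name:

	struct myprog_bpf *skel;
	int err;

	skel = myprog_bpf__open_and_load();
	if (!skel)
		return -1;

	/* walks every program and calls bpf_program__attach() for those
	 * whose section type has an auto-attach handler
	 */
	err = myprog_bpf__attach(skel);
	if (err)
		myprog_bpf__destroy(skel);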
@ -83,12 +83,15 @@ struct bpf_object_open_opts {
|
||||
* Non-relocatable instructions are replaced with invalid ones to
|
||||
* prevent accidental errors.
|
||||
* */
|
||||
LIBBPF_DEPRECATED_SINCE(0, 6, "field has no effect")
|
||||
bool relaxed_core_relocs;
|
||||
/* maps that set the 'pinning' attribute in their definition will have
|
||||
* their pin_path attribute set to a file in this directory, and be
|
||||
* auto-pinned to that path on load; defaults to "/sys/fs/bpf".
|
||||
*/
|
||||
const char *pin_root_path;
|
||||
|
||||
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__set_attach_target() on each individual bpf_program")
|
||||
__u32 attach_prog_fd;
|
||||
/* Additional kernel config content that augments and overrides
|
||||
* system Kconfig for CONFIG_xxx externs.
|
||||
@ -243,7 +246,7 @@ LIBBPF_API int bpf_link__detach(struct bpf_link *link);
|
||||
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
|
||||
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach(struct bpf_program *prog);
|
||||
bpf_program__attach(const struct bpf_program *prog);
|
||||
|
||||
struct bpf_perf_event_opts {
	/* size of this struct, for forward/backward compatibility */
@ -254,10 +257,10 @@ struct bpf_perf_event_opts {
|
||||
#define bpf_perf_event_opts__last_field bpf_cookie
|
||||
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_perf_event(struct bpf_program *prog, int pfd);
|
||||
bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);
|
||||
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_perf_event_opts(struct bpf_program *prog, int pfd,
|
||||
bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
|
||||
const struct bpf_perf_event_opts *opts);
|
||||
|
||||
struct bpf_kprobe_opts {
|
||||
@ -274,10 +277,10 @@ struct bpf_kprobe_opts {
|
||||
#define bpf_kprobe_opts__last_field retprobe
|
||||
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_kprobe(struct bpf_program *prog, bool retprobe,
|
||||
bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
|
||||
const char *func_name);
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_kprobe_opts(struct bpf_program *prog,
|
||||
bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
|
||||
const char *func_name,
|
||||
const struct bpf_kprobe_opts *opts);
|
||||
|
||||
@ -297,11 +300,11 @@ struct bpf_uprobe_opts {
|
||||
#define bpf_uprobe_opts__last_field retprobe
|
||||
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_uprobe(struct bpf_program *prog, bool retprobe,
|
||||
bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe,
|
||||
pid_t pid, const char *binary_path,
|
||||
size_t func_offset);
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_uprobe_opts(struct bpf_program *prog, pid_t pid,
|
||||
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
|
||||
const char *binary_path, size_t func_offset,
|
||||
const struct bpf_uprobe_opts *opts);
|
||||
|
||||
@ -314,35 +317,35 @@ struct bpf_tracepoint_opts {
|
||||
#define bpf_tracepoint_opts__last_field bpf_cookie
|
||||
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_tracepoint(struct bpf_program *prog,
|
||||
bpf_program__attach_tracepoint(const struct bpf_program *prog,
|
||||
const char *tp_category,
|
||||
const char *tp_name);
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_tracepoint_opts(struct bpf_program *prog,
|
||||
bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
|
||||
const char *tp_category,
|
||||
const char *tp_name,
|
||||
const struct bpf_tracepoint_opts *opts);
|
||||
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
|
||||
bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
|
||||
const char *tp_name);
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_trace(struct bpf_program *prog);
|
||||
bpf_program__attach_trace(const struct bpf_program *prog);
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_lsm(struct bpf_program *prog);
|
||||
bpf_program__attach_lsm(const struct bpf_program *prog);
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd);
|
||||
bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd);
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_netns(struct bpf_program *prog, int netns_fd);
|
||||
bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd);
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_xdp(struct bpf_program *prog, int ifindex);
|
||||
bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex);
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_freplace(struct bpf_program *prog,
|
||||
bpf_program__attach_freplace(const struct bpf_program *prog,
|
||||
int target_fd, const char *attach_func_name);
|
||||
|
||||
struct bpf_map;
|
||||
|
||||
LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map);
|
||||
LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
|
||||
|
||||
struct bpf_iter_attach_opts {
|
||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||
@ -352,7 +355,7 @@ struct bpf_iter_attach_opts {
|
||||
#define bpf_iter_attach_opts__last_field link_info_len
|
||||
|
||||
LIBBPF_API struct bpf_link *
|
||||
bpf_program__attach_iter(struct bpf_program *prog,
|
||||
bpf_program__attach_iter(const struct bpf_program *prog,
|
||||
const struct bpf_iter_attach_opts *opts);
|
||||
|
||||
struct bpf_insn;
|
||||
@ -854,7 +857,7 @@ struct bpf_object_skeleton {
|
||||
size_t sz; /* size of this struct, for forward/backward compatibility */
|
||||
|
||||
const char *name;
|
||||
void *data;
|
||||
const void *data;
|
||||
size_t data_sz;
|
||||
|
||||
struct bpf_object **obj;
|
||||
|
@ -386,3 +386,8 @@ LIBBPF_0.5.0 {
|
||||
btf_dump__dump_type_data;
|
||||
libbpf_set_strict_mode;
|
||||
} LIBBPF_0.4.0;
|
||||
|
||||
LIBBPF_0.6.0 {
|
||||
global:
|
||||
btf__add_tag;
|
||||
} LIBBPF_0.5.0;
|
||||
|
@ -10,6 +10,7 @@
#define __LIBBPF_LIBBPF_COMMON_H

#include <string.h>
#include "libbpf_version.h"

#ifndef LIBBPF_API
#define LIBBPF_API __attribute__((visibility("default")))
@ -17,6 +18,29 @@

#define LIBBPF_DEPRECATED(msg) __attribute__((deprecated(msg)))

/* Mark a symbol as deprecated when libbpf version is >= {major}.{minor} */
#define LIBBPF_DEPRECATED_SINCE(major, minor, msg)			\
	__LIBBPF_MARK_DEPRECATED_ ## major ## _ ## minor		\
		(LIBBPF_DEPRECATED("libbpf v" # major "." # minor "+: " msg))

#define __LIBBPF_CURRENT_VERSION_GEQ(major, minor)			\
	(LIBBPF_MAJOR_VERSION > (major) ||				\
	 (LIBBPF_MAJOR_VERSION == (major) && LIBBPF_MINOR_VERSION >= (minor)))

/* Add checks for other versions below when planning deprecation of API symbols
 * with the LIBBPF_DEPRECATED_SINCE macro.
 */
#if __LIBBPF_CURRENT_VERSION_GEQ(0, 6)
#define __LIBBPF_MARK_DEPRECATED_0_6(X) X
#else
#define __LIBBPF_MARK_DEPRECATED_0_6(X)
#endif
#if __LIBBPF_CURRENT_VERSION_GEQ(0, 7)
#define __LIBBPF_MARK_DEPRECATED_0_7(X) X
#else
#define __LIBBPF_MARK_DEPRECATED_0_7(X)
#endif
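To illustrate how the versioned deprecation machinery above is consumed, the declaration below is taken from this same series; the expansion comment is a sketch:

LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_into_kernel instead")
LIBBPF_API int btf__load(struct btf *btf);

/* With headers advertising libbpf >= 0.6 this expands (roughly) to
 *
 *   __attribute__((deprecated("libbpf v0.6+: use btf__load_into_kernel instead")))
 *   LIBBPF_API int btf__load(struct btf *btf);
 *
 * and to a plain, non-deprecated declaration on older versions.
 */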
/* Helper macro to declare and initialize libbpf options struct
|
||||
*
|
||||
* This dance with uninitialized declaration, followed by memset to zero,
|
||||
|
@ -69,6 +69,8 @@
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
#define BTF_TYPE_FLOAT_ENC(name, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
#define BTF_TYPE_TAG_ENC(value, type, component_idx) \
	BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TAG, 0, 0), type), (component_idx)
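A sketch of what one BTF_TYPE_TAG_ENC() entry contributes to a raw types[] array (values are illustrative: name offset 1, target type id 2, tag on the whole target):

	/* BTF_TYPE_TAG_ENC(1, 2, -1) expands to the four __u32 words of a
	 * BTF_KIND_TAG record: name_off = 1, info = kind TAG/vlen 0/kflag 0,
	 * type = 2, followed by struct btf_tag { .component_idx = -1 }.
	 */
	__u32 tag_words[] = { BTF_TYPE_TAG_ENC(1, 2, -1) };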
#ifndef likely
|
||||
#define likely(x) __builtin_expect(!!(x), 1)
|
||||
@ -90,17 +92,30 @@
|
||||
/* Symbol versioning is different between static and shared library.
|
||||
* Properly versioned symbols are needed for shared library, but
|
||||
* only the symbol of the new version is needed for static library.
|
||||
* Starting with GNU C 10, use symver attribute instead of .symver assembler
|
||||
* directive, which works better with GCC LTO builds.
|
||||
*/
|
||||
#ifdef SHARED
|
||||
# define COMPAT_VERSION(internal_name, api_name, version) \
|
||||
#if defined(SHARED) && defined(__GNUC__) && __GNUC__ >= 10
|
||||
|
||||
#define DEFAULT_VERSION(internal_name, api_name, version) \
|
||||
__attribute__((symver(#api_name "@@" #version)))
|
||||
#define COMPAT_VERSION(internal_name, api_name, version) \
|
||||
__attribute__((symver(#api_name "@" #version)))
|
||||
|
||||
#elif defined(SHARED)
|
||||
|
||||
#define COMPAT_VERSION(internal_name, api_name, version) \
|
||||
asm(".symver " #internal_name "," #api_name "@" #version);
|
||||
# define DEFAULT_VERSION(internal_name, api_name, version) \
|
||||
#define DEFAULT_VERSION(internal_name, api_name, version) \
|
||||
asm(".symver " #internal_name "," #api_name "@@" #version);
|
||||
#else
|
||||
# define COMPAT_VERSION(internal_name, api_name, version)
|
||||
# define DEFAULT_VERSION(internal_name, api_name, version) \
|
||||
|
||||
#else /* !SHARED */
|
||||
|
||||
#define COMPAT_VERSION(internal_name, api_name, version)
|
||||
#define DEFAULT_VERSION(internal_name, api_name, version) \
|
||||
extern typeof(internal_name) api_name \
|
||||
__attribute__((alias(#internal_name)));
|
||||
|
||||
#endif
|
||||
|
||||
extern void libbpf_print(enum libbpf_print_level level,
|
||||
|
tools/lib/bpf/libbpf_version.h (new file, 9 lines)
@ -0,0 +1,9 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (C) 2021 Facebook */
#ifndef __LIBBPF_VERSION_H
#define __LIBBPF_VERSION_H

#define LIBBPF_MAJOR_VERSION 0
#define LIBBPF_MINOR_VERSION 6

#endif /* __LIBBPF_VERSION_H */
@ -281,6 +281,7 @@ out_mmap:
|
||||
return err;
|
||||
}
|
||||
|
||||
DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
|
||||
int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
|
||||
__u64 size, struct xsk_ring_prod *fill,
|
||||
struct xsk_ring_cons *comp,
|
||||
@ -345,6 +346,7 @@ struct xsk_umem_config_v1 {
|
||||
__u32 frame_headroom;
|
||||
};
|
||||
|
||||
COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
|
||||
int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
|
||||
__u64 size, struct xsk_ring_prod *fill,
|
||||
struct xsk_ring_cons *comp,
|
||||
@ -358,8 +360,6 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
|
||||
return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
|
||||
&config);
|
||||
}
|
||||
COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
|
||||
DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
|
||||
|
||||
static enum xsk_prog get_xsk_prog(void)
|
||||
{
|
||||
|
tools/testing/selftests/bpf/.gitignore (vendored)
@ -9,8 +9,9 @@ test_tag
FEATURE-DUMP.libbpf
fixdep
test_dev_cgroup
/test_progs*
!test_progs.h
/test_progs
/test_progs-no_alu32
/test_progs-bpf_gcc
test_verifier_log
feature
test_sock
@ -512,14 +512,14 @@ $(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
|
||||
$(Q)$(CXX) $(CFLAGS) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@
|
||||
|
||||
# Benchmark runner
|
||||
$(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h
|
||||
$(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h $(BPFOBJ)
|
||||
$(call msg,CC,,$@)
|
||||
$(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@
|
||||
$(OUTPUT)/bench_rename.o: $(OUTPUT)/test_overhead.skel.h
|
||||
$(OUTPUT)/bench_trigger.o: $(OUTPUT)/trigger_bench.skel.h
|
||||
$(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \
|
||||
$(OUTPUT)/perfbuf_bench.skel.h
|
||||
$(OUTPUT)/bench.o: bench.h testing_helpers.h
|
||||
$(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ)
|
||||
$(OUTPUT)/bench: LDLIBS += -lm
|
||||
$(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \
|
||||
$(OUTPUT)/bench_count.o \
|
||||
|
@ -201,6 +201,20 @@ Without it, the error from compiling bpf selftests looks like:

__ https://reviews.llvm.org/D93563

btf_tag test and Clang version
==============================

The btf_tag selftest requires LLVM support to recognize the btf_tag attribute.
It was introduced in `Clang 14`__.

Without it, the btf_tag selftest will be skipped and you will observe:

.. code-block:: console

  #<test_num> btf_tag:SKIP

__ https://reviews.llvm.org/D106614
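For reference, a minimal sketch of what the btf_tag attribute looks like in C source built with a new-enough Clang (the names below are illustrative, not taken from the selftest):

.. code-block:: c

  #define __tag(x) __attribute__((btf_tag(x)))

  struct key_t {
          int a;
          int b __tag("member_b");   /* tag on a member */
  } __tag("key_type");               /* tag on the struct itself */

  int g __tag("global_var");         /* tag on a global variable */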
Clang dependencies for static linking tests
|
||||
===========================================
|
||||
|
||||
|
@ -13,6 +13,18 @@
|
||||
|
||||
DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
|
||||
|
||||
noinline int bpf_testmod_loop_test(int n)
|
||||
{
|
||||
int i, sum = 0;
|
||||
|
||||
/* the primary goal of this test is to test LBR. Create a lot of
|
||||
* branches in the function, so we can catch it easily.
|
||||
*/
|
||||
for (i = 0; i < n; i++)
|
||||
sum += i;
|
||||
return sum;
|
||||
}
|
||||
|
||||
noinline ssize_t
|
||||
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr,
|
||||
@ -24,7 +36,11 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
|
||||
.len = len,
|
||||
};
|
||||
|
||||
trace_bpf_testmod_test_read(current, &ctx);
|
||||
/* This is always true. Use the check to make sure the compiler
|
||||
* doesn't remove bpf_testmod_loop_test.
|
||||
*/
|
||||
if (bpf_testmod_loop_test(101) > 100)
|
||||
trace_bpf_testmod_test_read(current, &ctx);
|
||||
|
||||
return -EIO; /* always fail */
|
||||
}
|
||||
@ -71,4 +87,3 @@ module_exit(bpf_testmod_exit);
|
||||
MODULE_AUTHOR("Andrii Nakryiko");
|
||||
MODULE_DESCRIPTION("BPF selftests module");
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
|
||||
|
@ -24,11 +24,12 @@ static const char * const btf_kind_str_mapping[] = {
|
||||
[BTF_KIND_VAR] = "VAR",
|
||||
[BTF_KIND_DATASEC] = "DATASEC",
|
||||
[BTF_KIND_FLOAT] = "FLOAT",
|
||||
[BTF_KIND_TAG] = "TAG",
|
||||
};
|
||||
|
||||
static const char *btf_kind_str(__u16 kind)
|
||||
{
|
||||
if (kind > BTF_KIND_DATASEC)
|
||||
if (kind > BTF_KIND_TAG)
|
||||
return "UNKNOWN";
|
||||
return btf_kind_str_mapping[kind];
|
||||
}
|
||||
@ -177,6 +178,10 @@ int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id)
|
||||
case BTF_KIND_FLOAT:
|
||||
fprintf(out, " size=%u", t->size);
|
||||
break;
|
||||
case BTF_KIND_TAG:
|
||||
fprintf(out, " type_id=%u component_idx=%d",
|
||||
t->type, btf_tag(t)->component_idx);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -589,7 +589,7 @@ out:
|
||||
|
||||
static void test_bpf_hash_map(void)
|
||||
{
|
||||
__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
|
||||
__u32 expected_key_a = 0, expected_key_b = 0;
|
||||
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
|
||||
struct bpf_iter_bpf_hash_map *skel;
|
||||
int err, i, len, map_fd, iter_fd;
|
||||
@ -638,7 +638,6 @@ static void test_bpf_hash_map(void)
|
||||
val = i + 4;
|
||||
expected_key_a += key.a;
|
||||
expected_key_b += key.b;
|
||||
expected_key_c += key.c;
|
||||
expected_val += val;
|
||||
|
||||
err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
|
||||
@ -685,7 +684,7 @@ out:
|
||||
|
||||
static void test_bpf_percpu_hash_map(void)
|
||||
{
|
||||
__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
|
||||
__u32 expected_key_a = 0, expected_key_b = 0;
|
||||
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
|
||||
struct bpf_iter_bpf_percpu_hash_map *skel;
|
||||
int err, i, j, len, map_fd, iter_fd;
|
||||
@ -722,7 +721,6 @@ static void test_bpf_percpu_hash_map(void)
|
||||
key.c = i + 3;
|
||||
expected_key_a += key.a;
|
||||
expected_key_b += key.b;
|
||||
expected_key_c += key.c;
|
||||
|
||||
for (j = 0; j < bpf_num_possible_cpus(); j++) {
|
||||
*(__u32 *)(val + j * 8) = i + j;
|
||||
|
@ -39,8 +39,8 @@ static bool always_log;
|
||||
#define BTF_END_RAW 0xdeadbeef
|
||||
#define NAME_TBD 0xdeadb33f
|
||||
|
||||
#define NAME_NTH(N) (0xffff0000 | N)
|
||||
#define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xffff0000)
|
||||
#define NAME_NTH(N) (0xfffe0000 | N)
|
||||
#define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xfffe0000)
|
||||
#define GET_NAME_NTH_IDX(X) (X & 0x0000ffff)
|
||||
|
||||
#define MAX_NR_RAW_U32 1024
|
||||
@ -3661,6 +3661,249 @@ static struct btf_raw_test raw_tests[] = {
|
||||
.err_str = "Invalid type_size",
|
||||
},
|
||||
|
||||
{
|
||||
.descr = "tag test #1, struct/member, well-formed",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_STRUCT_ENC(0, 2, 8), /* [2] */
|
||||
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
|
||||
BTF_MEMBER_ENC(NAME_TBD, 1, 32),
|
||||
BTF_TAG_ENC(NAME_TBD, 2, -1),
|
||||
BTF_TAG_ENC(NAME_TBD, 2, 0),
|
||||
BTF_TAG_ENC(NAME_TBD, 2, 1),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0m1\0m2\0tag1\0tag2\0tag3"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 8,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 2,
|
||||
.max_entries = 1,
|
||||
},
|
||||
{
|
||||
.descr = "tag test #2, union/member, well-formed",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_UNION_ENC(NAME_TBD, 2, 4), /* [2] */
|
||||
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
|
||||
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
|
||||
BTF_TAG_ENC(NAME_TBD, 2, -1),
|
||||
BTF_TAG_ENC(NAME_TBD, 2, 0),
|
||||
BTF_TAG_ENC(NAME_TBD, 2, 1),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 4,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 2,
|
||||
.max_entries = 1,
|
||||
},
|
||||
{
|
||||
.descr = "tag test #3, variable, well-formed",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
|
||||
BTF_VAR_ENC(NAME_TBD, 1, 1), /* [3] */
|
||||
BTF_TAG_ENC(NAME_TBD, 2, -1),
|
||||
BTF_TAG_ENC(NAME_TBD, 3, -1),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0local\0global\0tag1\0tag2"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 4,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 1,
|
||||
.max_entries = 1,
|
||||
},
|
||||
{
|
||||
.descr = "tag test #4, func/parameter, well-formed",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
|
||||
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
|
||||
BTF_TAG_ENC(NAME_TBD, 3, -1),
|
||||
BTF_TAG_ENC(NAME_TBD, 3, 0),
|
||||
BTF_TAG_ENC(NAME_TBD, 3, 1),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0arg1\0arg2\0f\0tag1\0tag2\0tag3"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 4,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 1,
|
||||
.max_entries = 1,
|
||||
},
|
||||
{
|
||||
.descr = "tag test #5, invalid value",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
|
||||
BTF_TAG_ENC(0, 2, -1),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0local\0tag"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 4,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 1,
|
||||
.max_entries = 1,
|
||||
.btf_load_err = true,
|
||||
.err_str = "Invalid value",
|
||||
},
|
||||
{
|
||||
.descr = "tag test #6, invalid target type",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_TAG_ENC(NAME_TBD, 1, -1),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0tag1"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 4,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 1,
|
||||
.max_entries = 1,
|
||||
.btf_load_err = true,
|
||||
.err_str = "Invalid type",
|
||||
},
|
||||
{
|
||||
.descr = "tag test #7, invalid vlen",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
|
||||
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_TAG, 0, 1), 2), (0),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0local\0tag1"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 4,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 1,
|
||||
.max_entries = 1,
|
||||
.btf_load_err = true,
|
||||
.err_str = "vlen != 0",
|
||||
},
|
||||
{
|
||||
.descr = "tag test #8, invalid kflag",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
|
||||
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_TAG, 1, 0), 2), (-1),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0local\0tag1"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 4,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 1,
|
||||
.max_entries = 1,
|
||||
.btf_load_err = true,
|
||||
.err_str = "Invalid btf_info kind_flag",
|
||||
},
|
||||
{
|
||||
.descr = "tag test #9, var, invalid component_idx",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
|
||||
BTF_TAG_ENC(NAME_TBD, 2, 0),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0local\0tag"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 4,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 1,
|
||||
.max_entries = 1,
|
||||
.btf_load_err = true,
|
||||
.err_str = "Invalid component_idx",
|
||||
},
|
||||
{
|
||||
.descr = "tag test #10, struct member, invalid component_idx",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_STRUCT_ENC(0, 2, 8), /* [2] */
|
||||
BTF_MEMBER_ENC(NAME_TBD, 1, 0),
|
||||
BTF_MEMBER_ENC(NAME_TBD, 1, 32),
|
||||
BTF_TAG_ENC(NAME_TBD, 2, 2),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0m1\0m2\0tag"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 8,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 2,
|
||||
.max_entries = 1,
|
||||
.btf_load_err = true,
|
||||
.err_str = "Invalid component_idx",
|
||||
},
|
||||
{
|
||||
.descr = "tag test #11, func parameter, invalid component_idx",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
|
||||
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
|
||||
BTF_TAG_ENC(NAME_TBD, 3, 2),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0arg1\0arg2\0f\0tag"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 4,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 1,
|
||||
.max_entries = 1,
|
||||
.btf_load_err = true,
|
||||
.err_str = "Invalid component_idx",
|
||||
},
|
||||
{
|
||||
.descr = "tag test #12, < -1 component_idx",
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
|
||||
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
|
||||
BTF_TAG_ENC(NAME_TBD, 3, -2),
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0arg1\0arg2\0f\0tag"),
|
||||
.map_type = BPF_MAP_TYPE_ARRAY,
|
||||
.map_name = "tag_type_check_btf",
|
||||
.key_size = sizeof(int),
|
||||
.value_size = 4,
|
||||
.key_type_id = 1,
|
||||
.value_type_id = 1,
|
||||
.max_entries = 1,
|
||||
.btf_load_err = true,
|
||||
.err_str = "Invalid component_idx",
|
||||
},
|
||||
|
||||
}; /* struct btf_raw_test raw_tests[] */
|
||||
|
||||
static const char *get_next_str(const char *start, const char *end)
|
||||
@ -6421,27 +6664,33 @@ const struct btf_dedup_test dedup_tests[] = {
|
||||
BTF_MEMBER_ENC(NAME_NTH(4), 5, 64), /* const int *a; */
|
||||
BTF_MEMBER_ENC(NAME_NTH(5), 2, 128), /* int b[16]; */
|
||||
BTF_MEMBER_ENC(NAME_NTH(6), 1, 640), /* int c; */
|
||||
BTF_MEMBER_ENC(NAME_NTH(8), 13, 672), /* float d; */
|
||||
BTF_MEMBER_ENC(NAME_NTH(8), 15, 672), /* float d; */
|
||||
/* ptr -> [3] struct s */
|
||||
BTF_PTR_ENC(3), /* [4] */
|
||||
/* ptr -> [6] const int */
|
||||
BTF_PTR_ENC(6), /* [5] */
|
||||
/* const -> [1] int */
|
||||
BTF_CONST_ENC(1), /* [6] */
|
||||
/* tag -> [3] struct s */
|
||||
BTF_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */
|
||||
/* tag -> [3] struct s, member 1 */
|
||||
BTF_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */
|
||||
|
||||
/* full copy of the above */
|
||||
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [7] */
|
||||
BTF_TYPE_ARRAY_ENC(7, 7, 16), /* [8] */
|
||||
BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [9] */
|
||||
BTF_MEMBER_ENC(NAME_NTH(3), 10, 0),
|
||||
BTF_MEMBER_ENC(NAME_NTH(4), 11, 64),
|
||||
BTF_MEMBER_ENC(NAME_NTH(5), 8, 128),
|
||||
BTF_MEMBER_ENC(NAME_NTH(6), 7, 640),
|
||||
BTF_MEMBER_ENC(NAME_NTH(8), 13, 672),
|
||||
BTF_PTR_ENC(9), /* [10] */
|
||||
BTF_PTR_ENC(12), /* [11] */
|
||||
BTF_CONST_ENC(7), /* [12] */
|
||||
BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [13] */
|
||||
BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [9] */
|
||||
BTF_TYPE_ARRAY_ENC(9, 9, 16), /* [10] */
|
||||
BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [11] */
|
||||
BTF_MEMBER_ENC(NAME_NTH(3), 12, 0),
|
||||
BTF_MEMBER_ENC(NAME_NTH(4), 13, 64),
|
||||
BTF_MEMBER_ENC(NAME_NTH(5), 10, 128),
|
||||
BTF_MEMBER_ENC(NAME_NTH(6), 9, 640),
|
||||
BTF_MEMBER_ENC(NAME_NTH(8), 15, 672),
|
||||
BTF_PTR_ENC(11), /* [12] */
|
||||
BTF_PTR_ENC(14), /* [13] */
|
||||
BTF_CONST_ENC(9), /* [14] */
|
||||
BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [15] */
|
||||
BTF_TAG_ENC(NAME_NTH(2), 11, -1), /* [16] */
|
||||
BTF_TAG_ENC(NAME_NTH(2), 11, 1), /* [17] */
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0float\0d"),
|
||||
@ -6458,14 +6707,16 @@ const struct btf_dedup_test dedup_tests[] = {
|
||||
BTF_MEMBER_ENC(NAME_NTH(1), 5, 64), /* const int *a; */
|
||||
BTF_MEMBER_ENC(NAME_NTH(2), 2, 128), /* int b[16]; */
|
||||
BTF_MEMBER_ENC(NAME_NTH(3), 1, 640), /* int c; */
|
||||
BTF_MEMBER_ENC(NAME_NTH(4), 7, 672), /* float d; */
|
||||
BTF_MEMBER_ENC(NAME_NTH(4), 9, 672), /* float d; */
|
||||
/* ptr -> [3] struct s */
|
||||
BTF_PTR_ENC(3), /* [4] */
|
||||
/* ptr -> [6] const int */
|
||||
BTF_PTR_ENC(6), /* [5] */
|
||||
/* const -> [1] int */
|
||||
BTF_CONST_ENC(1), /* [6] */
|
||||
BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [7] */
|
||||
BTF_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */
|
||||
BTF_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */
|
||||
BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [9] */
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0a\0b\0c\0d\0int\0float\0next\0s"),
|
||||
@ -6590,9 +6841,11 @@ const struct btf_dedup_test dedup_tests[] = {
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
|
||||
BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
|
||||
BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */
|
||||
BTF_TAG_ENC(NAME_TBD, 13, -1), /* [15] tag */
|
||||
BTF_TAG_ENC(NAME_TBD, 13, 1), /* [16] tag */
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N"),
|
||||
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P"),
|
||||
},
|
||||
.expect = {
|
||||
.raw_types = {
|
||||
@ -6616,9 +6869,11 @@ const struct btf_dedup_test dedup_tests[] = {
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
|
||||
BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
|
||||
BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */
|
||||
BTF_TAG_ENC(NAME_TBD, 13, -1), /* [15] tag */
|
||||
BTF_TAG_ENC(NAME_TBD, 13, 1), /* [16] tag */
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N"),
|
||||
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P"),
|
||||
},
|
||||
.opts = {
|
||||
.dont_resolve_fwds = false,
|
||||
@ -6767,6 +7022,152 @@ const struct btf_dedup_test dedup_tests[] = {
|
||||
.dedup_table_size = 1
|
||||
},
|
||||
},
|
||||
{
|
||||
.descr = "dedup: func/func_arg/var tags",
|
||||
.input = {
|
||||
.raw_types = {
|
||||
/* int */
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
/* static int t */
|
||||
BTF_VAR_ENC(NAME_NTH(1), 1, 0), /* [2] */
|
||||
/* void f(int a1, int a2) */
|
||||
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1),
|
||||
BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */
|
||||
/* tag -> t */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [6] */
|
||||
/* tag -> func */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 4, -1), /* [7] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 4, -1), /* [8] */
|
||||
/* tag -> func arg a1 */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 4, 1), /* [9] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 4, 1), /* [10] */
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"),
|
||||
},
|
||||
.expect = {
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_VAR_ENC(NAME_NTH(1), 1, 0), /* [2] */
|
||||
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1),
|
||||
BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 4, -1), /* [6] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 4, 1), /* [7] */
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"),
|
||||
},
|
||||
.opts = {
|
||||
.dont_resolve_fwds = false,
|
||||
},
|
||||
},
|
||||
{
|
||||
.descr = "dedup: func/func_param tags",
|
||||
.input = {
|
||||
.raw_types = {
|
||||
/* int */
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
/* void f(int a1, int a2) */
|
||||
BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1),
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
|
||||
BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */
|
||||
/* void f(int a1, int a2) */
|
||||
BTF_FUNC_PROTO_ENC(0, 2), /* [4] */
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1),
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
|
||||
BTF_FUNC_ENC(NAME_NTH(3), 4), /* [5] */
|
||||
/* tag -> f: tag1, tag2 */
|
||||
BTF_TAG_ENC(NAME_NTH(4), 3, -1), /* [6] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 3, -1), /* [7] */
|
||||
/* tag -> f/a2: tag1, tag2 */
|
||||
BTF_TAG_ENC(NAME_NTH(4), 3, 1), /* [8] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 3, 1), /* [9] */
|
||||
/* tag -> f: tag1, tag3 */
|
||||
BTF_TAG_ENC(NAME_NTH(4), 5, -1), /* [10] */
|
||||
BTF_TAG_ENC(NAME_NTH(6), 5, -1), /* [11] */
|
||||
/* tag -> f/a2: tag1, tag3 */
|
||||
BTF_TAG_ENC(NAME_NTH(4), 5, 1), /* [12] */
|
||||
BTF_TAG_ENC(NAME_NTH(6), 5, 1), /* [13] */
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"),
|
||||
},
|
||||
.expect = {
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_FUNC_PROTO_ENC(0, 2), /* [2] */
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1),
|
||||
BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
|
||||
BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */
|
||||
BTF_TAG_ENC(NAME_NTH(4), 3, -1), /* [4] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 3, -1), /* [5] */
|
||||
BTF_TAG_ENC(NAME_NTH(6), 3, -1), /* [6] */
|
||||
BTF_TAG_ENC(NAME_NTH(4), 3, 1), /* [7] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 3, 1), /* [8] */
|
||||
BTF_TAG_ENC(NAME_NTH(6), 3, 1), /* [9] */
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"),
|
||||
},
|
||||
.opts = {
|
||||
.dont_resolve_fwds = false,
|
||||
},
|
||||
},
|
||||
{
|
||||
.descr = "dedup: struct/struct_member tags",
|
||||
.input = {
|
||||
.raw_types = {
|
||||
/* int */
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */
|
||||
BTF_MEMBER_ENC(NAME_NTH(2), 1, 0),
|
||||
BTF_MEMBER_ENC(NAME_NTH(3), 1, 32),
|
||||
BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [3] */
|
||||
BTF_MEMBER_ENC(NAME_NTH(2), 1, 0),
|
||||
BTF_MEMBER_ENC(NAME_NTH(3), 1, 32),
|
||||
/* tag -> t: tag1, tag2 */
|
||||
BTF_TAG_ENC(NAME_NTH(4), 2, -1), /* [4] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */
|
||||
/* tag -> t/m2: tag1, tag2 */
|
||||
BTF_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */
|
||||
/* tag -> t: tag1, tag3 */
|
||||
BTF_TAG_ENC(NAME_NTH(4), 3, -1), /* [8] */
|
||||
BTF_TAG_ENC(NAME_NTH(6), 3, -1), /* [9] */
|
||||
/* tag -> t/m2: tag1, tag3 */
|
||||
BTF_TAG_ENC(NAME_NTH(4), 3, 1), /* [10] */
|
||||
BTF_TAG_ENC(NAME_NTH(6), 3, 1), /* [11] */
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"),
|
||||
},
|
||||
.expect = {
|
||||
.raw_types = {
|
||||
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
|
||||
BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */
|
||||
BTF_MEMBER_ENC(NAME_NTH(2), 1, 0),
|
||||
BTF_MEMBER_ENC(NAME_NTH(3), 1, 32),
|
||||
BTF_TAG_ENC(NAME_NTH(4), 2, -1), /* [3] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [4] */
|
||||
BTF_TAG_ENC(NAME_NTH(6), 2, -1), /* [5] */
|
||||
BTF_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */
|
||||
BTF_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */
|
||||
BTF_TAG_ENC(NAME_NTH(6), 2, 1), /* [8] */
|
||||
BTF_END_RAW,
|
||||
},
|
||||
BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"),
|
||||
},
|
||||
.opts = {
|
||||
.dont_resolve_fwds = false,
|
||||
},
|
||||
},
|
||||
|
||||
};
|
||||
|
||||
@ -6801,6 +7202,8 @@ static int btf_type_size(const struct btf_type *t)
|
||||
return base_size + sizeof(struct btf_var);
|
||||
case BTF_KIND_DATASEC:
|
||||
return base_size + vlen * sizeof(struct btf_var_secinfo);
|
||||
case BTF_KIND_TAG:
|
||||
return base_size + sizeof(struct btf_tag);
|
||||
default:
|
||||
fprintf(stderr, "Unsupported BTF_KIND:%u\n", kind);
|
||||
return -EINVAL;
|
||||
|
20 tools/testing/selftests/bpf/prog_tests/btf_tag.c Normal file
@ -0,0 +1,20 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2021 Facebook */
|
||||
#include <test_progs.h>
|
||||
#include "tag.skel.h"
|
||||
|
||||
void test_btf_tag(void)
|
||||
{
|
||||
struct tag *skel;
|
||||
|
||||
skel = tag__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "btf_tag"))
|
||||
return;
|
||||
|
||||
if (skel->rodata->skip_tests) {
|
||||
printf("%s:SKIP: btf_tag attribute not supported", __func__);
|
||||
test__skip();
|
||||
}
|
||||
|
||||
tag__destroy(skel);
|
||||
}
|
@ -281,5 +281,26 @@ void test_btf_write() {
|
||||
"[17] DATASEC 'datasec1' size=12 vlen=1\n"
|
||||
"\ttype_id=1 offset=4 size=8", "raw_dump");
|
||||
|
||||
/* TAG */
|
||||
id = btf__add_tag(btf, "tag1", 16, -1);
|
||||
ASSERT_EQ(id, 18, "tag_id");
|
||||
t = btf__type_by_id(btf, 18);
|
||||
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value");
|
||||
ASSERT_EQ(btf_kind(t), BTF_KIND_TAG, "tag_kind");
|
||||
ASSERT_EQ(t->type, 16, "tag_type");
|
||||
ASSERT_EQ(btf_tag(t)->component_idx, -1, "tag_component_idx");
|
||||
ASSERT_STREQ(btf_type_raw_dump(btf, 18),
|
||||
"[18] TAG 'tag1' type_id=16 component_idx=-1", "raw_dump");
|
||||
|
||||
id = btf__add_tag(btf, "tag2", 14, 1);
|
||||
ASSERT_EQ(id, 19, "tag_id");
|
||||
t = btf__type_by_id(btf, 19);
|
||||
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag2", "tag_value");
|
||||
ASSERT_EQ(btf_kind(t), BTF_KIND_TAG, "tag_kind");
|
||||
ASSERT_EQ(t->type, 14, "tag_type");
|
||||
ASSERT_EQ(btf_tag(t)->component_idx, 1, "tag_component_idx");
|
||||
ASSERT_STREQ(btf_type_raw_dump(btf, 19),
|
||||
"[19] TAG 'tag2' type_id=14 component_idx=1", "raw_dump");
|
||||
|
||||
btf__free(btf);
|
||||
}
|
||||
|
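The btf__add_tag() assertions above double as API documentation for the new libbpf call. A minimal sketch of building tagged BTF with it (the type names, tag string and error handling are illustrative, not taken from the selftest; intermediate error checks are omitted for brevity):

#include <bpf/btf.h>
#include <bpf/libbpf.h>

static struct btf *build_tagged_btf(void)
{
	struct btf *btf = btf__new_empty();
	int int_id, var_id, tag_id;

	if (libbpf_get_error(btf))
		return NULL;

	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);    /* [1] */
	var_id = btf__add_var(btf, "t", BTF_VAR_STATIC, int_id);  /* [2] */
	/* component_idx == -1: the tag applies to the variable itself */
	tag_id = btf__add_tag(btf, "my_tag", var_id, -1);         /* [3] */
	if (tag_id < 0) {
		btf__free(btf);
		return NULL;
	}
	return btf;
}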
@ -30,7 +30,7 @@ static int duration = 0;
|
||||
.output_len = sizeof(struct core_reloc_module_output), \
|
||||
.prog_sec_name = sec_name, \
|
||||
.raw_tp_name = tp_name, \
|
||||
.trigger = trigger_module_test_read, \
|
||||
.trigger = __trigger_module_test_read, \
|
||||
.needs_testmod = true, \
|
||||
}
|
||||
|
||||
@ -249,8 +249,7 @@ static int duration = 0;
|
||||
#define SIZE_CASE_COMMON(name) \
|
||||
.case_name = #name, \
|
||||
.bpf_obj_file = "test_core_reloc_size.o", \
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o", \
|
||||
.relaxed_core_relocs = true
|
||||
.btf_src_file = "btf__core_reloc_" #name ".o"
|
||||
|
||||
#define SIZE_OUTPUT_DATA(type) \
|
||||
STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \
|
||||
@ -475,19 +474,11 @@ static int setup_type_id_case_failure(struct core_reloc_test_case *test)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int trigger_module_test_read(const struct core_reloc_test_case *test)
|
||||
static int __trigger_module_test_read(const struct core_reloc_test_case *test)
|
||||
{
|
||||
struct core_reloc_module_output *exp = (void *)test->output;
|
||||
int fd, err;
|
||||
|
||||
fd = open("/sys/kernel/bpf_testmod", O_RDONLY);
|
||||
err = -errno;
|
||||
if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err))
|
||||
return err;
|
||||
|
||||
read(fd, NULL, exp->len); /* request expected number of bytes */
|
||||
close(fd);
|
||||
|
||||
trigger_module_test_read(exp->len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -60,7 +60,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
|
||||
struct bpf_object *obj = NULL, *tgt_obj;
|
||||
__u32 retval, tgt_prog_id, info_len;
|
||||
struct bpf_prog_info prog_info = {};
|
||||
struct bpf_program **prog = NULL;
|
||||
struct bpf_program **prog = NULL, *p;
|
||||
struct bpf_link **link = NULL;
|
||||
int err, tgt_fd, i;
|
||||
struct btf *btf;
|
||||
@ -69,9 +69,6 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
|
||||
&tgt_obj, &tgt_fd);
|
||||
if (!ASSERT_OK(err, "tgt_prog_load"))
|
||||
return;
|
||||
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
|
||||
.attach_prog_fd = tgt_fd,
|
||||
);
|
||||
|
||||
info_len = sizeof(prog_info);
|
||||
err = bpf_obj_get_info_by_fd(tgt_fd, &prog_info, &info_len);
|
||||
@ -89,10 +86,15 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
|
||||
if (!ASSERT_OK_PTR(prog, "prog_ptr"))
|
||||
goto close_prog;
|
||||
|
||||
obj = bpf_object__open_file(obj_file, &opts);
|
||||
obj = bpf_object__open_file(obj_file, NULL);
|
||||
if (!ASSERT_OK_PTR(obj, "obj_open"))
|
||||
goto close_prog;
|
||||
|
||||
bpf_object__for_each_program(p, obj) {
|
||||
err = bpf_program__set_attach_target(p, tgt_fd, NULL);
|
||||
ASSERT_OK(err, "set_attach_target");
|
||||
}
|
||||
|
||||
err = bpf_object__load(obj);
|
||||
if (!ASSERT_OK(err, "obj_load"))
|
||||
goto close_prog;
|
||||
@ -270,7 +272,7 @@ static void test_fmod_ret_freplace(void)
|
||||
struct bpf_link *freplace_link = NULL;
|
||||
struct bpf_program *prog;
|
||||
__u32 duration = 0;
|
||||
int err, pkt_fd;
|
||||
int err, pkt_fd, attach_prog_fd;
|
||||
|
||||
err = bpf_prog_load(tgt_name, BPF_PROG_TYPE_UNSPEC,
|
||||
&pkt_obj, &pkt_fd);
|
||||
@ -278,26 +280,32 @@ static void test_fmod_ret_freplace(void)
|
||||
if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
|
||||
tgt_name, err, errno))
|
||||
return;
|
||||
opts.attach_prog_fd = pkt_fd;
|
||||
|
||||
freplace_obj = bpf_object__open_file(freplace_name, &opts);
|
||||
freplace_obj = bpf_object__open_file(freplace_name, NULL);
|
||||
if (!ASSERT_OK_PTR(freplace_obj, "freplace_obj_open"))
|
||||
goto out;
|
||||
|
||||
prog = bpf_program__next(NULL, freplace_obj);
|
||||
err = bpf_program__set_attach_target(prog, pkt_fd, NULL);
|
||||
ASSERT_OK(err, "freplace__set_attach_target");
|
||||
|
||||
err = bpf_object__load(freplace_obj);
|
||||
if (CHECK(err, "freplace_obj_load", "err %d\n", err))
|
||||
goto out;
|
||||
|
||||
prog = bpf_program__next(NULL, freplace_obj);
|
||||
freplace_link = bpf_program__attach_trace(prog);
|
||||
if (!ASSERT_OK_PTR(freplace_link, "freplace_attach_trace"))
|
||||
goto out;
|
||||
|
||||
opts.attach_prog_fd = bpf_program__fd(prog);
|
||||
fmod_obj = bpf_object__open_file(fmod_ret_name, &opts);
|
||||
fmod_obj = bpf_object__open_file(fmod_ret_name, NULL);
|
||||
if (!ASSERT_OK_PTR(fmod_obj, "fmod_obj_open"))
|
||||
goto out;
|
||||
|
||||
attach_prog_fd = bpf_program__fd(prog);
|
||||
prog = bpf_program__next(NULL, fmod_obj);
|
||||
err = bpf_program__set_attach_target(prog, attach_prog_fd, NULL);
|
||||
ASSERT_OK(err, "fmod_ret_set_attach_target");
|
||||
|
||||
err = bpf_object__load(fmod_obj);
|
||||
if (CHECK(!err, "fmod_obj_load", "loading fmod_ret should fail\n"))
|
||||
goto out;
|
||||
@ -322,14 +330,14 @@ static void test_func_sockmap_update(void)
|
||||
}
|
||||
|
||||
static void test_obj_load_failure_common(const char *obj_file,
|
||||
const char *target_obj_file)
|
||||
|
||||
const char *target_obj_file)
|
||||
{
|
||||
/*
|
||||
* standalone test that asserts failure to load freplace prog
|
||||
* because of invalid return code.
|
||||
*/
|
||||
struct bpf_object *obj = NULL, *pkt_obj;
|
||||
struct bpf_program *prog;
|
||||
int err, pkt_fd;
|
||||
__u32 duration = 0;
|
||||
|
||||
@ -339,14 +347,15 @@ static void test_obj_load_failure_common(const char *obj_file,
|
||||
if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
|
||||
target_obj_file, err, errno))
|
||||
return;
|
||||
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
|
||||
.attach_prog_fd = pkt_fd,
|
||||
);
|
||||
|
||||
obj = bpf_object__open_file(obj_file, &opts);
|
||||
obj = bpf_object__open_file(obj_file, NULL);
|
||||
if (!ASSERT_OK_PTR(obj, "obj_open"))
|
||||
goto close_prog;
|
||||
|
||||
prog = bpf_program__next(NULL, obj);
|
||||
err = bpf_program__set_attach_target(prog, pkt_fd, NULL);
|
||||
ASSERT_OK(err, "set_attach_target");
|
||||
|
||||
/* It should fail to load the program */
|
||||
err = bpf_object__load(obj);
|
||||
if (CHECK(!err, "bpf_obj_load should fail", "err %d\n", err))
|
||||
|
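The same migration is applied throughout fexit_bpf2bpf.c: drop open_opts.attach_prog_fd and call bpf_program__set_attach_target() on each program before load, with a NULL attach_func_name so the function name from the program's SEC() is reused. A condensed sketch of that flow (the helper, file name and fd are placeholders; this assumes a libbpf of this vintage where bpf_object__open_file() errors are read via libbpf_get_error()):

#include <bpf/libbpf.h>

static int open_with_target(const char *file, int tgt_fd, struct bpf_object **out)
{
	struct bpf_object *obj;
	struct bpf_program *p;
	int err = 0;

	obj = bpf_object__open_file(file, NULL);
	if (libbpf_get_error(obj))
		return libbpf_get_error(obj);

	bpf_object__for_each_program(p, obj) {
		/* NULL attach_func_name: keep the name from the program's SEC() */
		err = bpf_program__set_attach_target(p, tgt_fd, NULL);
		if (err)
			break;
	}
	if (!err)
		err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return err;
	}
	*out = obj;
	return 0;
}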
100 tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c Normal file
@ -0,0 +1,100 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2021 Facebook */
|
||||
#include <test_progs.h>
|
||||
#include "get_branch_snapshot.skel.h"
|
||||
|
||||
static int *pfd_array;
|
||||
static int cpu_cnt;
|
||||
|
||||
static int create_perf_events(void)
|
||||
{
|
||||
struct perf_event_attr attr = {0};
|
||||
int cpu;
|
||||
|
||||
/* create perf event */
|
||||
attr.size = sizeof(attr);
|
||||
attr.type = PERF_TYPE_RAW;
|
||||
attr.config = 0x1b00;
|
||||
attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
|
||||
attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL |
|
||||
PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
|
||||
|
||||
cpu_cnt = libbpf_num_possible_cpus();
|
||||
pfd_array = malloc(sizeof(int) * cpu_cnt);
|
||||
if (!pfd_array) {
|
||||
cpu_cnt = 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
for (cpu = 0; cpu < cpu_cnt; cpu++) {
|
||||
pfd_array[cpu] = syscall(__NR_perf_event_open, &attr,
|
||||
-1, cpu, -1, PERF_FLAG_FD_CLOEXEC);
|
||||
if (pfd_array[cpu] < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
return cpu == 0;
|
||||
}
|
||||
|
||||
static void close_perf_events(void)
|
||||
{
|
||||
int cpu = 0;
|
||||
int fd;
|
||||
|
||||
while (cpu++ < cpu_cnt) {
|
||||
fd = pfd_array[cpu];
|
||||
if (fd < 0)
|
||||
break;
|
||||
close(fd);
|
||||
}
|
||||
free(pfd_array);
|
||||
}
|
||||
|
||||
void test_get_branch_snapshot(void)
|
||||
{
|
||||
struct get_branch_snapshot *skel = NULL;
|
||||
int err;
|
||||
|
||||
if (create_perf_events()) {
|
||||
test__skip(); /* system doesn't support LBR */
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
skel = get_branch_snapshot__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load"))
|
||||
goto cleanup;
|
||||
|
||||
err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low);
|
||||
if (!ASSERT_OK(err, "kallsyms_find"))
|
||||
goto cleanup;
|
||||
|
||||
err = kallsyms_find_next("bpf_testmod_loop_test", &skel->bss->address_high);
|
||||
if (!ASSERT_OK(err, "kallsyms_find_next"))
|
||||
goto cleanup;
|
||||
|
||||
err = get_branch_snapshot__attach(skel);
|
||||
if (!ASSERT_OK(err, "get_branch_snapshot__attach"))
|
||||
goto cleanup;
|
||||
|
||||
trigger_module_test_read(100);
|
||||
|
||||
if (skel->bss->total_entries < 16) {
|
||||
/* too few entries for the hit/waste test */
|
||||
test__skip();
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr");
|
||||
|
||||
/* Given we stop LBR in software, we will waste a few entries.
|
||||
* But we should try to waste as few as possible entries. We are at
|
||||
* about 7 on x86_64 systems.
|
||||
* Add a check for < 10 so that we get heads-up when something
|
||||
* changes and wastes too many entries.
|
||||
*/
|
||||
ASSERT_LT(skel->bss->wasted_entries, 10, "check_wasted_entries");
|
||||
|
||||
cleanup:
|
||||
get_branch_snapshot__destroy(skel);
|
||||
close_perf_events();
|
||||
}
|
@ -6,45 +6,6 @@
|
||||
|
||||
static int duration;
|
||||
|
||||
static int trigger_module_test_read(int read_sz)
|
||||
{
|
||||
int fd, err;
|
||||
|
||||
fd = open("/sys/kernel/bpf_testmod", O_RDONLY);
|
||||
err = -errno;
|
||||
if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err))
|
||||
return err;
|
||||
|
||||
read(fd, NULL, read_sz);
|
||||
close(fd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int trigger_module_test_write(int write_sz)
|
||||
{
|
||||
int fd, err;
|
||||
char *buf = malloc(write_sz);
|
||||
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(buf, 'a', write_sz);
|
||||
buf[write_sz-1] = '\0';
|
||||
|
||||
fd = open("/sys/kernel/bpf_testmod", O_WRONLY);
|
||||
err = -errno;
|
||||
if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err)) {
|
||||
free(buf);
|
||||
return err;
|
||||
}
|
||||
|
||||
write(fd, buf, write_sz);
|
||||
close(fd);
|
||||
free(buf);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int delete_module(const char *name, int flags)
|
||||
{
|
||||
return syscall(__NR_delete_module, name, flags);
|
||||
|
@ -11,12 +11,14 @@ void test_skb_ctx(void)
|
||||
.cb[3] = 4,
|
||||
.cb[4] = 5,
|
||||
.priority = 6,
|
||||
.ingress_ifindex = 11,
|
||||
.ifindex = 1,
|
||||
.tstamp = 7,
|
||||
.wire_len = 100,
|
||||
.gso_segs = 8,
|
||||
.mark = 9,
|
||||
.gso_size = 10,
|
||||
.hwtstamp = 11,
|
||||
};
|
||||
struct bpf_prog_test_run_attr tattr = {
|
||||
.data_in = &pkt_v4,
|
||||
@ -97,6 +99,10 @@ void test_skb_ctx(void)
|
||||
"ctx_out_ifindex",
|
||||
"skb->ifindex == %d, expected %d\n",
|
||||
skb.ifindex, 1);
|
||||
CHECK_ATTR(skb.ingress_ifindex != 11,
|
||||
"ctx_out_ingress_ifindex",
|
||||
"skb->ingress_ifindex == %d, expected %d\n",
|
||||
skb.ingress_ifindex, 11);
|
||||
CHECK_ATTR(skb.tstamp != 8,
|
||||
"ctx_out_tstamp",
|
||||
"skb->tstamp == %lld, expected %d\n",
|
||||
|
@ -18,6 +18,8 @@ void test_skeleton(void)
|
||||
struct test_skeleton__data *data;
|
||||
struct test_skeleton__rodata *rodata;
|
||||
struct test_skeleton__kconfig *kcfg;
|
||||
const void *elf_bytes;
|
||||
size_t elf_bytes_sz = 0;
|
||||
|
||||
skel = test_skeleton__open();
|
||||
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
|
||||
@ -91,6 +93,10 @@ void test_skeleton(void)
|
||||
CHECK(bss->kern_ver != kcfg->LINUX_KERNEL_VERSION, "ext2",
|
||||
"got %d != exp %d\n", bss->kern_ver, kcfg->LINUX_KERNEL_VERSION);
|
||||
|
||||
elf_bytes = test_skeleton__elf_bytes(&elf_bytes_sz);
|
||||
ASSERT_OK_PTR(elf_bytes, "elf_bytes");
|
||||
ASSERT_GE(elf_bytes_sz, 0, "elf_bytes_sz");
|
||||
|
||||
cleanup:
|
||||
test_skeleton__destroy(skel);
|
||||
}
|
||||
|
@ -219,10 +219,7 @@ out:
|
||||
bpf_object__close(obj);
|
||||
}
|
||||
|
||||
/* test_tailcall_3 checks that the count value of the tail call limit
|
||||
* enforcement matches with expectations.
|
||||
*/
|
||||
static void test_tailcall_3(void)
|
||||
static void test_tailcall_count(const char *which)
|
||||
{
|
||||
int err, map_fd, prog_fd, main_fd, data_fd, i, val;
|
||||
struct bpf_map *prog_array, *data_map;
|
||||
@ -231,7 +228,7 @@ static void test_tailcall_3(void)
|
||||
__u32 retval, duration;
|
||||
char buff[128] = {};
|
||||
|
||||
err = bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
|
||||
err = bpf_prog_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
|
||||
&prog_fd);
|
||||
if (CHECK_FAIL(err))
|
||||
return;
|
||||
@ -296,6 +293,22 @@ out:
|
||||
bpf_object__close(obj);
|
||||
}
|
||||
|
||||
/* test_tailcall_3 checks that the count value of the tail call limit
|
||||
* enforcement matches with expectations. JIT uses direct jump.
|
||||
*/
|
||||
static void test_tailcall_3(void)
|
||||
{
|
||||
test_tailcall_count("tailcall3.o");
|
||||
}
|
||||
|
||||
/* test_tailcall_6 checks that the count value of the tail call limit
|
||||
* enforcement matches with expectations. JIT uses indirect jump.
|
||||
*/
|
||||
static void test_tailcall_6(void)
|
||||
{
|
||||
test_tailcall_count("tailcall6.o");
|
||||
}
|
||||
|
||||
/* test_tailcall_4 checks that the kernel properly selects indirect jump
|
||||
* for the case where the key is not known. Latter is passed via global
|
||||
* data to select different targets we can compare return value of.
|
||||
@ -822,6 +835,8 @@ void test_tailcalls(void)
|
||||
test_tailcall_4();
|
||||
if (test__start_subtest("tailcall_5"))
|
||||
test_tailcall_5();
|
||||
if (test__start_subtest("tailcall_6"))
|
||||
test_tailcall_6();
|
||||
if (test__start_subtest("tailcall_bpf2bpf_1"))
|
||||
test_tailcall_bpf2bpf_1();
|
||||
if (test__start_subtest("tailcall_bpf2bpf_2"))
|
||||
|
@ -633,7 +633,7 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result)
|
||||
struct nstoken *nstoken = NULL;
|
||||
int err;
|
||||
int tunnel_pid = -1;
|
||||
int src_fd, target_fd;
|
||||
int src_fd, target_fd = -1;
|
||||
int ifindex;
|
||||
|
||||
/* Start a L3 TUN/TAP tunnel between the src and dst namespaces.
|
||||
|
@ -169,11 +169,7 @@ static __always_inline void bictcp_hystart_reset(struct sock *sk)
|
||||
ca->sample_cnt = 0;
|
||||
}
|
||||
|
||||
/* "struct_ops/" prefix is not a requirement
|
||||
* It will be recognized as BPF_PROG_TYPE_STRUCT_OPS
|
||||
* as long as it is used in one of the func ptr
|
||||
* under SEC(".struct_ops").
|
||||
*/
|
||||
/* "struct_ops/" prefix is a requirement */
|
||||
SEC("struct_ops/bpf_cubic_init")
|
||||
void BPF_PROG(bpf_cubic_init, struct sock *sk)
|
||||
{
|
||||
@ -188,10 +184,8 @@ void BPF_PROG(bpf_cubic_init, struct sock *sk)
|
||||
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
|
||||
}
|
||||
|
||||
/* No prefix in SEC will also work.
|
||||
* The remaining tcp-cubic functions have an easier way.
|
||||
*/
|
||||
SEC("no-sec-prefix-bictcp_cwnd_event")
|
||||
/* "struct_ops" prefix is a requirement */
|
||||
SEC("struct_ops/bpf_cubic_cwnd_event")
|
||||
void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
|
||||
{
|
||||
if (event == CA_EVENT_TX_START) {
|
||||
|
40 tools/testing/selftests/bpf/progs/get_branch_snapshot.c Normal file
@ -0,0 +1,40 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2021 Facebook */
|
||||
#include "vmlinux.h"
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
||||
__u64 test1_hits = 0;
|
||||
__u64 address_low = 0;
|
||||
__u64 address_high = 0;
|
||||
int wasted_entries = 0;
|
||||
long total_entries = 0;
|
||||
|
||||
#define ENTRY_CNT 32
|
||||
struct perf_branch_entry entries[ENTRY_CNT] = {};
|
||||
|
||||
static inline bool in_range(__u64 val)
|
||||
{
|
||||
return (val >= address_low) && (val < address_high);
|
||||
}
|
||||
|
||||
SEC("fexit/bpf_testmod_loop_test")
|
||||
int BPF_PROG(test1, int n, int ret)
|
||||
{
|
||||
long i;
|
||||
|
||||
total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
|
||||
total_entries /= sizeof(struct perf_branch_entry);
|
||||
|
||||
for (i = 0; i < ENTRY_CNT; i++) {
|
||||
if (i >= total_entries)
|
||||
break;
|
||||
if (in_range(entries[i].from) && in_range(entries[i].to))
|
||||
test1_hits++;
|
||||
else if (!test1_hits)
|
||||
wasted_entries++;
|
||||
}
|
||||
return 0;
|
||||
}
|
49 tools/testing/selftests/bpf/progs/tag.c Normal file
@ -0,0 +1,49 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2021 Facebook */
|
||||
#include "vmlinux.h"
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
|
||||
#ifndef __has_attribute
|
||||
#define __has_attribute(x) 0
|
||||
#endif
|
||||
|
||||
#if __has_attribute(btf_tag)
|
||||
#define __tag1 __attribute__((btf_tag("tag1")))
|
||||
#define __tag2 __attribute__((btf_tag("tag2")))
|
||||
volatile const bool skip_tests __tag1 __tag2 = false;
|
||||
#else
|
||||
#define __tag1
|
||||
#define __tag2
|
||||
volatile const bool skip_tests = true;
|
||||
#endif
|
||||
|
||||
struct key_t {
|
||||
int a;
|
||||
int b __tag1 __tag2;
|
||||
int c;
|
||||
} __tag1 __tag2;
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, 3);
|
||||
__type(key, struct key_t);
|
||||
__type(value, __u64);
|
||||
} hashmap1 SEC(".maps");
|
||||
|
||||
|
||||
static __noinline int foo(int x __tag1 __tag2) __tag1 __tag2
|
||||
{
|
||||
struct key_t key;
|
||||
__u64 val = 1;
|
||||
|
||||
key.a = key.b = key.c = x;
|
||||
bpf_map_update_elem(&hashmap1, &key, &val, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("fentry/bpf_fentry_test1")
|
||||
int BPF_PROG(sub, int x)
|
||||
{
|
||||
return foo(x);
|
||||
}
|
34 tools/testing/selftests/bpf/progs/tailcall6.c Normal file
@ -0,0 +1,34 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/bpf.h>
|
||||
|
||||
#include <bpf/bpf_helpers.h>
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
|
||||
__uint(max_entries, 1);
|
||||
__uint(key_size, sizeof(__u32));
|
||||
__uint(value_size, sizeof(__u32));
|
||||
} jmp_table SEC(".maps");
|
||||
|
||||
int count, which;
|
||||
|
||||
SEC("classifier/0")
|
||||
int bpf_func_0(struct __sk_buff *skb)
|
||||
{
|
||||
count++;
|
||||
if (__builtin_constant_p(which))
|
||||
__bpf_unreachable();
|
||||
bpf_tail_call(skb, &jmp_table, which);
|
||||
return 1;
|
||||
}
|
||||
|
||||
SEC("classifier")
|
||||
int entry(struct __sk_buff *skb)
|
||||
{
|
||||
if (__builtin_constant_p(which))
|
||||
__bpf_unreachable();
|
||||
bpf_tail_call(skb, &jmp_table, which);
|
||||
return 0;
|
||||
}
|
||||
|
||||
char __license[] SEC("license") = "GPL";
|
@ -25,6 +25,12 @@ int process(struct __sk_buff *skb)
|
||||
return 1;
|
||||
if (skb->gso_size != 10)
|
||||
return 1;
|
||||
if (skb->ingress_ifindex != 11)
|
||||
return 1;
|
||||
if (skb->ifindex != 1)
|
||||
return 1;
|
||||
if (skb->hwtstamp != 11)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -69,4 +69,7 @@
|
||||
#define BTF_TYPE_FLOAT_ENC(name, sz) \
|
||||
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
|
||||
|
||||
#define BTF_TAG_ENC(value, type, component_idx) \
|
||||
BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TAG, 0, 0), type), (component_idx)
|
||||
|
||||
#endif /* _TEST_BTF_H */
|
||||
|
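BTF_TAG_ENC() simply appends the struct btf_tag member after the three common btf_type words, matching the BTF_KIND_TAG layout from the documentation patch in this series. As a rough illustration (the concrete name_off and type id values are arbitrary), BTF_TAG_ENC(1, 2, -1) produces:

#include <linux/types.h>

__u32 raw_tag[4] = {
	1,          /* btf_type.name_off: offset of the tag string in the string section */
	17u << 24,  /* btf_type.info: BTF_KIND_TAG (17) in bits 24-28, kind_flag = 0, vlen = 0 */
	2,          /* btf_type.type: id of the struct/union/func/var being tagged */
	(__u32)-1,  /* struct btf_tag.component_idx: -1 = whole type, >= 0 = member/param index */
};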
@ -743,6 +743,45 @@ int cd_flavor_subdir(const char *exec_name)
|
||||
return chdir(flavor);
|
||||
}
|
||||
|
||||
int trigger_module_test_read(int read_sz)
|
||||
{
|
||||
int fd, err;
|
||||
|
||||
fd = open("/sys/kernel/bpf_testmod", O_RDONLY);
|
||||
err = -errno;
|
||||
if (!ASSERT_GE(fd, 0, "testmod_file_open"))
|
||||
return err;
|
||||
|
||||
read(fd, NULL, read_sz);
|
||||
close(fd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int trigger_module_test_write(int write_sz)
|
||||
{
|
||||
int fd, err;
|
||||
char *buf = malloc(write_sz);
|
||||
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(buf, 'a', write_sz);
|
||||
buf[write_sz-1] = '\0';
|
||||
|
||||
fd = open("/sys/kernel/bpf_testmod", O_WRONLY);
|
||||
err = -errno;
|
||||
if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
|
||||
free(buf);
|
||||
return err;
|
||||
}
|
||||
|
||||
write(fd, buf, write_sz);
|
||||
close(fd);
|
||||
free(buf);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define MAX_BACKTRACE_SZ 128
|
||||
void crash_handler(int signum)
|
||||
{
|
||||
|
@ -291,6 +291,8 @@ int compare_map_keys(int map1_fd, int map2_fd);
|
||||
int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
|
||||
int extract_build_id(char *build_id, size_t size);
|
||||
int kern_sync_rcu(void);
|
||||
int trigger_module_test_read(int read_sz);
|
||||
int trigger_module_test_write(int write_sz);
|
||||
|
||||
#ifdef __x86_64__
|
||||
#define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep"
|
||||
|
@ -1,4 +1,5 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <ctype.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
@ -117,6 +118,42 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
/* find the address of the next symbol of the same type, this can be used
|
||||
* to determine the end of a function.
|
||||
*/
|
||||
int kallsyms_find_next(const char *sym, unsigned long long *addr)
|
||||
{
|
||||
char type, found_type, name[500];
|
||||
unsigned long long value;
|
||||
bool found = false;
|
||||
int err = 0;
|
||||
FILE *f;
|
||||
|
||||
f = fopen("/proc/kallsyms", "r");
|
||||
if (!f)
|
||||
return -EINVAL;
|
||||
|
||||
while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
|
||||
/* Different types of symbols in kernel modules are mixed
|
||||
* in /proc/kallsyms. Only return the next matching type.
|
||||
* Use tolower() for type so that 'T' matches 't'.
|
||||
*/
|
||||
if (found && found_type == tolower(type)) {
|
||||
*addr = value;
|
||||
goto out;
|
||||
}
|
||||
if (strcmp(name, sym) == 0) {
|
||||
found = true;
|
||||
found_type = tolower(type);
|
||||
}
|
||||
}
|
||||
err = -ENOENT;
|
||||
|
||||
out:
|
||||
fclose(f);
|
||||
return err;
|
||||
}
|
||||
|
||||
void read_trace_pipe(void)
|
||||
{
|
||||
int trace_fd;
|
||||
|
@ -16,6 +16,11 @@ long ksym_get_addr(const char *name);
|
||||
/* open kallsyms and find addresses on the fly, faster than load + search. */
|
||||
int kallsyms_find(const char *sym, unsigned long long *addr);
|
||||
|
||||
/* find the address of the next symbol, this can be used to determine the
|
||||
* end of a function
|
||||
*/
|
||||
int kallsyms_find_next(const char *sym, unsigned long long *addr);
|
||||
|
||||
void read_trace_pipe(void);
|
||||
|
||||
ssize_t get_uprobe_offset(const void *addr, ssize_t base);
|
||||
|
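A small usage sketch (the wrapper name is made up) pairing kallsyms_find() with the new kallsyms_find_next() to bound a kernel function's [start, end) address range, as the get_branch_snapshot test does for bpf_testmod_loop_test:

#include "trace_helpers.h"

static int kallsyms_func_range(const char *sym, unsigned long long *start,
			       unsigned long long *end)
{
	int err;

	err = kallsyms_find(sym, start);
	if (err)
		return err;
	return kallsyms_find_next(sym, end);
}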
@ -1057,6 +1057,66 @@
|
||||
.result = ACCEPT,
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
|
||||
{
|
||||
"padding after gso_size is not accessible",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
|
||||
offsetofend(struct __sk_buff, gso_size)),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = REJECT,
|
||||
.result_unpriv = REJECT,
|
||||
.errstr = "invalid bpf_context access off=180 size=4",
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
|
||||
{
|
||||
"read hwtstamp from CGROUP_SKB",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, hwtstamp)),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = ACCEPT,
|
||||
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
|
||||
},
|
||||
{
|
||||
"read hwtstamp from CGROUP_SKB",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, hwtstamp)),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = ACCEPT,
|
||||
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
|
||||
},
|
||||
{
|
||||
"write hwtstamp from CGROUP_SKB",
|
||||
.insns = {
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
|
||||
offsetof(struct __sk_buff, hwtstamp)),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = REJECT,
|
||||
.result_unpriv = REJECT,
|
||||
.errstr = "invalid bpf_context access off=184 size=8",
|
||||
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
|
||||
},
|
||||
{
|
||||
"read hwtstamp from CLS",
|
||||
.insns = {
|
||||
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
|
||||
offsetof(struct __sk_buff, hwtstamp)),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 0),
|
||||
BPF_EXIT_INSN(),
|
||||
},
|
||||
.result = ACCEPT,
|
||||
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
|
||||
},
|
||||
{
|
||||
"check wire_len is not readable by sockets",
|
||||
.insns = {
|
||||
|
@ -62,6 +62,11 @@
|
||||
BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||
BPF_EXIT_INSN(),
|
||||
BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
|
||||
BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 0xefefef),
|
||||
BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||
BPF_EXIT_INSN(),
|
||||
BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
|
||||
BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
|
||||
BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
|
||||
@ -73,11 +78,22 @@
|
||||
BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||
BPF_EXIT_INSN(),
|
||||
BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
|
||||
BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0xefefef),
|
||||
BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||
BPF_EXIT_INSN(),
|
||||
BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
|
||||
BPF_LD_IMM64(BPF_REG_2, 0x2ad4d4aaULL),
|
||||
BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 0x2b),
|
||||
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||
BPF_EXIT_INSN(),
|
||||
BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
|
||||
BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
|
||||
BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
|
||||
BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
|
||||
BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
|
||||
BPF_LD_IMM64(BPF_REG_5, 0xeeff0d413122ULL),
|
||||
BPF_ALU32_REG(BPF_MUL, BPF_REG_5, BPF_REG_1),
|
||||
BPF_JMP_REG(BPF_JEQ, BPF_REG_5, BPF_REG_0, 2),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 1),
|
||||
BPF_EXIT_INSN(),
|
||||
BPF_MOV64_IMM(BPF_REG_0, 2),
|
||||
|
File diff suppressed because it is too large
@ -20,10 +20,9 @@
|
||||
#define MAX_INTERFACES 2
|
||||
#define MAX_INTERFACE_NAME_CHARS 7
|
||||
#define MAX_INTERFACES_NAMESPACE_CHARS 10
|
||||
#define MAX_SOCKS 1
|
||||
#define MAX_SOCKETS 2
|
||||
#define MAX_TEST_NAME_SIZE 32
|
||||
#define MAX_TEARDOWN_ITER 10
|
||||
#define MAX_BIDI_ITER 2
|
||||
#define MAX_BPF_ITER 2
|
||||
#define PKT_HDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
|
||||
sizeof(struct udphdr))
|
||||
#define MIN_PKT_SIZE 64
|
||||
@ -39,7 +38,10 @@
|
||||
#define BATCH_SIZE 8
|
||||
#define POLL_TMOUT 1000
|
||||
#define DEFAULT_PKT_CNT (4 * 1024)
|
||||
#define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
|
||||
#define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE)
|
||||
#define RX_FULL_RXQSIZE 32
|
||||
#define DEFAULT_OFFSET 256
|
||||
#define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1)
|
||||
|
||||
#define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
|
||||
@ -51,8 +53,13 @@ enum test_mode {
|
||||
};
|
||||
|
||||
enum test_type {
|
||||
TEST_TYPE_NOPOLL,
|
||||
TEST_TYPE_RUN_TO_COMPLETION,
|
||||
TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME,
|
||||
TEST_TYPE_POLL,
|
||||
TEST_TYPE_UNALIGNED,
|
||||
TEST_TYPE_ALIGNED_INV_DESC,
|
||||
TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME,
|
||||
TEST_TYPE_UNALIGNED_INV_DESC,
|
||||
TEST_TYPE_TEARDOWN,
|
||||
TEST_TYPE_BIDI,
|
||||
TEST_TYPE_STATS,
|
||||
@ -68,25 +75,21 @@ enum stat_test_type {
|
||||
STAT_TEST_TYPE_MAX
|
||||
};
|
||||
|
||||
static int configured_mode;
|
||||
static bool opt_pkt_dump;
|
||||
static u32 num_frames = DEFAULT_PKT_CNT / 4;
|
||||
static bool second_step;
|
||||
static int test_type;
|
||||
|
||||
static bool opt_verbose;
|
||||
|
||||
static u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
|
||||
static u32 xdp_bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY;
|
||||
static int stat_test_type;
|
||||
static u32 rxqsize;
|
||||
static u32 frame_headroom;
|
||||
|
||||
struct xsk_umem_info {
|
||||
struct xsk_ring_prod fq;
|
||||
struct xsk_ring_cons cq;
|
||||
struct xsk_umem *umem;
|
||||
u32 num_frames;
|
||||
u32 frame_headroom;
|
||||
void *buffer;
|
||||
u32 frame_size;
|
||||
bool unaligned_mode;
|
||||
};
|
||||
|
||||
struct xsk_socket_info {
|
||||
@ -95,51 +98,58 @@ struct xsk_socket_info {
|
||||
struct xsk_umem_info *umem;
|
||||
struct xsk_socket *xsk;
|
||||
u32 outstanding_tx;
|
||||
};
|
||||
|
||||
struct flow_vector {
|
||||
enum fvector {
|
||||
tx,
|
||||
rx,
|
||||
} vector;
|
||||
u32 rxqsize;
|
||||
};
|
||||
|
||||
struct pkt {
|
||||
u64 addr;
|
||||
u32 len;
|
||||
u32 payload;
|
||||
bool valid;
|
||||
};
|
||||
|
||||
struct pkt_stream {
|
||||
u32 nb_pkts;
|
||||
u32 rx_pkt_nb;
|
||||
struct pkt *pkts;
|
||||
bool use_addr_for_fill;
|
||||
};
|
||||
|
||||
typedef void *(*thread_func_t)(void *arg);
|
||||
|
||||
struct ifobject {
|
||||
char ifname[MAX_INTERFACE_NAME_CHARS];
|
||||
char nsname[MAX_INTERFACES_NAMESPACE_CHARS];
|
||||
struct xsk_socket_info *xsk;
|
||||
struct xsk_socket_info **xsk_arr;
|
||||
struct xsk_umem_info **umem_arr;
|
||||
struct xsk_socket_info *xsk_arr;
|
||||
struct xsk_umem_info *umem;
|
||||
void *(*func_ptr)(void *arg);
|
||||
struct flow_vector fv;
|
||||
struct xsk_umem_info *umem_arr;
|
||||
thread_func_t func_ptr;
|
||||
struct pkt_stream *pkt_stream;
|
||||
int ns_fd;
|
||||
u32 dst_ip;
|
||||
u32 src_ip;
|
||||
u32 xdp_flags;
|
||||
u32 bind_flags;
|
||||
u16 src_port;
|
||||
u16 dst_port;
|
||||
bool tx_on;
|
||||
bool rx_on;
|
||||
bool use_poll;
|
||||
u8 dst_mac[ETH_ALEN];
|
||||
u8 src_mac[ETH_ALEN];
|
||||
};
|
||||
|
||||
static struct ifobject *ifdict[MAX_INTERFACES];
|
||||
static struct ifobject *ifdict_rx;
|
||||
static struct ifobject *ifdict_tx;
|
||||
struct test_spec {
|
||||
struct ifobject *ifobj_tx;
|
||||
struct ifobject *ifobj_rx;
|
||||
struct pkt_stream *pkt_stream_default;
|
||||
u16 total_steps;
|
||||
u16 current_step;
|
||||
u16 nb_sockets;
|
||||
char name[MAX_TEST_NAME_SIZE];
|
||||
};
|
||||
|
||||
/*threads*/
|
||||
pthread_barrier_t barr;
|
||||
pthread_t t0, t1;
|
||||
|
||||
#endif /* XDPXCEIVER_H */
|
||||
|