commit 7c8199e24f

Tracing BPF programs can attach to kprobe and fentry. Hence they run in
unknown context where calling plain kmalloc() might not be safe.

Front-end kmalloc() with minimal per-cpu cache of free elements. Refill
this cache asynchronously from irq_work.

BPF programs always run with migration disabled. It's safe to allocate
from the cache of the current cpu with irqs disabled. Freeing is always
done into the bucket of the current cpu as well. irq_work trims extra
free elements from buckets with kfree and refills them with kmalloc, so
the global kmalloc logic takes care of freeing objects allocated by one
cpu and freed on another.

struct bpf_mem_alloc supports two modes:
- When size != 0 create kmem_cache and bpf_mem_cache for each cpu. This
  is the typical bpf hash map use case when all elements have equal size.
- When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
  kmalloc/kfree. Max allocation size is 4096 in this case. This is the
  bpf_dynptr and bpf_kptr use case.

bpf_mem_alloc/bpf_mem_free are bpf specific 'wrappers' of kmalloc/kfree.
bpf_mem_cache_alloc/bpf_mem_cache_free are 'wrappers' of
kmem_cache_alloc/kmem_cache_free.

The allocators are NMI-safe from bpf programs only. They are not
NMI-safe in general.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220902211058.60789-2-alexei.starovoitov@gmail.com
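As a sketch only, the interface the message describes could look like the
following C declarations. Names follow the commit text; the struct layout,
the init/destroy signatures, and the example_* helpers are assumptions for
illustration, not a verbatim copy of the kernel header.

/*
 * Sketch of the bpf_mem_alloc interface described above. Field names,
 * the init/destroy signatures, and the example helpers are assumptions
 * drawn from this commit message, not a verbatim copy of the header.
 */
struct bpf_mem_alloc {
	struct bpf_mem_caches __percpu *caches;	/* size == 0: 11 size-class caches */
	struct bpf_mem_cache __percpu *cache;	/* size != 0: one fixed-size cache */
};

/*
 * size != 0: create a kmem_cache and a bpf_mem_cache for each cpu
 *            (typical bpf hash map use case, all elements equal size).
 * size == 0: allocate 11 bpf_mem_cache-s for each cpu and rely on
 *            kmalloc/kfree; max allocation size is 4096
 *            (bpf_dynptr and bpf_kptr use case).
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size);
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);

/* bpf specific 'wrappers' of kmalloc/kfree */
void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);

/* 'wrappers' of kmem_cache_alloc/kmem_cache_free */
void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);

/* Hypothetical fixed-size user, e.g. a hash map with equal-size elements */
static struct bpf_mem_alloc example_ma;

static int example_map_init(int elem_size)
{
	/* size != 0 selects the per-cpu kmem_cache-backed mode */
	return bpf_mem_alloc_init(&example_ma, elem_size);
}

static void *example_elem_get(void)
{
	/*
	 * Safe from tracing BPF program context: migration is disabled,
	 * so this takes from the current cpu's cache with irqs disabled;
	 * refill happens later from irq_work.
	 */
	return bpf_mem_cache_alloc(&example_ma);
}

static void example_elem_put(void *elem)
{
	/* always frees into the current cpu's bucket */
	bpf_mem_cache_free(&example_ma, elem);
}

Freeing into the current cpu's bucket and letting irq_work rebalance via
kmalloc/kfree is what keeps the fast path safe in unknown context while
still handling objects allocated on one cpu and freed on another.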
Makefile (46 lines, 1.7 KiB)
# SPDX-License-Identifier: GPL-2.0
obj-y := core.o
ifneq ($(CONFIG_BPF_JIT_ALWAYS_ON),y)
# ___bpf_prog_run() needs GCSE disabled on x86; see 3193c0836f203 for details
cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
endif
CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)

obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o
obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o
obj-$(CONFIG_BPF_JIT) += trampoline.o
obj-$(CONFIG_BPF_SYSCALL) += btf.o memalloc.o
obj-$(CONFIG_BPF_JIT) += dispatcher.o
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_BPF_SYSCALL) += devmap.o
obj-$(CONFIG_BPF_SYSCALL) += cpumap.o
obj-$(CONFIG_BPF_SYSCALL) += offload.o
obj-$(CONFIG_BPF_SYSCALL) += net_namespace.o
endif
ifeq ($(CONFIG_PERF_EVENTS),y)
obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
endif
ifeq ($(CONFIG_CGROUPS),y)
obj-$(CONFIG_BPF_SYSCALL) += cgroup_iter.o
endif
obj-$(CONFIG_CGROUP_BPF) += cgroup.o
ifeq ($(CONFIG_INET),y)
obj-$(CONFIG_BPF_SYSCALL) += reuseport_array.o
endif
ifeq ($(CONFIG_SYSFS),y)
obj-$(CONFIG_DEBUG_INFO_BTF) += sysfs_btf.o
endif
ifeq ($(CONFIG_BPF_JIT),y)
obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
obj-${CONFIG_BPF_LSM} += bpf_lsm.o
endif
obj-$(CONFIG_BPF_PRELOAD) += preload/

obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
$(obj)/relo_core.o: $(srctree)/tools/lib/bpf/relo_core.c FORCE
	$(call if_changed_rule,cc_o_c)