#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# Copyright (C) 2018-2019 Netronome Systems, Inc.
# Copyright (C) 2021 Isovalent, Inc.

# In case user attempts to run with Python 2.
from __future__ import print_function

import argparse
import re
import sys, os
import subprocess

helpersDocStart = 'Start of BPF helper function descriptions:'

class NoHelperFound(BaseException):
    pass

class NoSyscallCommandFound(BaseException):
    pass

class ParsingError(BaseException):
    def __init__(self, line='<line not provided>', reader=None):
        if reader:
            BaseException.__init__(self,
                                   'Error at file offset %d, parsing line: %s' %
                                   (reader.tell(), line))
        else:
            BaseException.__init__(self, 'Error parsing line: %s' % line)

class APIElement(object):
    """
    An object representing the description of an aspect of the eBPF API.
    @proto: prototype of the API symbol
    @desc: textual description of the symbol
    @ret: (optional) description of any associated return value
    """
    def __init__(self, proto='', desc='', ret=''):
        self.proto = proto
        self.desc = desc
        self.ret = ret

class Helper(APIElement):
    """
    An object representing the description of an eBPF helper function.
    @proto: function prototype of the helper function
    @desc: textual description of the helper function
    @ret: description of the return value of the helper function
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.enum_val = None

    def proto_break_down(self):
        """
        Break down the helper function prototype into smaller chunks: return
        type, name, distinct arguments.
        """
        arg_re = re.compile(r'((\w+ )*?(\w+|...))( (\**)(\w+))?$')
        res = {}
        proto_re = re.compile(r'(.+) (\**)(\w+)\(((([^,]+)(, )?){1,5})\)$')

        capture = proto_re.match(self.proto)
        res['ret_type'] = capture.group(1)
        res['ret_star'] = capture.group(2)
        res['name'] = capture.group(3)
        res['args'] = []

        args = capture.group(4).split(', ')
        for a in args:
            capture = arg_re.match(a)
            res['args'].append({
                'type' : capture.group(1),
                'star' : capture.group(5),
                'name' : capture.group(6)
            })

        return res
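
    # Illustrative sketch (not part of the original script): for a bpf.h
    # prototype such as
    #   'void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)'
    # proto_break_down() is expected to return a dictionary along these lines:
    #   {'ret_type': 'void', 'ret_star': '*', 'name': 'bpf_map_lookup_elem',
    #    'args': [{'type': 'struct bpf_map', 'star': '*', 'name': 'map'},
    #             {'type': 'const void', 'star': '*', 'name': 'key'}]}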

class HeaderParser(object):
    """
    An object used to parse a file in order to extract the documentation of a
    list of eBPF helper functions. All the helpers that can be retrieved are
    stored as Helper objects, in the self.helpers list.
    @filename: name of file to parse, usually include/uapi/linux/bpf.h in the
               kernel tree
    """
    def __init__(self, filename):
        self.reader = open(filename, 'r')
        self.line = ''
        self.helpers = []
        self.commands = []
        self.desc_unique_helpers = set()
        self.define_unique_helpers = []
        self.helper_enum_vals = {}
        self.helper_enum_pos = {}
        self.desc_syscalls = []
        self.enum_syscalls = []
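
    # Illustrative usage sketch (the driver code is not part of this excerpt):
    # the parser is typically pointed at the UAPI header and run once, e.g.
    #   parser = HeaderParser('include/uapi/linux/bpf.h')
    #   parser.run()
    # after which parser.helpers and parser.commands hold the parsed elements.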

    def parse_element(self):
        proto = self.parse_symbol()
        desc = self.parse_desc(proto)
        ret = self.parse_ret(proto)
        return APIElement(proto=proto, desc=desc, ret=ret)

    def parse_helper(self):
        proto = self.parse_proto()
        desc = self.parse_desc(proto)
        ret = self.parse_ret(proto)
        return Helper(proto=proto, desc=desc, ret=ret)

    def parse_symbol(self):
        p = re.compile(r' \* ?(BPF\w+)$')
        capture = p.match(self.line)
        if not capture:
            raise NoSyscallCommandFound
        end_re = re.compile(r' \* ?NOTES$')
        end = end_re.match(self.line)
        if end:
            raise NoSyscallCommandFound
        self.line = self.reader.readline()
        return capture.group(1)
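
    # For illustration (assumed shape of the bpf.h comment block, not part of
    # the original script): parse_symbol() matches syscall command lines such as
    #   ' * BPF_MAP_CREATE'
    # and stops once it reaches the ' * NOTES' line that closes the
    # "DOC: eBPF Syscall Commands" section.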

    def parse_proto(self):
        # Argument can be of shape:
        #   - "void"
        #   - "type name"
        #   - "type *name"
        #   - Same as above, with "const" and/or "struct" in front of type
        #   - "..." (undefined number of arguments, for bpf_trace_printk())
        # There is at least one term ("void"), and at most five arguments.
        p = re.compile(r' \* ?((.+) \**\w+\((((const )?(struct )?(\w+|\.\.\.)( \**\w+)?)(, )?){1,5}\))$')
        capture = p.match(self.line)
        if not capture:
            raise NoHelperFound
        self.line = self.reader.readline()
        return capture.group(1)
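
    # Illustrative example (assumed bpf.h formatting, not part of the original
    # script): parse_proto() is meant to match a prototype line such as
    #   ' * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)'
    # and return the prototype without the leading ' * ' marker.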

    def parse_desc(self, proto):
        p = re.compile(r' \* ?(?:\t| {5,8})Description$')
        capture = p.match(self.line)
        if not capture:
            raise Exception("No description section found for " + proto)
        # Description can be several lines, some of them possibly empty, and it
        # stops when another subsection title is met.
        desc = ''
        desc_present = False
        while True:
            self.line = self.reader.readline()
            if self.line == ' *\n':
                desc += '\n'
            else:
                p = re.compile(r' \* ?(?:\t| {5,8})(?:\t| {8})(.*)')
                capture = p.match(self.line)
                if capture:
                    desc_present = True
                    desc += capture.group(1) + '\n'
                else:
                    break

        if not desc_present:
            raise Exception("No description found for " + proto)
        return desc

    def parse_ret(self, proto):
        p = re.compile(r' \* ?(?:\t| {5,8})Return$')
        capture = p.match(self.line)
        if not capture:
            raise Exception("No return section found for " + proto)
        # Return value description can be several lines, some of them possibly
        # empty, and it stops when another subsection title is met.
        ret = ''
        ret_present = False
        while True:
            self.line = self.reader.readline()
            if self.line == ' *\n':
                ret += '\n'
            else:
                p = re.compile(r' \* ?(?:\t| {5,8})(?:\t| {8})(.*)')
                capture = p.match(self.line)
                if capture:
                    ret_present = True
                    ret += capture.group(1) + '\n'
                else:
                    break

        if not ret_present:
            raise Exception("No return found for " + proto)
        return ret
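
    # For illustration (assumed bpf.h layout, not part of the original script):
    # parse_desc() and parse_ret() consume tab-indented subsections of the form
    #   ' * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)'
    #   ' * \tDescription'
    #   ' * \t\tPerform a lookup in *map* for an entry associated to *key*.'
    #   ' * \tReturn'
    #   ' * \t\tMap value associated to *key*, or **NULL** if no entry was found.'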

    def seek_to(self, target, help_message, discard_lines = 1):
        self.reader.seek(0)
        offset = self.reader.read().find(target)
        if offset == -1:
            raise Exception(help_message)
        self.reader.seek(offset)
        self.reader.readline()
        for _ in range(discard_lines):
            self.reader.readline()
        self.line = self.reader.readline()

    def parse_desc_syscall(self):
        self.seek_to('* DOC: eBPF Syscall Commands',
                     'Could not find start of eBPF syscall descriptions list')
        while True:
            try:
                command = self.parse_element()
                self.commands.append(command)
                self.desc_syscalls.append(command.proto)

            except NoSyscallCommandFound:
                break

    def parse_enum_syscall(self):
        self.seek_to('enum bpf_cmd {',
                     'Could not find start of bpf_cmd enum', 0)
        # Searches for either one or more BPF\w+ enums
        bpf_p = re.compile(r'\s*(BPF\w+)+')
        # Searches for an enum entry assigned to another entry,
        # e.g. BPF_PROG_RUN = BPF_PROG_TEST_RUN, which is not documented
        # and hence should be skipped in the check used to determine
        # whether the right number of syscalls are documented
        assign_p = re.compile(r'\s*(BPF\w+)\s*=\s*(BPF\w+)')
        bpf_cmd_str = ''
        while True:
            capture = assign_p.match(self.line)
            if capture:
                # Skip line if an enum entry is assigned to another entry
                self.line = self.reader.readline()
                continue
            capture = bpf_p.match(self.line)
            if capture:
                bpf_cmd_str += self.line
            else:
                break
            self.line = self.reader.readline()
        # Find the number of occurrences of BPF\w+
        self.enum_syscalls = re.findall(r'(BPF\w+)+', bpf_cmd_str)
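
    # Illustrative sketch (assumed bpf.h content, not part of the original
    # script): parse_enum_syscall() walks entries of enum bpf_cmd such as
    #   BPF_MAP_CREATE,
    #   BPF_MAP_LOOKUP_ELEM,
    # and skips aliases like
    #   BPF_PROG_RUN = BPF_PROG_TEST_RUN,
    # so that only distinct, documentable commands are counted.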

    def parse_desc_helpers(self):
        self.seek_to(helpersDocStart,
                     'Could not find start of eBPF helper descriptions list')
        while True:
            try:
                helper = self.parse_helper()
                self.helpers.append(helper)
                proto = helper.proto_break_down()
                self.desc_unique_helpers.add(proto['name'])
            except NoHelperFound:
                break

    def parse_define_helpers(self):
        # Parse FN(...) in #define ___BPF_FUNC_MAPPER to compare later with the
        # number of unique function names present in description and use the
        # correct enumeration value.
        # Note: seek_to(..) discards the first line below the target search text,
        # resulting in FN(unspec, 0, ##ctx) being skipped and not added to
        # self.define_unique_helpers.
        self.seek_to('#define ___BPF_FUNC_MAPPER(FN, ctx...)',
                     'Could not find start of eBPF helper definition list')
        # Searches for one FN(\w+) define or a backslash for newline
        p = re.compile(r'\s*FN\((\w+), (\d+), ##ctx\)|\\\\')
        fn_defines_str = ''
        i = 0
        while True:
            capture = p.match(self.line)
            if capture:
                fn_defines_str += self.line
                helper_name = capture.expand(r'bpf_\1')
                self.helper_enum_vals[helper_name] = int(capture.group(2))
                self.helper_enum_pos[helper_name] = i
                i += 1
            else:
                break
            self.line = self.reader.readline()
        # Find the number of occurrences of FN(\w+)
        self.define_unique_helpers = re.findall(r'FN\(\w+, \d+, ##ctx\)', fn_defines_str)
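
    # Illustrative sketch (assumed macro layout in bpf.h, not part of the
    # original script): the loop above matches ___BPF_FUNC_MAPPER entries such as
    #   FN(map_lookup_elem, 1, ##ctx)    \
    #   FN(map_update_elem, 2, ##ctx)    \
    # recording, for example, helper_enum_vals['bpf_map_lookup_elem'] = 1 and
    # helper_enum_pos['bpf_map_lookup_elem'] = 0.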

    def validate_helpers(self):
        last_helper = ''
        seen_helpers = set()
        seen_enum_vals = set()
        i = 0
        for helper in self.helpers:
            proto = helper.proto_break_down()
            name = proto['name']
            try:
                enum_val = self.helper_enum_vals[name]
                enum_pos = self.helper_enum_pos[name]
            except KeyError:
                raise Exception("Helper %s is missing from enum bpf_func_id" % name)

            if name in seen_helpers:
                if last_helper != name:
                    raise Exception("Helper %s has multiple descriptions which are not grouped together" % name)
                continue

            # Enforce current practice of having the descriptions ordered
            # by enum value.
            if enum_pos != i:
                raise Exception("Helper %s (ID %d) comment order (#%d) must be aligned with its position (#%d) in enum bpf_func_id" % (name, enum_val, i + 1, enum_pos + 1))
            if enum_val in seen_enum_vals:
                raise Exception("Helper %s has duplicated value %d" % (name, enum_val))

            seen_helpers.add(name)
            last_helper = name
            seen_enum_vals.add(enum_val)

            helper.enum_val = enum_val
            i += 1

    def run(self):
        self.parse_desc_syscall()
        self.parse_enum_syscall()
        self.parse_desc_helpers()
        self.parse_define_helpers()
        self.validate_helpers()
        self.reader.close()

###############################################################################

class Printer(object):
    """
    A generic class for printers. Printers should be created with an array of
    Helper objects, and implement a way to print them in the desired fashion.
    @parser: A HeaderParser with objects to print to standard output
    """
    def __init__(self, parser):
        self.parser = parser
        self.elements = []

    def print_header(self):
        pass

    def print_footer(self):
        pass

    def print_one(self, helper):
        pass

    def print_all(self):
        self.print_header()
        for elem in self.elements:
            self.print_one(elem)
        self.print_footer()

    def elem_number_check(self, desc_unique_elem, define_unique_elem, type, instance):
        """
        Check that the number of helpers/syscalls documented in the header file
        description matches the number defined as part of the enum/macro, and
        raise an Exception if they don't match.
        """
        nr_desc_unique_elem = len(desc_unique_elem)
        nr_define_unique_elem = len(define_unique_elem)
        if nr_desc_unique_elem != nr_define_unique_elem:
            exception_msg = '''
The number of unique %s in description (%d) doesn\'t match the number of unique %s defined in %s (%d)
''' % (type, nr_desc_unique_elem, type, instance, nr_define_unique_elem)
            if nr_desc_unique_elem < nr_define_unique_elem:
                # Function description is parsed until no helper is found (which can be due to
                # misformatting). Hence, only print the first missing/misformatted helper/enum.
                exception_msg += '''
The description for %s is not present or formatted correctly.
''' % (define_unique_elem[nr_desc_unique_elem])
            raise Exception(exception_msg)

class PrinterRST(Printer):
    """
    A generic class for printers that print ReStructured Text. Printers should
    be created with a HeaderParser object, and implement a way to print API
    elements in the desired fashion.
    @parser: A HeaderParser with objects to print to standard output
    """
    def __init__(self, parser):
        self.parser = parser

    def print_license(self):
        license = '''\
.. Copyright (C) All BPF authors and contributors from 2014 to present.
.. See git log include/uapi/linux/bpf.h in kernel tree for details.
..
.. SPDX-License-Identifier: Linux-man-pages-copyleft
..
.. Please do not edit this file. It was generated from the documentation
.. located in file include/uapi/linux/bpf.h of the Linux kernel sources
.. (helpers description), and from scripts/bpf_doc.py in the same
.. repository (header and footer).
'''
        print(license)

    def print_elem(self, elem):
        if (elem.desc):
            print('\tDescription')
            # Do not strip all newline characters: formatted code at the end of
            # a section must be followed by a blank line.
            for line in re.sub('\n$', '', elem.desc, count=1).split('\n'):
                print('{}{}'.format('\t\t' if line else '', line))

        if (elem.ret):
            print('\tReturn')
            for line in elem.ret.rstrip().split('\n'):
                print('{}{}'.format('\t\t' if line else '', line))

        print('')

    def get_kernel_version(self):
        try:
            version = subprocess.run(['git', 'describe'], cwd=linuxRoot,
                                     capture_output=True, check=True)
            version = version.stdout.decode().rstrip()
        except:
            try:
                version = subprocess.run(['make', 'kernelversion'], cwd=linuxRoot,
                                         capture_output=True, check=True)
                version = version.stdout.decode().rstrip()
            except:
                return 'Linux'
        return 'Linux {version}'.format(version=version)

    def get_last_doc_update(self, delimiter):
        try:
            cmd = ['git', 'log', '-1', '--pretty=format:%cs', '--no-patch',
                   '-L',
                   '/{}/,/\\*\\//:include/uapi/linux/bpf.h'.format(delimiter)]
            date = subprocess.run(cmd, cwd=linuxRoot,
                                  capture_output=True, check=True)
            return date.stdout.decode().rstrip()
        except:
            return ''
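
    # For illustration (not part of the original script): get_last_doc_update()
    # builds the equivalent of a shell invocation along these lines, with the
    # helpersDocStart string passed as the delimiter:
    #   git log -1 --pretty=format:%cs --no-patch \
    #       -L '/Start of BPF helper function descriptions:/,/\*\//':include/uapi/linux/bpf.h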

class PrinterHelpersRST(PrinterRST):
    """
    A printer for dumping collected information about helpers as a ReStructured
    Text page compatible with the rst2man program, which can be used to
    generate a manual page for the helpers.
    @parser: A HeaderParser with Helper objects to print to standard output
    """
    def __init__(self, parser):
        self.elements = parser.helpers
        self.elem_number_check(parser.desc_unique_helpers, parser.define_unique_helpers, 'helper', '___BPF_FUNC_MAPPER')

    def print_header(self):
        header = '''\
===========
BPF-HELPERS
===========
-------------------------------------------------------------------------------
list of eBPF helper functions
-------------------------------------------------------------------------------

:Manual section: 7
:Version: {version}
{date_field}{date}

DESCRIPTION
===========

The extended Berkeley Packet Filter (eBPF) subsystem consists of programs
written in a pseudo-assembly language, then attached to one of the several
kernel hooks and run in reaction to specific events. This framework differs
from the older, "classic" BPF (or "cBPF") in several aspects, one of them being
the ability to call special functions (or "helpers") from within a program.
These functions are restricted to a white-list of helpers defined in the
kernel.

These helpers are used by eBPF programs to interact with the system, or with
the context in which they work. For instance, they can be used to print
debugging messages, to get the time since the system was booted, to interact
with eBPF maps, or to manipulate network packets. Since there are several eBPF
program types, and they do not run in the same context, each program type
can only call a subset of those helpers.

Due to eBPF conventions, a helper can not have more than five arguments.

Internally, eBPF programs call directly into the compiled helper functions
without requiring any foreign-function interface. As a result, calling helpers
introduces no overhead, thus offering excellent performance.

This document is an attempt to list and document the helpers available to eBPF
developers. They are sorted by chronological order (the oldest helpers in the
kernel at the top).

HELPERS
=======
'''
        kernelVersion = self.get_kernel_version()
        lastUpdate = self.get_last_doc_update(helpersDocStart)

        PrinterRST.print_license(self)
        print(header.format(version=kernelVersion,
                            date_field = ':Date: ' if lastUpdate else '',
                            date=lastUpdate))

    def print_footer(self):
        footer = '''
EXAMPLES
========

Example usage for most of the eBPF helpers listed in this manual page is
available within the Linux kernel sources, at the following locations:

* *samples/bpf/*
* *tools/testing/selftests/bpf/*

LICENSE
=======

eBPF programs can have an associated license, passed along with the bytecode
instructions to the kernel when the programs are loaded. The format for that
string is identical to the one in use for kernel modules (Dual licenses, such
as "Dual BSD/GPL", may be used). Some helper functions are only accessible to
programs that are compatible with the GNU General Public License (GNU GPL).

In order to use such helpers, the eBPF program must be loaded with the correct
license string passed (via **attr**) to the **bpf**\\ () system call, and this
generally translates into the C source code of the program containing a line
similar to the following:

::

	char ____license[] __attribute__((section("license"), used)) = "GPL";

IMPLEMENTATION
==============

This manual page is an effort to document the existing eBPF helper functions.
But as of this writing, the BPF sub-system is under heavy development. New eBPF
program or map types are added, along with new helper functions. Some helpers
are occasionally made available for additional program types. So in spite of
the efforts of the community, this page might not be up-to-date. If you want to
check by yourself what helper functions exist in your kernel, or what types of
programs they can support, here are some files among the kernel tree that you
may be interested in:

* *include/uapi/linux/bpf.h* is the main BPF header. It contains the full list
  of all helper functions, as well as many other BPF definitions including most
  of the flags, structs or constants used by the helpers.
* *net/core/filter.c* contains the definition of most network-related helper
  functions, and the list of program types from which they can be used.
* *kernel/trace/bpf_trace.c* is the equivalent for most tracing program-related
  helpers.
* *kernel/bpf/verifier.c* contains the functions used to check that valid types
  of eBPF maps are used with a given helper function.
* *kernel/bpf/* directory contains other files in which additional helpers are
  defined (for cgroups, sockmaps, etc.).
* The bpftool utility can be used to probe the availability of helper functions
  on the system (as well as supported program and map types, and a number of
  other parameters). To do so, run **bpftool feature probe** (see
  **bpftool-feature**\\ (8) for details). Add the **unprivileged** keyword to
  list features available to unprivileged users.

Compatibility between helper functions and program types can generally be found
in the files where helper functions are defined. Look for the **struct
bpf_func_proto** objects and for functions returning them: these functions
contain a list of helpers that a given program type can call. Note that the
**default:** label of the **switch ... case** used to filter helpers can call
other functions, themselves allowing access to additional helpers. The
requirement for GPL license is also in those **struct bpf_func_proto**.

Compatibility between helper functions and map types can be found in the
**check_map_func_compatibility**\\ () function in file *kernel/bpf/verifier.c*.

Helper functions that invalidate the checks on **data** and **data_end**
pointers for network processing are listed in function
**bpf_helper_changes_pkt_data**\\ () in file *net/core/filter.c*.

SEE ALSO
========

**bpf**\\ (2),
**bpftool**\\ (8),
**cgroups**\\ (7),
**ip**\\ (8),
**perf_event_open**\\ (2),
**sendmsg**\\ (2),
**socket**\\ (7),
**tc-bpf**\\ (8)'''

        print(footer)

    def print_proto(self, helper):
        """
        Format the function prototype with bold and italics markers. This makes
        the RST file less readable, but gives nice results in the manual page.
        """
        proto = helper.proto_break_down()

        print('**%s %s%s(' % (proto['ret_type'],
                              proto['ret_star'].replace('*', '\\*'),
                              proto['name']),
              end='')

        comma = ''
        for a in proto['args']:
            one_arg = '{}{}'.format(comma, a['type'])
            if a['name']:
                if a['star']:
                    one_arg += ' {}**\\ '.format(a['star'].replace('*', '\\*'))
                else:
                    one_arg += '** '
                one_arg += '*{}*\\ **'.format(a['name'])
            comma = ', '
            print(one_arg, end='')

        print(')**')
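
    # Illustrative sketch (derived from the code above, not guaranteed to match
    # the generated page byte-for-byte): for bpf_map_lookup_elem() the printed
    # prototype looks roughly like
    #   **void \*bpf_map_lookup_elem(struct bpf_map \***\ *map*\ **, const void \***\ *key*\ **)**
    # i.e. types are rendered in bold and argument names in italics.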

    def print_one(self, helper):
        self.print_proto(helper)
        self.print_elem(helper)

class PrinterSyscallRST(PrinterRST):
    """
    A printer for dumping collected information about the syscall API as a
    ReStructured Text page compatible with the rst2man program, which can be
    used to generate a manual page for the syscall.
    @parser: A HeaderParser with APIElement objects to print to standard
             output
    """
    def __init__(self, parser):
        self.elements = parser.commands
        self.elem_number_check(parser.desc_syscalls, parser.enum_syscalls, 'syscall', 'bpf_cmd')

    def print_header(self):
        header = '''\
===
bpf
===
-------------------------------------------------------------------------------
Perform a command on an extended BPF object
-------------------------------------------------------------------------------

:Manual section: 2

COMMANDS
========
'''
        PrinterRST.print_license(self)
        print(header)

    def print_one(self, command):
        print('**%s**' % (command.proto))
        self.print_elem(command)

class PrinterHelpers(Printer):
    """
    A printer for dumping collected information about helpers as a C header to
    be included from BPF programs.
    @parser: A HeaderParser with Helper objects to print to standard output
    """
    def __init__(self, parser):
        self.elements = parser.helpers
        self.elem_number_check(parser.desc_unique_helpers, parser.define_unique_helpers, 'helper', '___BPF_FUNC_MAPPER')

    type_fwds = [
            'struct bpf_fib_lookup',
            'struct bpf_sk_lookup',
            'struct bpf_perf_event_data',
            'struct bpf_perf_event_value',
            'struct bpf_pidns_info',
            'struct bpf_redir_neigh',
            'struct bpf_sock',
            'struct bpf_sock_addr',
            'struct bpf_sock_ops',
            'struct bpf_sock_tuple',
            'struct bpf_spin_lock',
            'struct bpf_sysctl',
            'struct bpf_tcp_sock',
            'struct bpf_tunnel_key',
            'struct bpf_xfrm_state',
            'struct linux_binprm',
            'struct pt_regs',
            'struct sk_reuseport_md',
            'struct sockaddr',
            'struct tcphdr',
            'struct seq_file',
            'struct tcp6_sock',
            'struct tcp_sock',
            'struct tcp_timewait_sock',
            'struct tcp_request_sock',
            'struct udp6_sock',
            'struct unix_sock',
            'struct task_struct',
            'struct cgroup',
            'struct __sk_buff',
            'struct sk_msg_md',
            'struct xdp_md',
            'struct path',
            'struct btf_ptr',
            'struct inode',
            'struct socket',
            'struct file',
            'struct bpf_timer',
            'struct mptcp_sock',
            'struct bpf_dynptr',
            'struct iphdr',
            'struct ipv6hdr',
    ]

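    # The structs above are only forward-declared by print_header() below, so
    # that helper prototypes in the generated header can refer to them through
    # pointers without needing the full kernel definitions.
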
    known_types = {
            '...',
            'void',
            'const void',
            'char',
            'const char',
            'int',
            'long',
            'unsigned long',

            '__be16',
            '__be32',
            '__wsum',

            'struct bpf_fib_lookup',
            'struct bpf_perf_event_data',
            'struct bpf_perf_event_value',
            'struct bpf_pidns_info',
            'struct bpf_redir_neigh',
            'struct bpf_sk_lookup',
            'struct bpf_sock',
            'struct bpf_sock_addr',
            'struct bpf_sock_ops',
            'struct bpf_sock_tuple',
            'struct bpf_spin_lock',
            'struct bpf_sysctl',
            'struct bpf_tcp_sock',
            'struct bpf_tunnel_key',
            'struct bpf_xfrm_state',
            'struct linux_binprm',
            'struct pt_regs',
            'struct sk_reuseport_md',
            'struct sockaddr',
            'struct tcphdr',
            'struct seq_file',
            'struct tcp6_sock',
            'struct tcp_sock',
            'struct tcp_timewait_sock',
            'struct tcp_request_sock',
            'struct udp6_sock',
            'struct unix_sock',
            'struct task_struct',
            'struct cgroup',
            'struct path',
            'struct btf_ptr',
            'struct inode',
            'struct socket',
            'struct file',
            'struct bpf_timer',
            'struct mptcp_sock',
            'struct bpf_dynptr',
            'const struct bpf_dynptr',
            'struct iphdr',
            'struct ipv6hdr',
    }

    mapped_types = {
            'u8': '__u8',
            'u16': '__u16',
            'u32': '__u32',
            'u64': '__u64',
            's8': '__s8',
            's16': '__s16',
            's32': '__s32',
            's64': '__s64',
            'size_t': 'unsigned long',
            'struct bpf_map': 'void',
            'struct sk_buff': 'struct __sk_buff',
            'const struct sk_buff': 'const struct __sk_buff',
            'struct sk_msg_buff': 'struct sk_msg_md',
            'struct xdp_buff': 'struct xdp_md',
    }

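    # Entries in known_types pass through unchanged; mapped_types rewrites
    # kernel-internal names to their UAPI-visible equivalents in the generated
    # prototypes, e.g. 'struct sk_buff *skb' becomes 'struct __sk_buff *skb'
    # and 'struct bpf_map *map' becomes 'void *map' (see map_type() below).
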
    # Helpers overloaded for different context types.
    overloaded_helpers = [
        'bpf_get_socket_cookie',
        'bpf_sk_assign',
    ]

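    # For the helpers listed above, print_one() below replaces the typed
    # context parameter with a plain 'void *ctx', since the same helper
    # accepts several context types. An illustrative generated declaration
    # (enum value taken from include/uapi/linux/bpf.h) looks like:
    #
    #   static __u64 (* const bpf_get_socket_cookie)(void *ctx) = (void *) 46;
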
    def print_header(self):
        header = '''\
/* This is auto-generated file. See bpf_doc.py for details. */

/* Forward declarations of BPF structs */'''

        print(header)
        for fwd in self.type_fwds:
            print('%s;' % fwd)
        print('')

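    # Illustration (not part of the script): with the type_fwds list above,
    # print_header() emits a preamble along the lines of:
    #
    #   /* This is auto-generated file. See bpf_doc.py for details. */
    #
    #   /* Forward declarations of BPF structs */
    #   struct seq_file;
    #   struct tcp6_sock;
    #   ...
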
    def print_footer(self):
        footer = ''
        print(footer)

    def map_type(self, t):
        if t in self.known_types:
            return t
        if t in self.mapped_types:
            return self.mapped_types[t]
        print("Unrecognized type '%s', please add it to known types!" % t,
              file=sys.stderr)
        sys.exit(1)

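    # Rough sketch of the lookup above, for illustration only (the real method
    # exits the script on unknown types instead of raising KeyError):
    #
    #   def map_type_sketch(t):
    #       return t if t in known_types else mapped_types[t]
    #
    #   map_type_sketch('int')             # -> 'int'
    #   map_type_sketch('struct sk_buff')  # -> 'struct __sk_buff'
    #   map_type_sketch('struct bpf_map')  # -> 'void'
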
    seen_helpers = set()

    def print_one(self, helper):
        proto = helper.proto_break_down()

        if proto['name'] in self.seen_helpers:
            return
        self.seen_helpers.add(proto['name'])

        print('/*')
        print(" * %s" % proto['name'])
        print(" *")
        if (helper.desc):
            # Do not strip all newline characters: formatted code at the end of
            # a section must be followed by a blank line.
            for line in re.sub('\n$', '', helper.desc, count=1).split('\n'):
                print(' *{}{}'.format(' \t' if line else '', line))

        if (helper.ret):
            print(' *')
            print(' * Returns')
            for line in helper.ret.rstrip().split('\n'):
                print(' *{}{}'.format(' \t' if line else '', line))

        print(' */')
        print('static %s %s(* const %s)(' % (self.map_type(proto['ret_type']),
              proto['ret_star'], proto['name']), end='')
        comma = ''
        for i, a in enumerate(proto['args']):
            t = a['type']
            n = a['name']
            if proto['name'] in self.overloaded_helpers and i == 0:
                t = 'void'
                n = 'ctx'
            one_arg = '{}{}'.format(comma, self.map_type(t))
            if n:
                if a['star']:
                    one_arg += ' {}'.format(a['star'])
                else:
                    one_arg += ' '
                one_arg += '{}'.format(n)
            comma = ', '
            print(one_arg, end='')

        print(') = (void *) %d;' % helper.enum_val)
        print('')

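    # Illustration (not part of the script): for a helper with enum value 1 and
    # prototype "void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)",
    # the code above emits:
    #
    #   static void *(* const bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1;
    #
    # The pointer is declared 'const' so that GCC does not raise
    # -Wunused-variable for it, while Clang and GCC still constant-fold the
    # value into a direct 'call 1' BPF instruction at -O1 and above.
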
###############################################################################

# If script is launched from scripts/ from kernel tree and can access
# ../include/uapi/linux/bpf.h, use it as a default name for the file to parse,
# otherwise the --filename argument will be required from the command line.
script = os.path.abspath(sys.argv[0])
linuxRoot = os.path.dirname(os.path.dirname(script))
bpfh = os.path.join(linuxRoot, 'include/uapi/linux/bpf.h')

printers = {
        'helpers': PrinterHelpersRST,
        'syscall': PrinterSyscallRST,
}

argParser = argparse.ArgumentParser(description="""
Parse eBPF header file and generate documentation for the eBPF API.
The RST-formatted output produced can be turned into a manual page with the
rst2man utility.
""")
argParser.add_argument('--header', action='store_true',
                       help='generate C header file')
if (os.path.isfile(bpfh)):
    argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h',
                           default=bpfh)
else:
    argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h')
argParser.add_argument('target', nargs='?', default='helpers',
                       choices=printers.keys(), help='eBPF API target')
args = argParser.parse_args()

# Parse file.
headerParser = HeaderParser(args.filename)
headerParser.run()

# Print formatted output to standard output.
if args.header:
    if args.target != 'helpers':
        raise NotImplementedError('Only helpers header generation is supported')
    printer = PrinterHelpers(headerParser)
else:
    printer = printers[args.target](headerParser)
printer.print_all()
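
# Example invocations (illustrative; run from the root of a kernel tree, or
# pass --filename explicitly when bpf.h lives elsewhere):
#
#   ./scripts/bpf_doc.py                  # RST docs for helper functions
#   ./scripts/bpf_doc.py syscall          # RST docs for the bpf() syscall commands
#   ./scripts/bpf_doc.py --header         # C header with helper declarations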