bpf: decouple btf from seq bpf fs dump and enable more maps
Commit a26ca7c982 ("bpf: btf: Add pretty print support to the basic arraymap") and 699c86d6ec ("bpf: btf: add pretty print for hash/lru_hash maps") enabled support for BTF and dumping via BPF fs for array and hash/lru maps. However, both can be decoupled from each other such that regular BPF maps can be supported for attaching BTF key/value information, while not all maps necessarily need to dump via the map_seq_show_elem() callback.

The basic sanity check which is a prerequisite for all maps is that key/value sizes have to match in any case, and some maps can have extra checks via the map_check_btf() callback, e.g. probing certain types or indicating no support in general. With that we can also enable retrieving BTF info for per-cpu map types and lpm.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
commit e8d2bec045
parent 9d6f417714
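As an illustrative sketch of the new scheme (not part of the patch below; the example_* identifiers are hypothetical): attaching BTF key/value info to a map now only requires passing the generic key/value size check done centrally in map_check_btf() in kernel/bpf/syscall.c, a map type may optionally provide a .map_check_btf callback with the new btf_type-based signature for extra constraints, and only maps that additionally implement .map_seq_show_elem get pretty-printed via the BPF fs dump:

#include <linux/bpf.h>
#include <linux/btf.h>
#include <uapi/linux/btf.h>

/* Hypothetical map type that only accepts integer BTF keys. The generic
 * match of BTF key/value sizes against map->key_size/map->value_size is
 * already performed by the core before this callback runs, so only
 * type-shape constraints need to be checked here.
 */
static int example_map_check_btf(const struct bpf_map *map,
				 const struct btf_type *key_type,
				 const struct btf_type *value_type)
{
	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;
	return 0;
}

const struct bpf_map_ops example_map_ops = {
	/* ... map_alloc/map_free/map_lookup_elem/etc. as usual ... */
	.map_check_btf = example_map_check_btf,
	/* No .map_seq_show_elem here: BTF can still be attached to the
	 * map, it just will not be pretty-printed via the BPF fs dump.
	 */
};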
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -23,7 +23,7 @@ struct bpf_prog;
 struct bpf_map;
 struct sock;
 struct seq_file;
-struct btf;
+struct btf_type;
 
 /* map is generic key/value storage optionally accesible by eBPF programs */
 struct bpf_map_ops {
@@ -48,8 +48,9 @@ struct bpf_map_ops {
 	u32 (*map_fd_sys_lookup_elem)(void *ptr);
 	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
 				  struct seq_file *m);
-	int (*map_check_btf)(const struct bpf_map *map, const struct btf *btf,
-			     u32 key_type_id, u32 value_type_id);
+	int (*map_check_btf)(const struct bpf_map *map,
+			     const struct btf_type *key_type,
+			     const struct btf_type *value_type);
 };
 
 struct bpf_map {
@@ -118,9 +119,13 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
 
 static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
 {
-	return map->ops->map_seq_show_elem && map->ops->map_check_btf;
+	return map->btf && map->ops->map_seq_show_elem;
 }
 
+int map_check_no_btf(const struct bpf_map *map,
+		     const struct btf_type *key_type,
+		     const struct btf_type *value_type);
+
 extern const struct bpf_map_ops bpf_map_offload_ops;
 
 /* function argument constraints */
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -358,27 +358,20 @@ static void array_map_seq_show_elem(struct bpf_map *map, void *key,
 	rcu_read_unlock();
 }
 
-static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf,
-			       u32 btf_key_id, u32 btf_value_id)
+static int array_map_check_btf(const struct bpf_map *map,
+			       const struct btf_type *key_type,
+			       const struct btf_type *value_type)
 {
-	const struct btf_type *key_type, *value_type;
-	u32 key_size, value_size;
 	u32 int_data;
 
-	key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
-	if (!key_type || BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
+	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
 		return -EINVAL;
 
 	int_data = *(u32 *)(key_type + 1);
-	/* bpf array can only take a u32 key. This check makes
-	 * sure that the btf matches the attr used during map_create.
+	/* bpf array can only take a u32 key. This check makes sure
+	 * that the btf matches the attr used during map_create.
 	 */
-	if (BTF_INT_BITS(int_data) != 32 || key_size != 4 ||
-	    BTF_INT_OFFSET(int_data))
-		return -EINVAL;
-
-	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
-	if (!value_type || value_size != map->value_size)
+	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
 		return -EINVAL;
 
 	return 0;
@@ -405,6 +398,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
 	.map_lookup_elem = percpu_array_map_lookup_elem,
 	.map_update_elem = array_map_update_elem,
 	.map_delete_elem = array_map_delete_elem,
+	.map_check_btf = array_map_check_btf,
 };
 
 static int fd_array_map_alloc_check(union bpf_attr *attr)
@@ -546,6 +540,7 @@ const struct bpf_map_ops prog_array_map_ops = {
 	.map_fd_put_ptr = prog_fd_array_put_ptr,
 	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
 	.map_release_uref = bpf_fd_array_map_clear,
+	.map_check_btf = map_check_no_btf,
 };
 
 static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
@@ -634,6 +629,7 @@ const struct bpf_map_ops perf_event_array_map_ops = {
 	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
 	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
 	.map_release = perf_event_fd_array_release,
+	.map_check_btf = map_check_no_btf,
 };
 
 #ifdef CONFIG_CGROUPS
@@ -665,6 +661,7 @@ const struct bpf_map_ops cgroup_array_map_ops = {
 	.map_delete_elem = fd_array_map_delete_elem,
 	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
 	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
+	.map_check_btf = map_check_no_btf,
 };
 #endif
 
@@ -749,4 +746,5 @@ const struct bpf_map_ops array_of_maps_map_ops = {
 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
 	.map_gen_lookup = array_of_map_gen_lookup,
+	.map_check_btf = map_check_no_btf,
 };
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -555,6 +555,7 @@ const struct bpf_map_ops cpu_map_ops = {
 	.map_update_elem = cpu_map_update_elem,
 	.map_lookup_elem = cpu_map_lookup_elem,
 	.map_get_next_key = cpu_map_get_next_key,
+	.map_check_btf = map_check_no_btf,
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -484,6 +484,7 @@ const struct bpf_map_ops dev_map_ops = {
 	.map_lookup_elem = dev_map_lookup_elem,
 	.map_update_elem = dev_map_update_elem,
 	.map_delete_elem = dev_map_delete_elem,
+	.map_check_btf = map_check_no_btf,
 };
 
 static int dev_map_notification(struct notifier_block *notifier,
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1185,23 +1185,6 @@ static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
 	rcu_read_unlock();
 }
 
-static int htab_map_check_btf(const struct bpf_map *map, const struct btf *btf,
-			      u32 btf_key_id, u32 btf_value_id)
-{
-	const struct btf_type *key_type, *value_type;
-	u32 key_size, value_size;
-
-	key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
-	if (!key_type || key_size != map->key_size)
-		return -EINVAL;
-
-	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
-	if (!value_type || value_size != map->value_size)
-		return -EINVAL;
-
-	return 0;
-}
-
 const struct bpf_map_ops htab_map_ops = {
 	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
@@ -1212,7 +1195,6 @@ const struct bpf_map_ops htab_map_ops = {
 	.map_delete_elem = htab_map_delete_elem,
 	.map_gen_lookup = htab_map_gen_lookup,
 	.map_seq_show_elem = htab_map_seq_show_elem,
-	.map_check_btf = htab_map_check_btf,
 };
 
 const struct bpf_map_ops htab_lru_map_ops = {
@@ -1225,7 +1207,6 @@ const struct bpf_map_ops htab_lru_map_ops = {
 	.map_delete_elem = htab_lru_map_delete_elem,
 	.map_gen_lookup = htab_lru_map_gen_lookup,
 	.map_seq_show_elem = htab_map_seq_show_elem,
-	.map_check_btf = htab_map_check_btf,
 };
 
 /* Called from eBPF program */
@@ -1452,4 +1433,5 @@ const struct bpf_map_ops htab_of_maps_map_ops = {
 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
 	.map_gen_lookup = htab_of_map_gen_lookup,
+	.map_check_btf = map_check_no_btf,
 };
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -334,7 +334,8 @@ static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
 	struct bpf_map *map = arg;
 
 	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
-			     map->btf ? &bpffs_map_fops : &bpffs_obj_fops);
+			     bpf_map_support_seq_show(map) ?
+			     &bpffs_map_fops : &bpffs_obj_fops);
 }
 
 static struct dentry *
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -246,6 +246,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
 	.map_lookup_elem = cgroup_storage_lookup_elem,
 	.map_update_elem = cgroup_storage_update_elem,
 	.map_delete_elem = cgroup_storage_delete_elem,
+	.map_check_btf = map_check_no_btf,
 };
 
 int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -10,11 +10,13 @@
  */
 
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <net/ipv6.h>
+#include <uapi/linux/btf.h>
 
 /* Intermediate node */
 #define LPM_TREE_NODE_FLAG_IM BIT(0)
@@ -686,6 +688,15 @@ free_stack:
 	return err;
 }
 
+static int trie_check_btf(const struct bpf_map *map,
+			  const struct btf_type *key_type,
+			  const struct btf_type *value_type)
+{
+	/* Keys must have struct bpf_lpm_trie_key embedded. */
+	return BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ?
+	       -EINVAL : 0;
+}
+
 const struct bpf_map_ops trie_map_ops = {
 	.map_alloc = trie_alloc,
 	.map_free = trie_free,
@@ -693,4 +704,5 @@ const struct bpf_map_ops trie_map_ops = {
 	.map_lookup_elem = trie_lookup_elem,
 	.map_update_elem = trie_update_elem,
 	.map_delete_elem = trie_delete_elem,
+	.map_check_btf = trie_check_btf,
 };
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -2495,6 +2495,7 @@ const struct bpf_map_ops sock_map_ops = {
 	.map_update_elem = sock_map_update_elem,
 	.map_delete_elem = sock_map_delete_elem,
 	.map_release_uref = sock_map_release,
+	.map_check_btf = map_check_no_btf,
 };
 
 const struct bpf_map_ops sock_hash_ops = {
@@ -2505,6 +2506,7 @@ const struct bpf_map_ops sock_hash_ops = {
 	.map_update_elem = sock_hash_update_elem,
 	.map_delete_elem = sock_hash_delete_elem,
 	.map_release_uref = sock_map_release,
+	.map_check_btf = map_check_no_btf,
 };
 
 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -607,6 +607,7 @@ const struct bpf_map_ops stack_map_ops = {
 	.map_lookup_elem = stack_map_lookup_elem,
 	.map_update_elem = stack_map_update_elem,
 	.map_delete_elem = stack_map_delete_elem,
+	.map_check_btf = map_check_no_btf,
 };
 
 static int __init stack_map_init(void)
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -103,6 +103,7 @@ int bpf_check_uarg_tail_zero(void __user *uaddr,
 const struct bpf_map_ops bpf_map_offload_ops = {
 	.map_alloc = bpf_map_offload_map_alloc,
 	.map_free = bpf_map_offload_map_free,
+	.map_check_btf = map_check_no_btf,
 };
 
 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
@@ -455,6 +456,34 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
 	return 0;
 }
 
+int map_check_no_btf(const struct bpf_map *map,
+		     const struct btf_type *key_type,
+		     const struct btf_type *value_type)
+{
+	return -ENOTSUPP;
+}
+
+static int map_check_btf(const struct bpf_map *map, const struct btf *btf,
+			 u32 btf_key_id, u32 btf_value_id)
+{
+	const struct btf_type *key_type, *value_type;
+	u32 key_size, value_size;
+	int ret = 0;
+
+	key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
+	if (!key_type || key_size != map->key_size)
+		return -EINVAL;
+
+	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
+	if (!value_type || value_size != map->value_size)
+		return -EINVAL;
+
+	if (map->ops->map_check_btf)
+		ret = map->ops->map_check_btf(map, key_type, value_type);
+
+	return ret;
+}
+
 #define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
@@ -489,8 +518,7 @@ static int map_create(union bpf_attr *attr)
 	atomic_set(&map->refcnt, 1);
 	atomic_set(&map->usercnt, 1);
 
-	if (bpf_map_support_seq_show(map) &&
-	    (attr->btf_key_type_id || attr->btf_value_type_id)) {
+	if (attr->btf_key_type_id || attr->btf_value_type_id) {
 		struct btf *btf;
 
 		if (!attr->btf_key_type_id || !attr->btf_value_type_id) {
@@ -504,7 +532,7 @@ static int map_create(union bpf_attr *attr)
 			goto free_map_nouncharge;
 		}
 
-		err = map->ops->map_check_btf(map, btf, attr->btf_key_type_id,
+		err = map_check_btf(map, btf, attr->btf_key_type_id,
 				    attr->btf_value_type_id);
 		if (err) {
 			btf_put(btf);
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -227,6 +227,5 @@ const struct bpf_map_ops xsk_map_ops = {
 	.map_lookup_elem = xsk_map_lookup_elem,
 	.map_update_elem = xsk_map_update_elem,
 	.map_delete_elem = xsk_map_delete_elem,
+	.map_check_btf = map_check_no_btf,
 };
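As a usage sketch from the userspace side (not part of this patch; the wrapper name and argument choices are hypothetical), the btf_fd/btf_key_type_id/btf_value_type_id attributes consumed by the map_create() path above could be supplied like this when creating an array map:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helper: create an ARRAY map with BTF key/value type IDs
 * attached. Assumes btf_fd refers to a BTF object already loaded via
 * BPF_BTF_LOAD, and key_id/value_id describe a u32 key and the value type.
 */
static int example_create_map_with_btf(int btf_fd, __u32 key_id, __u32 value_id,
					__u32 value_size, __u32 max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = 4;		/* array maps take a u32 key */
	attr.value_size = value_size;
	attr.max_entries = max_entries;
	attr.btf_fd = btf_fd;
	attr.btf_key_type_id = key_id;
	attr.btf_value_type_id = value_id;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}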