libbpf: Add "map_extra" as a per-map-type extra flag
This patch adds the libbpf infrastructure for supporting a per-map-type "map_extra" field, whose definition will be idiosyncratic depending on map type. For example, for the bloom filter map, the lower 4 bits of map_extra are used to denote the number of hash functions.

Please note that until libbpf 1.0 is here, the "bpf_create_map_params" struct is used as a temporary means for propagating the map_extra field to the kernel.

Signed-off-by: Joanne Koong <joannekoong@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20211027234504.30744-3-joannekoong@fb.com
commit 47512102cd
parent 9330986c03
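For context, a minimal user-space sketch (not part of this patch) of how the new getter/setter pair might be used. The object file "bloom_filter.bpf.o" and the map name "bloom_map" are hypothetical; the value 3 lands in the lower 4 bits of map_extra, which the bloom filter map type interprets as the number of hash functions. As the implementation below shows, bpf_map__set_map_extra() must run before bpf_object__load(); once the map has an fd it returns -EBUSY.

/* Minimal sketch, not part of this patch. Object file and map name are
 * hypothetical; APIs are the ones added/used by this patch and existing
 * libbpf object APIs.
 */
#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	obj = bpf_object__open_file("bloom_filter.bpf.o", NULL);
	err = libbpf_get_error(obj);
	if (err)
		return 1;

	map = bpf_object__find_map_by_name(obj, "bloom_map");
	if (!map) {
		err = -ENOENT;
		goto out;
	}

	/* must be called before bpf_object__load(); the setter rejects
	 * maps that already have an fd with -EBUSY
	 */
	err = bpf_map__set_map_extra(map, 3);	/* 3 hash functions */
	if (err)
		goto out;

	err = bpf_object__load(obj);
	if (err)
		goto out;

	printf("map_extra = 0x%llx\n",
	       (unsigned long long)bpf_map__map_extra(map));
out:
	bpf_object__close(obj);
	return err ? 1 : 0;
}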
tools/lib/bpf/bpf.c
@@ -77,7 +77,7 @@ static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
 	return fd;
 }
 
-int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
+int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr)
 {
 	union bpf_attr attr;
 	int fd;
@@ -102,11 +102,36 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
 			create_attr->btf_vmlinux_value_type_id;
 	else
 		attr.inner_map_fd = create_attr->inner_map_fd;
+	attr.map_extra = create_attr->map_extra;
 
 	fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 	return libbpf_err_errno(fd);
 }
 
+int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
+{
+	struct bpf_create_map_params p = {};
+
+	p.map_type = create_attr->map_type;
+	p.key_size = create_attr->key_size;
+	p.value_size = create_attr->value_size;
+	p.max_entries = create_attr->max_entries;
+	p.map_flags = create_attr->map_flags;
+	p.name = create_attr->name;
+	p.numa_node = create_attr->numa_node;
+	p.btf_fd = create_attr->btf_fd;
+	p.btf_key_type_id = create_attr->btf_key_type_id;
+	p.btf_value_type_id = create_attr->btf_value_type_id;
+	p.map_ifindex = create_attr->map_ifindex;
+	if (p.map_type == BPF_MAP_TYPE_STRUCT_OPS)
+		p.btf_vmlinux_value_type_id =
+			create_attr->btf_vmlinux_value_type_id;
+	else
+		p.inner_map_fd = create_attr->inner_map_fd;
+
+	return libbpf__bpf_create_map_xattr(&p);
+}
+
 int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
 			int key_size, int value_size, int max_entries,
 			__u32 map_flags, int node)
tools/lib/bpf/bpf_gen_internal.h
@@ -43,7 +43,7 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level);
 int bpf_gen__finish(struct bpf_gen *gen);
 void bpf_gen__free(struct bpf_gen *gen);
 void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
-void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_attr *map_attr, int map_idx);
+void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx);
 struct bpf_prog_load_params;
 void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx);
 void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
tools/lib/bpf/gen_loader.c
@@ -431,7 +431,7 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
 }
 
 void bpf_gen__map_create(struct bpf_gen *gen,
-			 struct bpf_create_map_attr *map_attr, int map_idx)
+			 struct bpf_create_map_params *map_attr, int map_idx)
 {
 	int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id);
 	bool close_inner_map_fd = false;
@@ -443,6 +443,7 @@ void bpf_gen__map_create(struct bpf_gen *gen,
 	attr.key_size = map_attr->key_size;
 	attr.value_size = map_attr->value_size;
 	attr.map_flags = map_attr->map_flags;
+	attr.map_extra = map_attr->map_extra;
 	memcpy(attr.map_name, map_attr->name,
 	       min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1));
 	attr.numa_node = map_attr->numa_node;
tools/lib/bpf/libbpf.c
@@ -400,6 +400,7 @@ struct bpf_map {
 	char *pin_path;
 	bool pinned;
 	bool reused;
+	__u64 map_extra;
 };
 
 enum extern_type {
@@ -2324,6 +2325,13 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
 			}
 			map_def->pinning = val;
 			map_def->parts |= MAP_DEF_PINNING;
+		} else if (strcmp(name, "map_extra") == 0) {
+			__u32 map_extra;
+
+			if (!get_map_field_int(map_name, btf, m, &map_extra))
+				return -EINVAL;
+			map_def->map_extra = map_extra;
+			map_def->parts |= MAP_DEF_MAP_EXTRA;
 		} else {
 			if (strict) {
 				pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
@@ -2348,6 +2356,7 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
 	map->def.value_size = def->value_size;
 	map->def.max_entries = def->max_entries;
 	map->def.map_flags = def->map_flags;
+	map->map_extra = def->map_extra;
 
 	map->numa_node = def->numa_node;
 	map->btf_key_type_id = def->key_type_id;
@@ -2371,7 +2380,10 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
 	if (def->parts & MAP_DEF_MAX_ENTRIES)
 		pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
 	if (def->parts & MAP_DEF_MAP_FLAGS)
-		pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags);
+		pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
+	if (def->parts & MAP_DEF_MAP_EXTRA)
+		pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
+			 (unsigned long long)def->map_extra);
 	if (def->parts & MAP_DEF_PINNING)
 		pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
 	if (def->parts & MAP_DEF_NUMA_NODE)
@@ -4210,6 +4222,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
 	map->btf_key_type_id = info.btf_key_type_id;
 	map->btf_value_type_id = info.btf_value_type_id;
 	map->reused = true;
+	map->map_extra = info.map_extra;
 
 	return 0;
 
@@ -4724,7 +4737,8 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
 		map_info.key_size == map->def.key_size &&
 		map_info.value_size == map->def.value_size &&
 		map_info.max_entries == map->def.max_entries &&
-		map_info.map_flags == map->def.map_flags);
+		map_info.map_flags == map->def.map_flags &&
+		map_info.map_extra == map->map_extra);
 }
 
 static int
@@ -4807,7 +4821,7 @@ static void bpf_map__destroy(struct bpf_map *map);
 
 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
 {
-	struct bpf_create_map_attr create_attr;
+	struct bpf_create_map_params create_attr;
 	struct bpf_map_def *def = &map->def;
 	int err = 0;
 
@@ -4821,6 +4835,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
 	create_attr.key_size = def->key_size;
 	create_attr.value_size = def->value_size;
 	create_attr.numa_node = map->numa_node;
+	create_attr.map_extra = map->map_extra;
 
 	if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
 		int nr_cpus;
@@ -4895,7 +4910,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
 		 */
 		map->fd = 0;
 	} else {
-		map->fd = bpf_create_map_xattr(&create_attr);
+		map->fd = libbpf__bpf_create_map_xattr(&create_attr);
 	}
 	if (map->fd < 0 && (create_attr.btf_key_type_id ||
 			    create_attr.btf_value_type_id)) {
@@ -4910,7 +4925,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
 		create_attr.btf_value_type_id = 0;
 		map->btf_key_type_id = 0;
 		map->btf_value_type_id = 0;
-		map->fd = bpf_create_map_xattr(&create_attr);
+		map->fd = libbpf__bpf_create_map_xattr(&create_attr);
 	}
 
 	err = map->fd < 0 ? -errno : 0;
@@ -8880,6 +8895,19 @@ int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
 	return 0;
 }
 
+__u64 bpf_map__map_extra(const struct bpf_map *map)
+{
+	return map->map_extra;
+}
+
+int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
+{
+	if (map->fd >= 0)
+		return libbpf_err(-EBUSY);
+	map->map_extra = map_extra;
+	return 0;
+}
+
 __u32 bpf_map__numa_node(const struct bpf_map *map)
 {
 	return map->numa_node;
tools/lib/bpf/libbpf.h
@@ -600,6 +600,9 @@ LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
 /* get/set map if_index */
 LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map);
 LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
+/* get/set map map_extra flags */
+LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
 
 typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
 LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
tools/lib/bpf/libbpf.map
@@ -389,6 +389,8 @@ LIBBPF_0.5.0 {
 
 LIBBPF_0.6.0 {
 	global:
+		bpf_map__map_extra;
+		bpf_map__set_map_extra;
 		bpf_object__next_map;
 		bpf_object__next_program;
 		bpf_object__prev_map;
tools/lib/bpf/libbpf_internal.h
@@ -193,8 +193,9 @@ enum map_def_parts {
 	MAP_DEF_NUMA_NODE	= 0x080,
 	MAP_DEF_PINNING		= 0x100,
 	MAP_DEF_INNER_MAP	= 0x200,
+	MAP_DEF_MAP_EXTRA	= 0x400,
 
-	MAP_DEF_ALL		= 0x3ff, /* combination of all above */
+	MAP_DEF_ALL		= 0x7ff, /* combination of all above */
 };
 
 struct btf_map_def {
@@ -208,6 +209,7 @@ struct btf_map_def {
 	__u32 map_flags;
 	__u32 numa_node;
 	__u32 pinning;
+	__u64 map_extra;
 };
 
 int parse_btf_map_def(const char *map_name, struct btf *btf,
@@ -303,6 +305,27 @@ struct bpf_prog_load_params {
 
 int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);
 
+struct bpf_create_map_params {
+	const char *name;
+	enum bpf_map_type map_type;
+	__u32 map_flags;
+	__u32 key_size;
+	__u32 value_size;
+	__u32 max_entries;
+	__u32 numa_node;
+	__u32 btf_fd;
+	__u32 btf_key_type_id;
+	__u32 btf_value_type_id;
+	__u32 map_ifindex;
+	union {
+		__u32 inner_map_fd;
+		__u32 btf_vmlinux_value_type_id;
+	};
+	__u64 map_extra;
+};
+
+int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr);
+
 struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
 void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
 				const char **prefix, int *kind);
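The parse_btf_map_def() branch added above also lets BPF programs declare map_extra directly in a BTF-defined map. A sketch of what such a declaration might look like on the BPF side (the map name, max_entries, value type, and the use of BPF_MAP_TYPE_BLOOM_FILTER from the parent kernel patch are illustrative, not taken from this diff):

/* BPF-side sketch, not part of this patch: a BTF-defined map setting
 * map_extra in its SEC(".maps") definition. Requires UAPI headers that
 * include BPF_MAP_TYPE_BLOOM_FILTER.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
	__uint(max_entries, 1000);
	__uint(map_extra, 3);	/* lower 4 bits: number of hash functions */
	__type(value, __u32);
} bloom_map SEC(".maps");

char _license[] SEC("license") = "GPL";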