libbpf: Add "map_extra" as a per-map-type extra flag
This patch adds the libbpf infrastructure for supporting a per-map-type "map_extra" field, whose definition will be idiosyncratic depending on map type. For example, for the bloom filter map, the lower 4 bits of map_extra is used to denote the number of hash functions. Please note that until libbpf 1.0 is here, the "bpf_create_map_params" struct is used as a temporary means for propagating the map_extra field to the kernel. Signed-off-by: Joanne Koong <joannekoong@fb.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Andrii Nakryiko <andrii@kernel.org> Link: https://lore.kernel.org/bpf/20211027234504.30744-3-joannekoong@fb.com
This commit is contained in:
committed by
Alexei Starovoitov
parent
9330986c03
commit
47512102cd
@@ -400,6 +400,7 @@ struct bpf_map {
|
||||
char *pin_path;
|
||||
bool pinned;
|
||||
bool reused;
|
||||
__u64 map_extra;
|
||||
};
|
||||
|
||||
enum extern_type {
|
||||
@@ -2324,6 +2325,13 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
|
||||
}
|
||||
map_def->pinning = val;
|
||||
map_def->parts |= MAP_DEF_PINNING;
|
||||
} else if (strcmp(name, "map_extra") == 0) {
|
||||
__u32 map_extra;
|
||||
|
||||
if (!get_map_field_int(map_name, btf, m, &map_extra))
|
||||
return -EINVAL;
|
||||
map_def->map_extra = map_extra;
|
||||
map_def->parts |= MAP_DEF_MAP_EXTRA;
|
||||
} else {
|
||||
if (strict) {
|
||||
pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
|
||||
@@ -2348,6 +2356,7 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
|
||||
map->def.value_size = def->value_size;
|
||||
map->def.max_entries = def->max_entries;
|
||||
map->def.map_flags = def->map_flags;
|
||||
map->map_extra = def->map_extra;
|
||||
|
||||
map->numa_node = def->numa_node;
|
||||
map->btf_key_type_id = def->key_type_id;
|
||||
@@ -2371,7 +2380,10 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
|
||||
if (def->parts & MAP_DEF_MAX_ENTRIES)
|
||||
pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
|
||||
if (def->parts & MAP_DEF_MAP_FLAGS)
|
||||
pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags);
|
||||
pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
|
||||
if (def->parts & MAP_DEF_MAP_EXTRA)
|
||||
pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
|
||||
(unsigned long long)def->map_extra);
|
||||
if (def->parts & MAP_DEF_PINNING)
|
||||
pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
|
||||
if (def->parts & MAP_DEF_NUMA_NODE)
|
||||
@@ -4210,6 +4222,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
|
||||
map->btf_key_type_id = info.btf_key_type_id;
|
||||
map->btf_value_type_id = info.btf_value_type_id;
|
||||
map->reused = true;
|
||||
map->map_extra = info.map_extra;
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -4724,7 +4737,8 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
|
||||
map_info.key_size == map->def.key_size &&
|
||||
map_info.value_size == map->def.value_size &&
|
||||
map_info.max_entries == map->def.max_entries &&
|
||||
map_info.map_flags == map->def.map_flags);
|
||||
map_info.map_flags == map->def.map_flags &&
|
||||
map_info.map_extra == map->map_extra);
|
||||
}
|
||||
|
||||
static int
|
||||
@@ -4807,7 +4821,7 @@ static void bpf_map__destroy(struct bpf_map *map);
|
||||
|
||||
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
|
||||
{
|
||||
struct bpf_create_map_attr create_attr;
|
||||
struct bpf_create_map_params create_attr;
|
||||
struct bpf_map_def *def = &map->def;
|
||||
int err = 0;
|
||||
|
||||
@@ -4821,6 +4835,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
|
||||
create_attr.key_size = def->key_size;
|
||||
create_attr.value_size = def->value_size;
|
||||
create_attr.numa_node = map->numa_node;
|
||||
create_attr.map_extra = map->map_extra;
|
||||
|
||||
if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
|
||||
int nr_cpus;
|
||||
@@ -4895,7 +4910,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
|
||||
*/
|
||||
map->fd = 0;
|
||||
} else {
|
||||
map->fd = bpf_create_map_xattr(&create_attr);
|
||||
map->fd = libbpf__bpf_create_map_xattr(&create_attr);
|
||||
}
|
||||
if (map->fd < 0 && (create_attr.btf_key_type_id ||
|
||||
create_attr.btf_value_type_id)) {
|
||||
@@ -4910,7 +4925,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
|
||||
create_attr.btf_value_type_id = 0;
|
||||
map->btf_key_type_id = 0;
|
||||
map->btf_value_type_id = 0;
|
||||
map->fd = bpf_create_map_xattr(&create_attr);
|
||||
map->fd = libbpf__bpf_create_map_xattr(&create_attr);
|
||||
}
|
||||
|
||||
err = map->fd < 0 ? -errno : 0;
|
||||
@@ -8880,6 +8895,19 @@ int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
|
||||
return 0;
|
||||
}
|
||||
|
||||
__u64 bpf_map__map_extra(const struct bpf_map *map)
|
||||
{
|
||||
return map->map_extra;
|
||||
}
|
||||
|
||||
/*
 * Set the map's "map_extra" value (per-map-type extra field; meaning is
 * idiosyncratic to the map type).
 *
 * Returns 0 on success, or -EBUSY (via libbpf_err()) if the map has
 * already been created in the kernel (fd >= 0), since map_extra can
 * only influence map creation.
 */
int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
{
	if (map->fd < 0) {
		map->map_extra = map_extra;
		return 0;
	}
	/* map already exists in the kernel; too late to change map_extra */
	return libbpf_err(-EBUSY);
}
|
||||
|
||||
__u32 bpf_map__numa_node(const struct bpf_map *map)
|
||||
{
|
||||
return map->numa_node;
|
||||
|
||||
Reference in New Issue
Block a user