libbpf: add resizable non-thread safe internal hashmap
There is a need for fast point lookups inside libbpf for multiple use
cases (e.g., name resolution for BTF-to-C conversion, by-name lookups in
BTF for upcoming BPF CO-RE relocation support, etc.). This patch
implements a simple, resizable, non-thread-safe hashmap using singly
linked list chains.
Four different insert strategies are supported (see the usage sketch
below):
- HASHMAP_ADD - only add the key/value pair if the key doesn't exist yet;
- HASHMAP_SET - add the key/value pair if the key doesn't exist yet;
  otherwise, update the value;
- HASHMAP_UPDATE - update the value if the key already exists; otherwise,
  do nothing and return -ENOENT;
- HASHMAP_APPEND - always add the key/value pair, even if the key already
  exists. This turns the hashmap into a multimap by allowing multiple
  values to be associated with the same key. The most useful read API for
  such a hashmap is hashmap__for_each_key_entry() iteration. If
  hashmap__find() is still used, it will return the last inserted
  key/value entry (the first one in a bucket chain).
For HASHMAP_SET and HASHMAP_UPDATE, the old key/value pair is returned,
so that calling code can handle memory management properly, if necessary.
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2019-05-24 18:59:00 +00:00
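
Usage sketch (illustration only, not part of this file): the
hashmap__add/set/update/append/find/free wrappers are assumed to be the
macro layer declared in hashmap.h; hash_identity and equal_direct are
hypothetical callbacks.

#include <stdio.h>
#include <linux/err.h>
#include "hashmap.h"

static size_t hash_identity(long key, void *ctx) { return (size_t)key; }
static bool equal_direct(long k1, long k2, void *ctx) { return k1 == k2; }

int main(void)
{
	struct hashmap *map = hashmap__new(hash_identity, equal_direct, NULL);
	long old_val = 0, val = 0;

	if (IS_ERR(map))
		return 1;

	hashmap__add(map, 1, 100);		   /* ok: key 1 is new */
	hashmap__add(map, 1, 200);		   /* fails with -EEXIST */
	hashmap__set(map, 1, 300, NULL, &old_val); /* overwrites; old_val = 100 */
	hashmap__update(map, 2, 400, NULL, NULL);  /* fails with -ENOENT */
	hashmap__append(map, 1, 500);		   /* multimap: 2nd entry for key 1 */

	if (hashmap__find(map, 1, &val))	   /* sees last inserted entry */
		printf("key 1 -> %ld\n", val);	   /* prints 500 */

	hashmap__free(map);
	return 0;
}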

// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Generic non-thread safe hash map implementation.
 *
 * Copyright (c) 2019 Facebook
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <linux/err.h>
#include "hashmap.h"

libbpf: Poison kernel-only integer types
It's been a recurring issue with types like u32 slipping into libbpf source
code accidentally. This is not detected during builds inside the kernel source
tree, but becomes a compilation error in libbpf's GitHub repo. Libbpf is
supposed to use only __{s,u}{8,16,32,64} typedefs, so poison {s,u}{8,16,32,64}
explicitly in every .c file. Doing that in a more centralized way, e.g.,
inside libbpf_internal.h, breaks selftests, which use both the kernel's u32
and libbpf_internal.h. A short illustration follows the pragmas below.
This patch also fixes a new u32 occurrence in libbpf.c, added recently.
Fixes: 590a00888250 ("bpf: libbpf: Add STRUCT_OPS support")
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/20200110181916.271446-1-andriin@fb.com
2020-01-10 18:19:16 +00:00

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

/* prevent accidental re-addition of reallocarray() */
#pragma GCC poison reallocarray
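
A standalone sketch of what the poison mechanism buys us (illustration
only; the exact diagnostic wording is compiler-dependent):

#include <stdint.h>

#pragma GCC poison u32

uint32_t ok_counter;	/* fine: the fixed-width typedef is the allowed spelling */
/* u32 bad_counter; */	/* uncommenting fails: attempt to use poisoned "u32" */

int main(void)
{
	return 0;
}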

/* start with 4 buckets */
#define HASHMAP_MIN_CAP_BITS 2

static void hashmap_add_entry(struct hashmap_entry **pprev,
			      struct hashmap_entry *entry)
{
	entry->next = *pprev;
	*pprev = entry;
}

static void hashmap_del_entry(struct hashmap_entry **pprev,
			      struct hashmap_entry *entry)
{
	*pprev = entry->next;
	entry->next = NULL;
}
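
Both helpers operate on the address of a "next" slot rather than on a
node, which makes head insertion and interior deletion uniform and
branch-free. A generic sketch of the same pointer-to-pointer idiom on a
toy list (hypothetical struct node; not libbpf code):

#include <stddef.h>

struct node {
	int val;
	struct node *next;
};

/* unlink the first node matching val; the head and interior nodes are
 * handled identically because we always hold the address of a next slot */
static struct node *list_remove(struct node **head, int val)
{
	struct node **pprev, *cur;

	for (pprev = head, cur = *pprev; cur;
	     pprev = &cur->next, cur = cur->next) {
		if (cur->val == val) {
			*pprev = cur->next;	/* same as hashmap_del_entry() */
			cur->next = NULL;
			return cur;
		}
	}
	return NULL;
}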

void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
		   hashmap_equal_fn equal_fn, void *ctx)
{
	map->hash_fn = hash_fn;
	map->equal_fn = equal_fn;
	map->ctx = ctx;

	map->buckets = NULL;
	map->cap = 0;
	map->cap_bits = 0;
	map->sz = 0;
}

struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
			     hashmap_equal_fn equal_fn,
			     void *ctx)
{
	struct hashmap *map = malloc(sizeof(struct hashmap));

	if (!map)
		return ERR_PTR(-ENOMEM);
	hashmap__init(map, hash_fn, equal_fn, ctx);
	return map;
}
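
Note that hashmap__new() signals allocation failure via ERR_PTR() from
linux/err.h rather than via NULL, so callers must test with IS_ERR(). A
minimal caller sketch, reusing the hypothetical callbacks from the first
example:

#include <stdio.h>
#include <linux/err.h>
#include "hashmap.h"

int new_map_demo(void)
{
	struct hashmap *map = hashmap__new(hash_identity, equal_direct, NULL);

	if (IS_ERR(map)) {
		fprintf(stderr, "hashmap__new: %ld\n", PTR_ERR(map));
		return 1;
	}

	/* ... use map ... */

	hashmap__free(map);	/* safe: hashmap__free() checks IS_ERR_OR_NULL() */
	return 0;
}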

void hashmap__clear(struct hashmap *map)
{
	struct hashmap_entry *cur, *tmp;
	size_t bkt;

	hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
		free(cur);
	}
	free(map->buckets);
	map->buckets = NULL;
	map->cap = map->cap_bits = map->sz = 0;
}
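
hashmap__for_each_entry_safe() is defined in hashmap.h (not shown here).
A sketch of the usual shape of such a deletion-safe iterator, under the
assumption that it caches the next pointer before the loop body runs;
this is not the verbatim hashmap.h definition:

/* tmp caches cur->next up front, so the body may free(cur) safely */
#define hashmap__for_each_entry_safe(map, cur, tmp, bkt)		    \
	for (bkt = 0; bkt < (map)->cap; bkt++)				    \
		for (cur = (map)->buckets ? (map)->buckets[bkt] : NULL;	    \
		     cur && ({ tmp = cur->next; true; });		    \
		     cur = tmp)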

void hashmap__free(struct hashmap *map)
{
	if (IS_ERR_OR_NULL(map))
		return;

	hashmap__clear(map);
	free(map);
}

size_t hashmap__size(const struct hashmap *map)
{
	return map->sz;
}

size_t hashmap__capacity(const struct hashmap *map)
{
	return map->cap;
}

static bool hashmap_needs_to_grow(struct hashmap *map)
{
	/* grow if empty or more than 75% filled */
	return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap);
}
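
To make the integer arithmetic of the 75% threshold concrete, a tiny
standalone demo (illustration only):

#include <stdio.h>

int main(void)
{
	size_t cap = 4, sz;

	/* with cap = 4, growth triggers when inserting the 4th element:
	 * (3 + 1) * 4 / 3 == 5 > 4, i.e. once the map would exceed 75% full */
	for (sz = 0; sz < 4; sz++)
		printf("sz=%zu -> grow before insert? %s\n",
		       sz, (sz + 1) * 4 / 3 > cap ? "yes" : "no");
	return 0;
}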

static int hashmap_grow(struct hashmap *map)
{
	struct hashmap_entry **new_buckets;
	struct hashmap_entry *cur, *tmp;
	size_t new_cap_bits, new_cap;
	size_t h, bkt;

	new_cap_bits = map->cap_bits + 1;
	if (new_cap_bits < HASHMAP_MIN_CAP_BITS)
		new_cap_bits = HASHMAP_MIN_CAP_BITS;

	new_cap = 1UL << new_cap_bits;
	new_buckets = calloc(new_cap, sizeof(new_buckets[0]));
	if (!new_buckets)
		return -ENOMEM;

	hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
		h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
		hashmap_add_entry(&new_buckets[h], cur);
	}

	map->cap = new_cap;
	map->cap_bits = new_cap_bits;
	free(map->buckets);
	map->buckets = new_buckets;

	return 0;
}
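
hash_bits() also lives in hashmap.h. Since capacities are powers of two,
it has to condense a full-width hash into cap_bits bucket-index bits. A
sketch of one common way to do that, multiplicative (Fibonacci) hashing;
the constant and exact form here are an assumption, not the verbatim
libbpf definition:

#include <stddef.h>
#include <stdint.h>

/* mix the hash by multiplying with a 64-bit odd constant (~2^64 / golden
 * ratio), then keep the top `bits` bits as the bucket index */
static inline size_t hash_bits_sketch(size_t h, int bits)
{
	if (bits == 0)
		return 0;
	return ((uint64_t)h * 11400714819323198485llu) >> (64 - bits);
}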

libbpf: Hashmap interface update to allow both long and void* keys/values
An update of libbpf's hashmap interface from a void*-to-void* one to a
polymorphic one, allowing both long and void* keys and values.
This simplifies many use cases in libbpf, as hashmaps there are mostly
integer to integer.
Perf copies the hashmap implementation from libbpf and has to be
updated as well.
Changes to libbpf, selftests/bpf and perf are packed as a single
commit to avoid compilation issues with any future bisect.
The polymorphic interface is achieved by hiding the hashmap interface
functions behind auxiliary macros that take care of the necessary
type casts, for example:
#define hashmap_cast_ptr(p) \
    ({ \
        _Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long),\
                       #p " pointee should be a long-sized integer or a pointer"); \
        (long *)(p); \
    })
bool hashmap_find(const struct hashmap *map, long key, long *value);
#define hashmap__find(map, key, value) \
    hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
- the hashmap__find macro casts the key and value parameters to long
  and long*, respectively;
- hashmap_cast_ptr ensures that the value pointer points to memory
  of the appropriate size.
This hack was suggested by Andrii Nakryiko in [1].
This is a follow-up for [2].
[1] https://lore.kernel.org/bpf/CAEf4BzZ8KFneEJxFAaNCCFPGqp20hSpS2aCj76uRk3-qZUH5xg@mail.gmail.com/
[2] https://lore.kernel.org/bpf/af1facf9-7bc8-8a3d-0db4-7b3f333589a2@meta.com/T/#m65b28f1d6d969fcd318b556db6a3ad499a42607d
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20221109142611.879983-2-eddyz87@gmail.com
2022-11-09 14:26:09 +00:00

static bool hashmap_find_entry(const struct hashmap *map,
			       const long key, size_t hash,
			       struct hashmap_entry ***pprev,
			       struct hashmap_entry **entry)
{
	struct hashmap_entry *cur, **prev_ptr;

	if (!map->buckets)
		return false;

	for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
	     cur;
	     prev_ptr = &cur->next, cur = cur->next) {
		if (map->equal_fn(cur->key, key, map->ctx)) {
			if (pprev)
				*pprev = prev_ptr;
			*entry = cur;
			return true;
		}
	}

	return false;
}

int hashmap_insert(struct hashmap *map, long key, long value,
		   enum hashmap_insert_strategy strategy,
		   long *old_key, long *old_value)
{
	struct hashmap_entry *entry;
	size_t h;
	int err;

	if (old_key)
		*old_key = 0;
	if (old_value)
		*old_value = 0;

	h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	if (strategy != HASHMAP_APPEND &&
	    hashmap_find_entry(map, key, h, NULL, &entry)) {
		if (old_key)
			*old_key = entry->key;
		if (old_value)
			*old_value = entry->value;

		if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) {
			entry->key = key;
			entry->value = value;
			return 0;
		} else if (strategy == HASHMAP_ADD) {
			return -EEXIST;
		}
	}

	if (strategy == HASHMAP_UPDATE)
		return -ENOENT;

	if (hashmap_needs_to_grow(map)) {
		err = hashmap_grow(map);
		if (err)
			return err;
		h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	}

	entry = malloc(sizeof(struct hashmap_entry));
	if (!entry)
		return -ENOMEM;

	entry->key = key;
	entry->value = value;
	hashmap_add_entry(&map->buckets[h], entry);
	map->sz++;

	return 0;
}
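
As the original commit message notes, HASHMAP_SET hands the displaced
key/value back to the caller so owned memory can be released. A sketch
with heap-allocated string values (hypothetical helper, assuming the
hashmap__set wrapper macro from hashmap.h):

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "hashmap.h"

/* sketch: values are strdup()'d strings owned by the map's user */
static int set_owned(struct hashmap *map, long key, const char *s)
{
	char *val = strdup(s);
	char *old_value = NULL;
	int err;

	if (!val)
		return -ENOMEM;

	err = hashmap__set(map, key, val, NULL, &old_value);
	if (err) {
		free(val);	/* insertion failed, don't leak the copy */
		return err;
	}

	free(old_value);	/* release the displaced value, if any */
	return 0;
}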

bool hashmap_find(const struct hashmap *map, long key, long *value)
{
	struct hashmap_entry *entry;
	size_t h;

	h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	if (!hashmap_find_entry(map, key, h, NULL, &entry))
		return false;

	if (value)
		*value = entry->value;
	return true;
}
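
A short sketch of the hashmap__find wrapper in action, including the
_Static_assert guard quoted in the commit message above (illustrative
code, assuming an integer-keyed map):

#include <stdio.h>
#include "hashmap.h"

/* the out-parameter just has to be long-sized; both a long and a
 * pointer lvalue satisfy hashmap_cast_ptr()'s _Static_assert */
void lookup_demo(const struct hashmap *map)
{
	long lval;
	void *pval;

	if (hashmap__find(map, 42, &lval))
		printf("42 -> %ld\n", lval);
	if (hashmap__find(map, 43, &pval))	/* value stored as a pointer */
		printf("43 -> %p\n", pval);

	/* int ival; hashmap__find(map, 42, &ival);
	 * would not compile: the assert rejects the int-sized pointee */
}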

bool hashmap_delete(struct hashmap *map, long key,
		    long *old_key, long *old_value)
{
	struct hashmap_entry **pprev, *entry;
	size_t h;

	h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	if (!hashmap_find_entry(map, key, h, &pprev, &entry))
		return false;

	if (old_key)
		*old_key = entry->key;
	if (old_value)
		*old_value = entry->value;

	hashmap_del_entry(pprev, entry);
	free(entry);
	map->sz--;

	return true;
}