bpf: hashtab: move checks out of alloc function
Use the new callback to perform allocation checks for hash maps.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent daffc5a2e6
commit 9328e0d1bc
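For context, the parent commit introduced a map_alloc_check callback in struct bpf_map_ops so attribute validation can run before any memory is committed. Below is a simplified sketch (not the verbatim kernel code) of how the generic map-creation path in kernel/bpf/syscall.c is expected to invoke the callback ahead of ->map_alloc:

/* Sketch of the caller side, modeled on find_and_alloc_map() in
 * kernel/bpf/syscall.c as reshaped by the parent commit; details are
 * simplified and should be checked against the tree.
 */
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
        const struct bpf_map_ops *ops;
        struct bpf_map *map;
        int err;

        if (attr->map_type >= ARRAY_SIZE(bpf_map_types))
                return ERR_PTR(-EINVAL);
        ops = bpf_map_types[attr->map_type];
        if (!ops)
                return ERR_PTR(-EINVAL);

        if (ops->map_alloc_check) {
                /* reject bad attributes before allocating anything */
                err = ops->map_alloc_check(attr);
                if (err)
                        return ERR_PTR(err);
        }

        map = ops->map_alloc(attr);
        if (IS_ERR(map))
                return map;
        map->ops = ops;
        map->map_type = attr->map_type;
        return map;
}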
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -227,6 +227,70 @@ static int alloc_extra_elems(struct bpf_htab *htab)
 }
 
 /* Called from syscall */
+static int htab_map_alloc_check(union bpf_attr *attr)
+{
+        bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+                       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+        bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
+                    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+        /* percpu_lru means each cpu has its own LRU list.
+         * it is different from BPF_MAP_TYPE_PERCPU_HASH where
+         * the map's value itself is percpu.  percpu_lru has
+         * nothing to do with the map's value.
+         */
+        bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
+        bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
+        int numa_node = bpf_map_attr_numa_node(attr);
+
+        BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
+                     offsetof(struct htab_elem, hash_node.pprev));
+        BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
+                     offsetof(struct htab_elem, hash_node.pprev));
+
+        if (lru && !capable(CAP_SYS_ADMIN))
+                /* LRU implementation is much complicated than other
+                 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
+                 */
+                return -EPERM;
+
+        if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
+                /* reserved bits should not be used */
+                return -EINVAL;
+
+        if (!lru && percpu_lru)
+                return -EINVAL;
+
+        if (lru && !prealloc)
+                return -ENOTSUPP;
+
+        if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
+                return -EINVAL;
+
+        /* check sanity of attributes.
+         * value_size == 0 may be allowed in the future to use map as a set
+         */
+        if (attr->max_entries == 0 || attr->key_size == 0 ||
+            attr->value_size == 0)
+                return -EINVAL;
+
+        if (attr->key_size > MAX_BPF_STACK)
+                /* eBPF programs initialize keys on stack, so they cannot be
+                 * larger than max stack size
+                 */
+                return -E2BIG;
+
+        if (attr->value_size >= KMALLOC_MAX_SIZE -
+            MAX_BPF_STACK - sizeof(struct htab_elem))
+                /* if value_size is bigger, the user space won't be able to
+                 * access the elements via bpf syscall. This check also makes
+                 * sure that the elem_size doesn't overflow and it's
+                 * kmalloc-able later in htab_map_update_elem()
+                 */
+                return -E2BIG;
+
+        return 0;
+}
+
 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 {
         bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
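Note that htab_map_alloc_check() returns plain negative errnos, since no map object exists yet; the ERR_PTR()-wrapped duplicates are removed from htab_map_alloc() in the next hunk. As a hypothetical userspace demo (not part of the patch), the following program trips one of the checks at BPF_MAP_CREATE time: value_size == 0 is rejected with EINVAL before any allocation happens.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int main(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type = BPF_MAP_TYPE_HASH;
        attr.key_size = sizeof(int);
        attr.value_size = 0;    /* rejected by htab_map_alloc_check() */
        attr.max_entries = 16;

        if (syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)) < 0)
                printf("BPF_MAP_CREATE failed: %s\n", strerror(errno));
        return 0;
}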
@@ -245,52 +309,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
         int err, i;
         u64 cost;
 
-        BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
-                     offsetof(struct htab_elem, hash_node.pprev));
-        BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
-                     offsetof(struct htab_elem, hash_node.pprev));
-
-        if (lru && !capable(CAP_SYS_ADMIN))
-                /* LRU implementation is much complicated than other
-                 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
-                 */
-                return ERR_PTR(-EPERM);
-
-        if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
-                /* reserved bits should not be used */
-                return ERR_PTR(-EINVAL);
-
-        if (!lru && percpu_lru)
-                return ERR_PTR(-EINVAL);
-
-        if (lru && !prealloc)
-                return ERR_PTR(-ENOTSUPP);
-
-        if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
-                return ERR_PTR(-EINVAL);
-
-        /* check sanity of attributes.
-         * value_size == 0 may be allowed in the future to use map as a set
-         */
-        if (attr->max_entries == 0 || attr->key_size == 0 ||
-            attr->value_size == 0)
-                return ERR_PTR(-EINVAL);
-
-        if (attr->key_size > MAX_BPF_STACK)
-                /* eBPF programs initialize keys on stack, so they cannot be
-                 * larger than max stack size
-                 */
-                return ERR_PTR(-E2BIG);
-
-        if (attr->value_size >= KMALLOC_MAX_SIZE -
-            MAX_BPF_STACK - sizeof(struct htab_elem))
-                /* if value_size is bigger, the user space won't be able to
-                 * access the elements via bpf syscall. This check also makes
-                 * sure that the elem_size doesn't overflow and it's
-                 * kmalloc-able later in htab_map_update_elem()
-                 */
-                return ERR_PTR(-E2BIG);
-
         htab = kzalloc(sizeof(*htab), GFP_USER);
         if (!htab)
                 return ERR_PTR(-ENOMEM);
@@ -1142,6 +1160,7 @@ static void htab_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops htab_map_ops = {
+        .map_alloc_check = htab_map_alloc_check,
         .map_alloc = htab_map_alloc,
         .map_free = htab_map_free,
         .map_get_next_key = htab_map_get_next_key,
@@ -1152,6 +1171,7 @@ const struct bpf_map_ops htab_map_ops = {
 };
 
 const struct bpf_map_ops htab_lru_map_ops = {
+        .map_alloc_check = htab_map_alloc_check,
         .map_alloc = htab_map_alloc,
         .map_free = htab_map_free,
         .map_get_next_key = htab_map_get_next_key,
@@ -1235,6 +1255,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 }
 
 const struct bpf_map_ops htab_percpu_map_ops = {
+        .map_alloc_check = htab_map_alloc_check,
         .map_alloc = htab_map_alloc,
         .map_free = htab_map_free,
         .map_get_next_key = htab_map_get_next_key,
@@ -1244,6 +1265,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
 };
 
 const struct bpf_map_ops htab_lru_percpu_map_ops = {
+        .map_alloc_check = htab_map_alloc_check,
         .map_alloc = htab_map_alloc,
         .map_free = htab_map_free,
         .map_get_next_key = htab_map_get_next_key,
@@ -1252,11 +1274,11 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
         .map_delete_elem = htab_lru_map_delete_elem,
 };
 
-static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
+static int fd_htab_map_alloc_check(union bpf_attr *attr)
 {
         if (attr->value_size != sizeof(u32))
-                return ERR_PTR(-EINVAL);
-        return htab_map_alloc(attr);
+                return -EINVAL;
+        return htab_map_alloc_check(attr);
 }
 
 static void fd_htab_map_free(struct bpf_map *map)
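fd-based hash maps hold a u32 map fd in each value, so the only extra requirement is value_size == sizeof(u32); everything else defers to htab_map_alloc_check(). A hypothetical demo (assumes a kernel with BPF_MAP_TYPE_HASH_OF_MAPS and sufficient privileges) showing this check firing:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int bpf_create(union bpf_attr *attr)
{
        return syscall(__NR_bpf, BPF_MAP_CREATE, attr, sizeof(*attr));
}

int main(void)
{
        union bpf_attr inner = {}, outer = {};
        int inner_fd;

        /* inner map: an ordinary hash map used as the template */
        inner.map_type = BPF_MAP_TYPE_HASH;
        inner.key_size = 4;
        inner.value_size = 4;
        inner.max_entries = 4;
        inner_fd = bpf_create(&inner);
        if (inner_fd < 0)
                return 1;

        /* outer map: values must be u32 map fds, so value_size != 4
         * is rejected by fd_htab_map_alloc_check() with EINVAL
         */
        outer.map_type = BPF_MAP_TYPE_HASH_OF_MAPS;
        outer.key_size = 4;
        outer.value_size = 8;   /* wrong on purpose */
        outer.max_entries = 4;
        outer.inner_map_fd = inner_fd;
        if (bpf_create(&outer) < 0)
                printf("outer map create failed: %s\n", strerror(errno));

        close(inner_fd);
        return 0;
}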
@@ -1327,7 +1349,7 @@ static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
         if (IS_ERR(inner_map_meta))
                 return inner_map_meta;
 
-        map = fd_htab_map_alloc(attr);
+        map = htab_map_alloc(attr);
         if (IS_ERR(map)) {
                 bpf_map_meta_free(inner_map_meta);
                 return map;
@@ -1371,6 +1393,7 @@ static void htab_of_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops htab_of_maps_map_ops = {
+        .map_alloc_check = fd_htab_map_alloc_check,
         .map_alloc = htab_of_map_alloc,
         .map_free = htab_of_map_free,
         .map_get_next_key = htab_map_get_next_key,