bpf: allocate cgroup storage entries on attaching bpf programs
If a bpf program is using cgroup local storage, allocate a bpf_cgroup_storage structure automatically on attaching the program to a cgroup and save the pointer into the corresponding bpf_prog_list entry. Analogously, release the cgroup local storage on detaching the bpf program. Signed-off-by: Roman Gushchin <guro@fb.com> Cc: Alexei Starovoitov <ast@kernel.org> Cc: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Martin KaFai Lau <kafai@fb.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
This commit is contained in:
parent
aa0ad5b039
commit
d7bf2c10af
@ -43,6 +43,7 @@ struct bpf_cgroup_storage {
|
|||||||
struct bpf_prog_list {
|
struct bpf_prog_list {
|
||||||
struct list_head node;
|
struct list_head node;
|
||||||
struct bpf_prog *prog;
|
struct bpf_prog *prog;
|
||||||
|
struct bpf_cgroup_storage *storage;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct bpf_prog_array;
|
struct bpf_prog_array;
|
||||||
|
@ -34,6 +34,8 @@ void cgroup_bpf_put(struct cgroup *cgrp)
|
|||||||
list_for_each_entry_safe(pl, tmp, progs, node) {
|
list_for_each_entry_safe(pl, tmp, progs, node) {
|
||||||
list_del(&pl->node);
|
list_del(&pl->node);
|
||||||
bpf_prog_put(pl->prog);
|
bpf_prog_put(pl->prog);
|
||||||
|
bpf_cgroup_storage_unlink(pl->storage);
|
||||||
|
bpf_cgroup_storage_free(pl->storage);
|
||||||
kfree(pl);
|
kfree(pl);
|
||||||
static_branch_dec(&cgroup_bpf_enabled_key);
|
static_branch_dec(&cgroup_bpf_enabled_key);
|
||||||
}
|
}
|
||||||
@ -188,6 +190,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
|
|||||||
{
|
{
|
||||||
struct list_head *progs = &cgrp->bpf.progs[type];
|
struct list_head *progs = &cgrp->bpf.progs[type];
|
||||||
struct bpf_prog *old_prog = NULL;
|
struct bpf_prog *old_prog = NULL;
|
||||||
|
struct bpf_cgroup_storage *storage, *old_storage = NULL;
|
||||||
struct cgroup_subsys_state *css;
|
struct cgroup_subsys_state *css;
|
||||||
struct bpf_prog_list *pl;
|
struct bpf_prog_list *pl;
|
||||||
bool pl_was_allocated;
|
bool pl_was_allocated;
|
||||||
@ -210,31 +213,47 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
|
|||||||
if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
|
if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
|
||||||
return -E2BIG;
|
return -E2BIG;
|
||||||
|
|
||||||
|
storage = bpf_cgroup_storage_alloc(prog);
|
||||||
|
if (IS_ERR(storage))
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
if (flags & BPF_F_ALLOW_MULTI) {
|
if (flags & BPF_F_ALLOW_MULTI) {
|
||||||
list_for_each_entry(pl, progs, node)
|
list_for_each_entry(pl, progs, node) {
|
||||||
if (pl->prog == prog)
|
if (pl->prog == prog) {
|
||||||
/* disallow attaching the same prog twice */
|
/* disallow attaching the same prog twice */
|
||||||
|
bpf_cgroup_storage_free(storage);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
|
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
|
||||||
if (!pl)
|
if (!pl) {
|
||||||
|
bpf_cgroup_storage_free(storage);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
pl_was_allocated = true;
|
pl_was_allocated = true;
|
||||||
pl->prog = prog;
|
pl->prog = prog;
|
||||||
|
pl->storage = storage;
|
||||||
list_add_tail(&pl->node, progs);
|
list_add_tail(&pl->node, progs);
|
||||||
} else {
|
} else {
|
||||||
if (list_empty(progs)) {
|
if (list_empty(progs)) {
|
||||||
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
|
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
|
||||||
if (!pl)
|
if (!pl) {
|
||||||
|
bpf_cgroup_storage_free(storage);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
}
|
||||||
pl_was_allocated = true;
|
pl_was_allocated = true;
|
||||||
list_add_tail(&pl->node, progs);
|
list_add_tail(&pl->node, progs);
|
||||||
} else {
|
} else {
|
||||||
pl = list_first_entry(progs, typeof(*pl), node);
|
pl = list_first_entry(progs, typeof(*pl), node);
|
||||||
old_prog = pl->prog;
|
old_prog = pl->prog;
|
||||||
|
old_storage = pl->storage;
|
||||||
|
bpf_cgroup_storage_unlink(old_storage);
|
||||||
pl_was_allocated = false;
|
pl_was_allocated = false;
|
||||||
}
|
}
|
||||||
pl->prog = prog;
|
pl->prog = prog;
|
||||||
|
pl->storage = storage;
|
||||||
}
|
}
|
||||||
|
|
||||||
cgrp->bpf.flags[type] = flags;
|
cgrp->bpf.flags[type] = flags;
|
||||||
@ -257,10 +276,13 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static_branch_inc(&cgroup_bpf_enabled_key);
|
static_branch_inc(&cgroup_bpf_enabled_key);
|
||||||
|
if (old_storage)
|
||||||
|
bpf_cgroup_storage_free(old_storage);
|
||||||
if (old_prog) {
|
if (old_prog) {
|
||||||
bpf_prog_put(old_prog);
|
bpf_prog_put(old_prog);
|
||||||
static_branch_dec(&cgroup_bpf_enabled_key);
|
static_branch_dec(&cgroup_bpf_enabled_key);
|
||||||
}
|
}
|
||||||
|
bpf_cgroup_storage_link(storage, cgrp, type);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
cleanup:
|
cleanup:
|
||||||
@ -276,6 +298,9 @@ cleanup:
|
|||||||
|
|
||||||
/* and cleanup the prog list */
|
/* and cleanup the prog list */
|
||||||
pl->prog = old_prog;
|
pl->prog = old_prog;
|
||||||
|
bpf_cgroup_storage_free(pl->storage);
|
||||||
|
pl->storage = old_storage;
|
||||||
|
bpf_cgroup_storage_link(old_storage, cgrp, type);
|
||||||
if (pl_was_allocated) {
|
if (pl_was_allocated) {
|
||||||
list_del(&pl->node);
|
list_del(&pl->node);
|
||||||
kfree(pl);
|
kfree(pl);
|
||||||
@ -356,6 +381,8 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
|
|||||||
|
|
||||||
/* now can actually delete it from this cgroup list */
|
/* now can actually delete it from this cgroup list */
|
||||||
list_del(&pl->node);
|
list_del(&pl->node);
|
||||||
|
bpf_cgroup_storage_unlink(pl->storage);
|
||||||
|
bpf_cgroup_storage_free(pl->storage);
|
||||||
kfree(pl);
|
kfree(pl);
|
||||||
if (list_empty(progs))
|
if (list_empty(progs))
|
||||||
/* last program was detached, reset flags to zero */
|
/* last program was detached, reset flags to zero */
|
||||||
|
Loading…
Reference in New Issue
Block a user