mirror of
https://github.com/torvalds/linux.git
synced 2024-11-25 13:41:51 +00:00
cgroup: bpf: enable bpf programs to integrate with rstat
Enable bpf programs to make use of rstat to collect cgroup hierarchical stats efficiently: - Add cgroup_rstat_updated() kfunc, for bpf progs that collect stats. - Add cgroup_rstat_flush() sleepable kfunc, for bpf progs that read stats. - Add an empty bpf_rstat_flush() hook that is called during rstat flushing, for bpf progs that flush stats to attach to. Attaching a bpf prog to this hook effectively registers it as a flush callback. Signed-off-by: Yosry Ahmed <yosryahmed@google.com> Acked-by: Tejun Heo <tj@kernel.org> Signed-off-by: Hao Luo <haoluo@google.com> Link: https://lore.kernel.org/r/20220824233117.1312810-4-haoluo@google.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
fe0dd9d4b7
commit
a319185be9
@@ -3,6 +3,10 @@
|
||||
|
||||
#include <linux/sched/cputime.h>
|
||||
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/btf.h>
|
||||
#include <linux/btf_ids.h>
|
||||
|
||||
static DEFINE_SPINLOCK(cgroup_rstat_lock);
|
||||
static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
|
||||
|
||||
@@ -141,6 +145,31 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
|
||||
return pos;
|
||||
}
|
||||
|
||||
/*
 * A hook for bpf stat collectors to attach to and flush their stats.
 * Together with providing bpf kfuncs for cgroup_rstat_updated() and
 * cgroup_rstat_flush(), this enables a complete workflow where bpf progs that
 * collect cgroup stats can integrate with rstat for efficient flushing.
 *
 * A static noinline declaration here could cause the compiler to optimize away
 * the function. A global noinline declaration will keep the definition, but may
 * optimize away the callsite. Therefore, __weak is needed to ensure that the
 * call is still emitted, by telling the compiler that we don't know what the
 * function might eventually be.
 *
 * __diag_* below are needed to dismiss the missing prototype warning.
 */
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "kfuncs which will be used in BPF programs");

/*
 * Intentionally empty: the body is never meant to do anything itself.
 * BPF programs attach to this symbol (e.g. via fentry) and thereby act
 * as flush callbacks; it is invoked from cgroup_rstat_flush_locked()
 * for each (cgroup, parent, cpu) being flushed.
 */
__weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
				     struct cgroup *parent, int cpu)
{
}

__diag_pop();
|
||||
|
||||
/* see cgroup_rstat_flush() */
|
||||
static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
|
||||
__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
|
||||
@@ -168,6 +197,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
|
||||
struct cgroup_subsys_state *css;
|
||||
|
||||
cgroup_base_stat_flush(pos, cpu);
|
||||
bpf_rstat_flush(pos, cgroup_parent(pos), cpu);
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(css, &pos->rstat_css_list,
|
||||
@@ -501,3 +531,21 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq)
|
||||
seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
BTF_SET8_START(bpf_rstat_kfunc_ids)
/* cgroup_rstat_updated(): for progs that collect stats on the update path. */
BTF_ID_FLAGS(func, cgroup_rstat_updated)
/* cgroup_rstat_flush() may sleep, so only sleepable progs may call it. */
BTF_ID_FLAGS(func, cgroup_rstat_flush, KF_SLEEPABLE)
BTF_SET8_END(bpf_rstat_kfunc_ids)

/* kfunc set handed to the BTF registry below. */
static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = {
	.owner          = THIS_MODULE,
	.set            = &bpf_rstat_kfunc_ids,
};

/*
 * Register the rstat kfuncs for tracing programs. Run at late_initcall so
 * BTF and the kfunc registry are fully set up before registration.
 */
static int __init bpf_rstat_kfunc_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &bpf_rstat_kfunc_set);
}
late_initcall(bpf_rstat_kfunc_init);
|
||||
|
Loading…
Reference in New Issue
Block a user