perf: Fix cgroup event scheduling
There appears to be a problem in __perf_event_task_sched_in() wrt cgroup event scheduling.

The normal event scheduling order is:

	CPU pinned
	Task pinned
	CPU flexible
	Task flexible

And since perf_cgroup_sched*() only schedules the cpu context, we must call this _before_ adding the task events.

Note: double check what happens on the ctx switch optimization where the task ctx isn't scheduled.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: David Ahern <dsahern@gmail.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vince Weaver <vincent.weaver@maine.edu> Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
c994d61367
commit
7e41d17753
@@ -2806,6 +2806,16 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 	struct perf_event_context *ctx;
 	int ctxn;
 
+	/*
+	 * If cgroup events exist on this CPU, then we need to check if we have
+	 * to switch in PMU state; cgroup event are system-wide mode only.
+	 *
+	 * Since cgroup events are CPU events, we must schedule these in before
+	 * we schedule in the task events.
+	 */
+	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
+		perf_cgroup_sched_in(prev, task);
+
 	for_each_task_context_nr(ctxn) {
 		ctx = task->perf_event_ctxp[ctxn];
 		if (likely(!ctx))
@@ -2813,13 +2823,6 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 
 		perf_event_context_sched_in(ctx, task);
 	}
-	/*
-	 * if cgroup events exist on this CPU, then we need
-	 * to check if we have to switch in PMU state.
-	 * cgroup event are system-wide mode only
-	 */
-	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-		perf_cgroup_sched_in(prev, task);
 
 	if (atomic_read(&nr_switch_events))
 		perf_event_switch(task, prev, true);
Loading…
Reference in New Issue
Block a user