mirror of https://github.com/torvalds/linux.git
perf_events: Optimize perf_event_task_tick()
Pretty much all of the calls do perf_disable/perf_enable cycles, pull
that out to cut back on hardware programming.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f24bb999d2
commit 9717e6cd3d
@@ -1573,12 +1573,8 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	raw_spin_lock(&ctx->lock);
 
 	/* Rotate the first entry last of non-pinned groups */
-	perf_disable();
-
 	list_rotate_left(&ctx->flexible_groups);
 
-	perf_enable();
-
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -1593,6 +1589,8 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpuctx = &__get_cpu_var(perf_cpu_context);
 	ctx = curr->perf_event_ctxp;
 
+	perf_disable();
+
 	perf_ctx_adjust_freq(&cpuctx->ctx);
 	if (ctx)
 		perf_ctx_adjust_freq(ctx);
@@ -1608,6 +1606,8 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
+
+	perf_enable();
 }
 
 static int event_enable_on_exec(struct perf_event *event,
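The hoisting the patch performs can be illustrated outside the kernel. Below is a minimal user-space sketch, assuming stubbed perf_disable()/perf_enable() that model PMU reprogramming with a nesting depth count; helper_old(), helper_new(), hw_programming_ops and the main() harness are illustrative names for this sketch, not kernel code.

/*
 * Sketch: per-call disable/enable pairs vs. one pair hoisted into
 * the caller. Each outermost disable or enable counts as one
 * simulated hardware programming operation.
 */
#include <stdio.h>

static int hw_programming_ops;	/* simulated PMU writes */
static int disable_depth;	/* nesting count, as in the real pair */

static void perf_disable(void)
{
	if (disable_depth++ == 0)
		hw_programming_ops++;	/* stop the hardware counters */
}

static void perf_enable(void)
{
	if (--disable_depth == 0)
		hw_programming_ops++;	/* reprogram and restart them */
}

/* Before: each helper brackets its own disable/enable. */
static void helper_old(void)
{
	perf_disable();
	/* ... touch event state ... */
	perf_enable();
}

/* After: helpers assume the caller already disabled the PMU. */
static void helper_new(void)
{
	/* ... touch event state ... */
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)		/* old scheme: 3 full cycles */
		helper_old();
	printf("per-call pairs: %d hardware ops\n", hw_programming_ops);

	hw_programming_ops = 0;
	perf_disable();			/* new scheme: one hoisted cycle */
	for (i = 0; i < 3; i++)
		helper_new();
	perf_enable();
	printf("hoisted pair:   %d hardware ops\n", hw_programming_ops);
	return 0;
}

Running the sketch prints 6 simulated hardware ops for the per-call scheme versus 2 for the hoisted pair, which is the saving the commit message refers to.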