perf, x86: Avoid double disable on throttle vs ioctl(PERF_IOC_DISABLE)

Calling ioctl(PERF_EVENT_IOC_DISABLE) on a throttled counter would result
in a double disable. Cure this by using x86_pmu_{start,stop} for
throttle/unthrottle and teaching x86_pmu_stop() to check ->active_mask.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 71e2d28280
parent c08053e627
Author: Peter Zijlstra, 2010-03-08 17:51:33 +01:00
Committer: Ingo Molnar
2 changed files with 7 additions and 15 deletions
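
For context, a minimal userspace sketch (not part of this patch) of the scenario the changelog describes: open a counter with a deliberately small sample period so it is likely to get throttled, then disable it with ioctl(PERF_EVENT_IOC_DISABLE). The period value and the busy loop are illustrative assumptions; whether throttling actually triggers depends on the workload and on the perf_event_max_sample_rate limit.

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 1000;	/* tiny period: likely to be throttled */
	attr.disabled = 1;

	/* measure this thread on any CPU; no glibc wrapper, use the raw syscall */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	/* burn cycles so the counter overflows often enough to get throttled */
	for (volatile unsigned long i = 0; i < 100000000UL; i++)
		;

	/* disabling a (possibly throttled) event must not disable it twice */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	close(fd);
	return 0;
}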

arch/x86/kernel/cpu/perf_event.c

@@ -983,14 +983,8 @@ static int x86_pmu_start(struct perf_event *event)
 
 static void x86_pmu_unthrottle(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-
-	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
-				cpuc->events[hwc->idx] != event))
-		return;
-
-	x86_pmu.enable(event);
+	int ret = x86_pmu_start(event);
+	WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
@@ -1050,11 +1044,9 @@ static void x86_pmu_stop(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
-	/*
-	 * Must be done before we disable, otherwise the nmi handler
-	 * could reenable again:
-	 */
-	__clear_bit(idx, cpuc->active_mask);
+	if (!__test_and_clear_bit(idx, cpuc->active_mask))
+		return;
+
 	x86_pmu.disable(event);
 
 	/*
@@ -1123,7 +1115,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			x86_pmu.disable(event);
+			x86_pmu_stop(event);
 	}
 
 	if (handled)

arch/x86/kernel/cpu/perf_event_intel.c

@@ -774,7 +774,7 @@ again:
 		data.period = event->hw.last_period;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			intel_pmu_disable_event(event);
+			x86_pmu_stop(event);
 	}
 
 	intel_pmu_ack_status(ack);
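
The key property the patch establishes is that x86_pmu_stop() is now idempotent: the __test_and_clear_bit() on ->active_mask turns a second stop (throttle followed by PERF_EVENT_IOC_DISABLE, in either order) into a no-op, and x86_pmu_unthrottle() simply restarts the event. A standalone toy sketch of that pattern, using made-up names and plain C rather than the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for one counter slot: 'active' plays the role of the bit in
 * cpuc->active_mask, 'hw_enabled' the hardware enable bit. */
struct toy_event {
	bool active;
	bool hw_enabled;
};

/* Like the patched x86_pmu_stop(): if the event is not marked active,
 * return early instead of touching the hardware a second time. */
static void toy_stop(struct toy_event *e)
{
	if (!e->active)
		return;
	e->active = false;
	e->hw_enabled = false;
	puts("counter disabled");
}

/* Simplified stand-in for x86_pmu_start(); the real function can fail
 * (no hardware slot), which x86_pmu_unthrottle() reports via WARN_ON_ONCE(). */
static int toy_start(struct toy_event *e)
{
	if (e->active)
		return -1;
	e->active = true;
	e->hw_enabled = true;
	puts("counter enabled");
	return 0;
}

int main(void)
{
	struct toy_event ev = { .active = true, .hw_enabled = true };

	toy_stop(&ev);			/* throttle stops the event */
	toy_stop(&ev);			/* later PERF_EVENT_IOC_DISABLE: now a no-op */
	return toy_start(&ev) ? 1 : 0;	/* unthrottle is just a start */
}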