perf/x86: Fix 'active_events' imbalance
Commit 1b7b938f18 ("perf/x86/intel: Fix PMI handling for Intel PT")
conditionally increments active_events in x86_add_exclusive(), but
unconditionally decrements it in x86_del_exclusive(). These extra
decrements can lead to the situation where active_events is zero, and
thus the PMI handler is 'disabled', while we still have active events
on the PMU generating PMIs. This leads to a truckload of:

  Uhhuh. NMI received for unknown reason 21 on CPU 28.
  Do you have a strange power saving mode enabled?
  Dazed and confused, but trying to continue

messages and generally messes up perf.

Remove the condition on the increment; a double increment balanced by a
double decrement is perfectly fine. Restructure the code a little bit to
make the unconditional inc a bit more natural.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: alexander.shishkin@linux.intel.com
Cc: brgerst@gmail.com
Cc: dvlasenk@redhat.com
Cc: luto@amacapital.net
Cc: oleg@redhat.com
Fixes: 1b7b938f18 ("perf/x86/intel: Fix PMI handling for Intel PT")
Link: http://lkml.kernel.org/r/20150624144750.GJ18673@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 93472aff80
parent 2d6dac2fcc
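To make the counting bug concrete before reading the diff, here is a
minimal userspace C sketch of the failure mode (illustrative only, not
kernel code; all names here are made up). A reference count that is
incremented conditionally but decremented unconditionally drifts below
the true number of users, eventually hitting zero too early:

	/*
	 * Hypothetical model of the imbalance: 'active_events' gates the
	 * PMI handler. add() skips the increment on some paths, del()
	 * always decrements, so paired add()/del() calls underflow it.
	 */
	#include <stdio.h>

	static int active_events;

	static void add(int counted)
	{
		if (counted)		/* conditional increment: the bug */
			active_events++;
	}

	static void del(void)
	{
		active_events--;	/* unconditional decrement */
	}

	int main(void)
	{
		add(1);			/* event A: counted */
		add(0);			/* event B: increment skipped */
		del();			/* event B goes away */
		/* now 0, yet event A is still live and generating PMIs */
		printf("active_events = %d\n", active_events);
		return 0;
	}

The fix below takes the other route: make the increment unconditional,
so the one path that already counted its event simply gets counted
twice and decremented twice, which stays balanced.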
@@ -357,34 +357,24 @@ void x86_release_hardware(void)
  */
 int x86_add_exclusive(unsigned int what)
 {
-	int ret = -EBUSY, i;
-
-	if (atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what]))
-		return 0;
+	int i;
 
-	mutex_lock(&pmc_reserve_mutex);
-	for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
-		if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
-			goto out;
+	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
+		mutex_lock(&pmc_reserve_mutex);
+		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
+			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
+				goto fail_unlock;
+		}
+		atomic_inc(&x86_pmu.lbr_exclusive[what]);
+		mutex_unlock(&pmc_reserve_mutex);
 	}
 
-	atomic_inc(&x86_pmu.lbr_exclusive[what]);
-	ret = 0;
+	atomic_inc(&active_events);
+	return 0;
 
-out:
+fail_unlock:
 	mutex_unlock(&pmc_reserve_mutex);
-
-	/*
-	 * Assuming that all exclusive events will share the PMI handler
-	 * (which checks active_events for whether there is work to do),
-	 * we can bump active_events counter right here, except for
-	 * x86_lbr_exclusive_lbr events that go through x86_pmu_event_init()
-	 * path, which already bumps active_events for them.
-	 */
-	if (!ret && what != x86_lbr_exclusive_lbr)
-		atomic_inc(&active_events);
-
-	return ret;
+	return -EBUSY;
 }
 
 void x86_del_exclusive(unsigned int what)
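For context, the decrement side that this hunk now balances against is
not touched by the patch; in kernels of this vintage it reads roughly as
follows (quoted from memory as a sketch, not part of the diff above):

	/*
	 * Counterpart in arch/x86/kernel/cpu/perf_event.c. With the fix,
	 * both counters are decremented exactly as often as they are
	 * incremented, on every path.
	 */
	void x86_del_exclusive(unsigned int what)
	{
		atomic_dec(&x86_pmu.lbr_exclusive[what]);
		atomic_dec(&active_events);
	}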