perf/x86: Avoid kfree() in CPU_{STARTING,DYING}

On -rt, kfree() can schedule, but the CPU_{STARTING,DYING} notifiers
run in atomic context. So queue the boxes on a list there and defer
the actual kfree() to CPU_{ONLINE,DEAD}, which run in a context that
is allowed to sleep.

Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: peterz@infradead.org
Cc: eranian@google.com
Cc: ak@linux.intel.com
Link: http://lkml.kernel.org/r/1366113067-3262-2-git-send-email-zheng.z.yan@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
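
The pattern here is generic: the atomic path only links objects onto
a list, and a later path that is allowed to sleep walks the list and
frees them. Below is a minimal, self-contained userspace sketch of
the same idea; the names (struct box, defer_free(),
drain_deferred_frees()) are illustrative only, while the patch itself
uses the kernel's list_head and kfree():

  /*
   * Sketch of the deferred-free pattern: the "atomic" path only
   * updates pointers, never calls the allocator; the drain path,
   * which may sleep, does the actual freeing.
   */
  #include <stdio.h>
  #include <stdlib.h>

  struct box {
          int id;
          struct box *next;
  };

  static struct box *deferred_free_list;

  /* Safe from atomic context: no allocator call, just pointer updates. */
  static void defer_free(struct box *b)
  {
          b->next = deferred_free_list;
          deferred_free_list = b;
  }

  /* Called later, from a context where freeing is allowed to sleep. */
  static void drain_deferred_frees(void)
  {
          while (deferred_free_list) {
                  struct box *b = deferred_free_list;

                  deferred_free_list = b->next;
                  printf("freeing box %d\n", b->id);
                  free(b);
          }
  }

  int main(void)
  {
          int i;

          for (i = 0; i < 3; i++) {
                  struct box *b = malloc(sizeof(*b));

                  if (!b)
                          return 1;
                  b->id = i;
                  defer_free(b);          /* stands in for the CPU_DYING path */
          }
          drain_deferred_frees();         /* stands in for CPU_ONLINE/CPU_DEAD */
          return 0;
  }

The same reasoning applies in the patch: list_add() and list_del()
never allocate or sleep, so they are safe in the notifier's atomic
phase, and the kfree() is postponed to a phase that runs in process
context.
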
arch/x86/kernel/cpu/perf_event_intel_uncore.c

@@ -2622,6 +2622,21 @@ static void __init uncore_pci_exit(void)
 	}
 }
 
+/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
+static LIST_HEAD(boxes_to_free);
+
+static void __cpuinit uncore_kfree_boxes(void)
+{
+	struct intel_uncore_box *box;
+
+	while (!list_empty(&boxes_to_free)) {
+		box = list_entry(boxes_to_free.next,
+				 struct intel_uncore_box, list);
+		list_del(&box->list);
+		kfree(box);
+	}
+}
+
 static void __cpuinit uncore_cpu_dying(int cpu)
 {
 	struct intel_uncore_type *type;
@@ -2636,7 +2651,7 @@ static void __cpuinit uncore_cpu_dying(int cpu)
 			box = *per_cpu_ptr(pmu->box, cpu);
 			*per_cpu_ptr(pmu->box, cpu) = NULL;
 			if (box && atomic_dec_and_test(&box->refcnt))
-				kfree(box);
+				list_add(&box->list, &boxes_to_free);
 		}
 	}
 }
@@ -2666,8 +2681,11 @@ static int __cpuinit uncore_cpu_starting(int cpu)
 			if (exist && exist->phys_id == phys_id) {
 				atomic_inc(&exist->refcnt);
 				*per_cpu_ptr(pmu->box, cpu) = exist;
-				kfree(box);
-				box = NULL;
+				if (box) {
+					list_add(&box->list,
+						 &boxes_to_free);
+					box = NULL;
+				}
 				break;
 			}
 		}
@@ -2806,6 +2824,10 @@ static int
 	case CPU_DYING:
 		uncore_cpu_dying(cpu);
 		break;
+	case CPU_ONLINE:
+	case CPU_DEAD:
+		uncore_kfree_boxes();
+		break;
 	default:
 		break;
 	}
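
Note that both hotplug directions can queue boxes:
uncore_cpu_starting() defers the preallocated box once it finds an
existing box for the same physical package (drained at CPU_ONLINE),
and uncore_cpu_dying() defers a box whose refcount drops to zero
(drained at CPU_DEAD). The boxes_to_free list itself needs no
locking: as the comment above uncore_kfree_boxes() says, CPU hotplug
and unplug are serialized by the cpu_add_remove_lock mutex.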