sched/x86: Rewrite set_cpu_sibling_map()

Commit ad7687dde ("x86/numa: Check for nonsensical topologies on real
hw as well") is broken: the node check can trigger (and warn) on valid
setups even though it only changes the end result for invalid ones, and
there is no reliable way to tell the two cases apart.

Rewrite set_cpu_sibling_map() to make the code clearer and to ensure
the warning is only emitted when the check actually changes the end
result.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-klcwahu3gx467uhfiqjyhdcs@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
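For illustration only, here is a small userspace sketch of the difference between the two approaches (hypothetical 4-CPU topology data and plain printf in place of WARN_ONCE, not kernel code): the old code warned on any cross-node pair in the setup mask, while the rewritten code consults the sanity check only after a sibling match, so it warns only when ignoring the link would actually change the resulting masks.

/*
 * Userspace model of the change; the topology data and helper names
 * below are made up for the example.
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu {
	int node;
	int phys_proc_id;	/* package id */
};

/* two packages, each on its own node */
static const struct cpu cpus[] = {
	{ .node = 0, .phys_proc_id = 0 },
	{ .node = 0, .phys_proc_id = 0 },
	{ .node = 1, .phys_proc_id = 1 },
	{ .node = 1, .phys_proc_id = 1 },
};

/* new style: the node check is asked only once a match exists */
static bool topology_sane(int c1, int c2, const char *name)
{
	if (cpus[c1].node != cpus[c2].node) {
		printf("WARN: CPU #%d's %s-sibling CPU #%d is not on the same node\n",
		       c1, name, c2);
		return false;
	}
	return true;
}

static bool match_mc(int c1, int c2)
{
	if (cpus[c1].phys_proc_id == cpus[c2].phys_proc_id)
		return topology_sane(c1, c2, "mc");
	return false;	/* no match, so nothing to warn about */
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		/* old style: a node mismatch alone triggered the warning,
		 * e.g. for CPU #0 vs CPU #2 which share no package anyway */
		if (cpus[0].node != cpus[i].node)
			printf("old check would warn for CPU #0 vs #%d\n", i);

		/* new style: silent unless a real sibling link is dropped */
		if (match_mc(0, i))
			printf("CPU #0 and CPU #%d are core siblings\n", i);
	}
	return 0;
}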
@@ -315,70 +315,90 @@ void __cpuinit smp_store_cpu_info(int id)
 		identify_secondary_cpu(c);
 }
 
-static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
+static bool __cpuinit
+topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
 {
-	cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
-	cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
-	cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
-	cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
-	cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
-	cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
+	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
+
+	return !WARN_ONCE(cpu_to_node(cpu1) != cpu_to_node(cpu2),
+		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
+		"[node: %d != %d]. Ignoring dependency.\n",
+		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
 }
 
+#define link_mask(_m, c1, c2)						\
+do {									\
+	cpumask_set_cpu((c1), cpu_##_m##_mask(c2));			\
+	cpumask_set_cpu((c2), cpu_##_m##_mask(c1));			\
+} while (0)
+
+static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+{
+	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
+		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
+
+		if (c->phys_proc_id == o->phys_proc_id &&
+		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
+		    c->compute_unit_id == o->compute_unit_id)
+			return topology_sane(c, o, "smt");
+
+	} else if (c->phys_proc_id == o->phys_proc_id &&
+		   c->cpu_core_id == o->cpu_core_id) {
+		return topology_sane(c, o, "smt");
+	}
+
+	return false;
+}
+
+static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+{
+	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
+
+	if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
+	    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
+		return topology_sane(c, o, "llc");
+
+	return false;
+}
+
+static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+{
+	if (c->phys_proc_id == o->phys_proc_id)
+		return topology_sane(c, o, "mc");
+
+	return false;
+}
+
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
-	int i;
+	bool has_mc = boot_cpu_data.x86_max_cores > 1;
+	bool has_smt = smp_num_siblings > 1;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	struct cpuinfo_x86 *o;
+	int i;
 
 	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 
-	if (smp_num_siblings > 1) {
-		for_each_cpu(i, cpu_sibling_setup_mask) {
-			struct cpuinfo_x86 *o = &cpu_data(i);
-
-			if (cpu_to_node(cpu) != cpu_to_node(i)) {
-				WARN_ONCE(1, "sched: CPU #%d's thread-sibling CPU #%d not on the same node! [node %d != %d]. Ignoring sibling dependency.\n", cpu, i, cpu_to_node(cpu), cpu_to_node(i));
-				continue;
-			}
-
-			if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
-				if (c->phys_proc_id == o->phys_proc_id &&
-				    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) &&
-				    c->compute_unit_id == o->compute_unit_id)
-					link_thread_siblings(cpu, i);
-			} else if (c->phys_proc_id == o->phys_proc_id &&
-				   c->cpu_core_id == o->cpu_core_id) {
-				link_thread_siblings(cpu, i);
-			}
-		}
-	} else {
+	if (!has_smt && !has_mc) {
 		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
-	}
-
-	cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
-
-	if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
-		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
+		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
+		cpumask_set_cpu(cpu, cpu_core_mask(cpu));
 		c->booted_cores = 1;
 		return;
 	}
 
 	for_each_cpu(i, cpu_sibling_setup_mask) {
-		if (cpu_to_node(cpu) != cpu_to_node(i)) {
-			WARN_ONCE(1, "sched: CPU #%d's core-sibling CPU #%d not on the same node! [node %d != %d]. Ignoring sibling dependency.\n", cpu, i, cpu_to_node(cpu), cpu_to_node(i));
-			continue;
-		}
+		o = &cpu_data(i);
+
+		if ((i == cpu) || (has_smt && match_smt(c, o)))
+			link_mask(sibling, cpu, i);
+
+		if ((i == cpu) || (has_mc && match_llc(c, o)))
+			link_mask(llc_shared, cpu, i);
 
-		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
-		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
-			cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
-		}
-		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-			cpumask_set_cpu(i, cpu_core_mask(cpu));
-			cpumask_set_cpu(cpu, cpu_core_mask(i));
+		if ((i == cpu) || (has_mc && match_mc(c, o))) {
+			link_mask(core, cpu, i);
+
 			/*
 			 * Does this new cpu bringup a new core?
 			 */