diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d511bf73ade9..1fb98b255b4c 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1314,7 +1314,7 @@ int setup_profiling_timer(unsigned int multiplier)
 }
 
 #ifdef CONFIG_SCHED_SMT
-/* cpumask of CPUs with asymetric SMT dependancy */
+/* cpumask of CPUs with asymmetric SMT dependency */
 static int powerpc_smt_flags(void)
 {
 	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
@@ -1327,14 +1327,6 @@ static int powerpc_smt_flags(void)
 }
 #endif
 
-static struct sched_domain_topology_level powerpc_topology[] = {
-#ifdef CONFIG_SCHED_SMT
-	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
-#endif
-	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
-	{ NULL, },
-};
-
 /*
  * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
  * This topology makes it *much* cheaper to migrate tasks between adjacent cores
@@ -1362,7 +1354,7 @@ static const struct cpumask *smallcore_smt_mask(int cpu)
 }
 #endif
 
-static struct sched_domain_topology_level power9_topology[] = {
+static struct sched_domain_topology_level powerpc_topology[] = {
 #ifdef CONFIG_SCHED_SMT
 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
 #endif
@@ -1387,21 +1379,10 @@ void __init smp_cpus_done(unsigned int max_cpus)
 #ifdef CONFIG_SCHED_SMT
 	if (has_big_cores) {
 		pr_info("Big cores detected but using small core scheduling\n");
-		power9_topology[0].mask = smallcore_smt_mask;
 		powerpc_topology[0].mask = smallcore_smt_mask;
 	}
 #endif
-	/*
-	 * If any CPU detects that it's sharing a cache with another CPU then
-	 * use the deeper topology that is aware of this sharing.
-	 */
-	if (shared_caches) {
-		pr_info("Using shared cache scheduler topology\n");
-		set_sched_topology(power9_topology);
-	} else {
-		pr_info("Using standard scheduler topology\n");
-		set_sched_topology(powerpc_topology);
-	}
+	set_sched_topology(powerpc_topology);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
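
For reference, a minimal sketch of the merged table as it would read once the
hunks above are applied. The SMT entry is taken verbatim from the hunk
context; the CACHE and DIE entries are an assumption carried over from the
pre-patch power9_topology definition, which the hunks truncate.

	/*
	 * Assumed post-patch layout of the single topology table (CACHE and
	 * DIE entries not visible in the hunks above). Presumably, on systems
	 * without shared caches the CACHE level's mask matches a neighbouring
	 * level and the scheduler degenerates it away, which is what lets the
	 * runtime shared_caches check be dropped from smp_cpus_done().
	 */
	static struct sched_domain_topology_level powerpc_topology[] = {
	#ifdef CONFIG_SCHED_SMT
		{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
	#endif
		{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
		{ NULL, },
	};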