sched/topology: Small cleanup
Move the allocation of topology specific cpumasks into the topology code.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 8d5dc5126b
parent 73bb059f9b
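For context on the masks being moved: cpumask_var_t is the kernel's conditionally off-stack CPU mask type. With CONFIG_CPUMASK_OFFSTACK=y it is a pointer that must be allocated before first use; otherwise it is a plain array and the allocation helpers become no-ops that always succeed. Below is a minimal sketch of the allocation pattern this patch relocates (example_mask and example_init() are hypothetical names, not part of the commit):

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/init.h>
	#include <linux/smp.h>

	/* File-local mask, analogous to fallback_doms after this patch. */
	static cpumask_var_t example_mask;

	static int __init example_init(void)
	{
		/*
		 * zalloc_cpumask_var() allocates the mask when it is
		 * off-stack and zeroes it; it returns false only if the
		 * allocation itself fails.
		 */
		if (!zalloc_cpumask_var(&example_mask, GFP_KERNEL))
			return -ENOMEM;

		/* From here on, use it like any struct cpumask. */
		cpumask_set_cpu(smp_processor_id(), example_mask);
		return 0;
	}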
kernel/sched/core.c

@@ -5958,7 +5958,6 @@ void __init sched_init_smp(void)
 	cpumask_var_t non_isolated_cpus;
 
 	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
-	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
 	sched_init_numa();
 
@@ -5968,7 +5967,7 @@ void __init sched_init_smp(void)
 	 * happen.
 	 */
 	mutex_lock(&sched_domains_mutex);
-	init_sched_domains(cpu_active_mask);
+	sched_init_domains(cpu_active_mask);
 	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
 	if (cpumask_empty(non_isolated_cpus))
 		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
@@ -6197,7 +6196,6 @@ void __init sched_init(void)
 	calc_load_update = jiffies + LOAD_FREQ;
 
 #ifdef CONFIG_SMP
-	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
 	/* May be allocated at isolcpus cmdline parse time */
 	if (cpu_isolated_map == NULL)
 		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
kernel/sched/sched.h

@@ -606,11 +606,9 @@ struct root_domain {
 
 extern struct root_domain def_root_domain;
 extern struct mutex sched_domains_mutex;
-extern cpumask_var_t fallback_doms;
-extern cpumask_var_t sched_domains_tmpmask;
 
 extern void init_defrootdomain(void);
-extern int init_sched_domains(const struct cpumask *cpu_map);
+extern int sched_init_domains(const struct cpumask *cpu_map);
 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
 
 #endif /* CONFIG_SMP */
kernel/sched/topology.c

@@ -1526,7 +1526,7 @@ static struct sched_domain_attr *dattr_cur;
  * cpumask) fails, then fallback to a single sched domain,
  * as determined by the single cpumask fallback_doms.
  */
-cpumask_var_t fallback_doms;
+static cpumask_var_t fallback_doms;
 
 /*
  * arch_update_cpu_topology lets virtualized architectures update the
@@ -1568,10 +1568,13 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
  * For now this just excludes isolated CPUs, but could be used to
  * exclude other special cases in the future.
  */
-int init_sched_domains(const struct cpumask *cpu_map)
+int sched_init_domains(const struct cpumask *cpu_map)
 {
 	int err;
 
+	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
+	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
+
 	arch_update_cpu_topology();
 	ndoms_cur = 1;
 	doms_cur = alloc_sched_domains(ndoms_cur);
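Two details of the move are worth noting. The allocations now use GFP_KERNEL rather than the GFP_NOWAIT that sched_init() had to use: sched_init() runs very early in start_kernel(), where sleeping allocations are not allowed, while sched_init_domains() is reached much later via sched_init_smp(), where GFP_KERNEL is safe. And with its extern declaration gone from sched.h, fallback_doms becomes static, private to the topology code. Pieced together from the context lines above, the function now opens roughly like this (a sketch only; the remainder of the function is unchanged and elided):

	int sched_init_domains(const struct cpumask *cpu_map)
	{
		int err;

		/* Both masks are now allocated where they are used. */
		zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
		zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);

		arch_update_cpu_topology();
		ndoms_cur = 1;
		doms_cur = alloc_sched_domains(ndoms_cur);
		/* ... rest of the function is unchanged ... */
	}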