x86: fill cpu to apicid and present map in mpparse

This is the way x86_64 does it, and it complements the already
present patch that does the bios cpu to apicid mapping here.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit a6c422ccdb (parent 73bf102b1c)
Author: Glauber de Oliveira Costa
Date:   2008-03-19 14:25:25 -03:00
Committer: Ingo Molnar
2 changed files with 24 additions and 19 deletions
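
In essence, MP_processor_info() now picks the logical cpu id itself: the
lowest bit not yet set in cpu_present_map, with the boot processor pinned
to id 0. Below is a minimal user-space sketch of that selection logic,
with a plain bitmask standing in for cpumask_t; the names and the helper
are illustrative, not kernel API:

    #include <stdio.h>

    #define MAX_CPUS          8     /* stands in for NR_CPUS */
    #define CPU_BOOTPROCESSOR 0x02  /* flag from the MP table entry */

    static unsigned int present_map;        /* stands in for cpu_present_map */

    /* Pick the lowest free cpu id; the BSP is always cpu 0. */
    static int pick_cpu_id(unsigned int cpuflag)
    {
            unsigned int free = ~present_map;       /* cpus_complement() */
            int cpu;

            for (cpu = 0; cpu < MAX_CPUS; cpu++)    /* first_cpu() */
                    if (free & (1u << cpu))
                            break;

            if (cpuflag & CPU_BOOTPROCESSOR)
                    cpu = 0;                        /* BSP must be entry 0 */

            present_map |= 1u << cpu;       /* cpu_set(cpu, cpu_present_map) */
            return cpu;
    }

    int main(void)
    {
            /* BSP is enumerated first, then two application processors. */
            printf("BSP -> cpu %d\n", pick_cpu_id(CPU_BOOTPROCESSOR));
            printf("AP  -> cpu %d\n", pick_cpu_id(0));
            printf("AP  -> cpu %d\n", pick_cpu_id(0));
            return 0;
    }

With the id chosen at parse time, smpboot_32.c no longer needs
alloc_cpu_id(); it only has to map an apicid back to the cpu number
mpparse assigned, which is what the "Utterly temporary" loop below does.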

--- a/arch/x86/kernel/mpparse_32.c
+++ b/arch/x86/kernel/mpparse_32.c
@@ -105,7 +105,8 @@ static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata;
 
 static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
 {
-	int ver, apicid;
+	int ver, apicid, cpu;
+	cpumask_t tmp_map;
 	physid_mask_t phys_cpu;
 
 	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
@@ -198,6 +199,16 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
 
 	cpu_set(num_processors, cpu_possible_map);
 	num_processors++;
+	cpus_complement(tmp_map, cpu_present_map);
+	cpu = first_cpu(tmp_map);
+
+	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR)
+		/*
+		 * x86_bios_cpu_apicid is required to have processors listed
+		 * in same order as logical cpu numbers. Hence the first
+		 * entry is BSP, and so on.
+		 */
+		cpu = 0;
 
 	/*
 	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
@@ -220,12 +231,16 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
 	}
 	/* are we being called early in kernel startup? */
 	if (x86_cpu_to_apicid_early_ptr) {
+		u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
 		u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
+
+		cpu_to_apicid[cpu] = m->mpc_apicid;
 		bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
 	} else {
-		int cpu = num_processors - 1;
+		per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
 		per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid;
 	}
+	cpu_set(cpu, cpu_present_map);
 }
 
 static void __init MP_bus_info (struct mpc_config_bus *m)
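
The x86_cpu_to_apicid_early_ptr branch above exists because this code runs
before the per-cpu areas are set up: early writes land in static boot-time
arrays, and once per-cpu storage is live the kernel copies those arrays
over and clears the pointers. A rough sketch of that hand-off pattern, with
illustrative names and plain arrays standing in for per_cpu() storage:

    typedef unsigned short u16;

    #define NR_CPUS 8

    /* Boot-time storage, usable before the per-cpu areas exist. */
    static u16 early_cpu_to_apicid[NR_CPUS];
    static u16 *cpu_to_apicid_early_ptr = early_cpu_to_apicid;

    /* Final storage; stands in for per_cpu(x86_cpu_to_apicid, cpu). */
    static u16 final_cpu_to_apicid[NR_CPUS];

    static void record_apicid(int cpu, u16 apicid)
    {
            if (cpu_to_apicid_early_ptr)    /* still early in boot? */
                    cpu_to_apicid_early_ptr[cpu] = apicid;
            else
                    final_cpu_to_apicid[cpu] = apicid;
    }

    /* Once per-cpu areas are up: copy early data, retire the pointer. */
    static void setup_per_cpu_maps(void)
    {
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    final_cpu_to_apicid[cpu] = early_cpu_to_apicid[cpu];
            cpu_to_apicid_early_ptr = NULL;
    }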

--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -525,16 +525,6 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 #endif /* WAKE_SECONDARY_VIA_INIT */
 
 extern cpumask_t cpu_initialized;
-static inline int alloc_cpu_id(void)
-{
-	cpumask_t tmp_map;
-	int cpu;
-	cpus_complement(tmp_map, cpu_present_map);
-	cpu = first_cpu(tmp_map);
-	if (cpu >= NR_CPUS)
-		return -ENODEV;
-	return cpu;
-}
 
 #ifdef CONFIG_HOTPLUG_CPU
 static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS];
@@ -605,7 +595,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 
 	irq_ctx_init(cpu);
 
-	per_cpu(x86_cpu_to_apicid, cpu) = apicid;
 	/*
 	 * This grunge runs the startup process for
 	 * the targeted processor.
@@ -666,10 +655,8 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
 		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
 		cpu_clear(cpu, cpu_possible_map);
-		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
 		cpucount--;
 	} else {
-		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
 		cpu_set(cpu, cpu_present_map);
 	}
 	/* mark "stuck" area as not stuck */
@@ -745,6 +732,7 @@ EXPORT_SYMBOL(xquad_portio);
 static void __init disable_smp(void)
 {
 	cpu_possible_map = cpumask_of_cpu(0);
+	cpu_present_map = cpumask_of_cpu(0);
 	smpboot_clear_io_apic_irqs();
 	phys_cpu_present_map = physid_mask_of_physid(0);
 	map_cpu_to_logical_apicid();
@@ -825,7 +813,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 
 	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
 	boot_cpu_logical_apicid = logical_smp_processor_id();
-	per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;
 
 	current_thread_info()->cpu = 0;
@@ -866,8 +853,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 			continue;
 		if (max_cpus <= cpucount+1)
 			continue;
-		if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
+		/* Utterly temporary */
+		for (cpu = 0; cpu < NR_CPUS; cpu++)
+			if (per_cpu(x86_cpu_to_apicid, cpu) == apicid)
+				break;
+		if (do_boot_cpu(apicid, cpu))
 			printk("CPU #%d not responding - cannot use it.\n",
 							apicid);
 		else