forked from Minki/linux
x86: use specialized routine for setup per-cpu area
We use the same routine as x86_64, moved now to setup.c, just with a few ifdefs inside. Note that this routine uses prefill_possible_map(). It has the very nice side effect of allowing hotplugging of cpus that are marked as present but disabled by acpi bios. Signed-off-by: Glauber Costa <gcosta@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
fbac7fcbad
commit
4fe29a8564
@ -117,7 +117,7 @@ config ARCH_HAS_CPU_RELAX
|
||||
def_bool y
|
||||
|
||||
config HAVE_SETUP_PER_CPU_AREA
|
||||
def_bool X86_64
|
||||
def_bool X86_64 || (X86_SMP && !X86_VOYAGER)
|
||||
|
||||
config ARCH_HIBERNATION_POSSIBLE
|
||||
def_bool y
|
||||
|
@ -18,7 +18,7 @@ CFLAGS_tsc_64.o := $(nostackp)
|
||||
obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
|
||||
obj-y += traps_$(BITS).o irq_$(BITS).o
|
||||
obj-y += time_$(BITS).o ioport.o ldt.o
|
||||
obj-y += setup_$(BITS).o i8259_$(BITS).o
|
||||
obj-y += setup_$(BITS).o i8259_$(BITS).o setup.o
|
||||
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
|
||||
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
|
||||
obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o setup64.o
|
||||
|
103
arch/x86/kernel/setup.c
Normal file
103
arch/x86/kernel/setup.c
Normal file
@ -0,0 +1,103 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/percpu.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/topology.h>
|
||||
|
||||
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
|
||||
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas. These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/*
		 * NOTE: the open brace of this "if" is intentionally closed
		 * inside a matching #ifdef CONFIG_SMP block below; on !SMP
		 * builds the copy is done unconditionally for every cpu.
		 */
#ifdef CONFIG_SMP
		if (per_cpu_offset(cpu)) {
#endif
			/* migrate the boot-time apicid tables into percpu data */
			per_cpu(x86_cpu_to_apicid, cpu) =
						x86_cpu_to_apicid_init[cpu];
			per_cpu(x86_bios_cpu_apicid, cpu) =
						x86_bios_cpu_apicid_init[cpu];
#ifdef CONFIG_NUMA
			per_cpu(x86_cpu_to_node_map, cpu) =
						x86_cpu_to_node_map_init[cpu];
#endif
#ifdef CONFIG_SMP
		} else
			/*
			 * A zero offset means the percpu area for this cpu was
			 * never set up (setup_per_cpu_areas() did not reach it).
			 */
			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
									cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	x86_cpu_to_apicid_early_ptr = NULL;
	x86_bios_cpu_apicid_early_ptr = NULL;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = NULL;
#endif
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/*
|
||||
* Great future not-so-futuristic plan: make i386 and x86_64 do it
|
||||
* the same way
|
||||
*/
|
||||
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
|
||||
EXPORT_SYMBOL(__per_cpu_offset);
|
||||
#endif
|
||||
|
||||
/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
/*
 * Allocate and populate a per-cpu copy of the .data.percpu section for every
 * possible cpu, recording each copy's offset from __per_cpu_start so the
 * per_cpu() accessors can find it (PDA on 64-bit, __per_cpu_offset on 32-bit).
 */
void __init setup_per_cpu_areas(void)
{
	int i;
	unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Must run before the for_each_cpu_mask() walk below: it may add
	 * cpus to cpu_possible_map so disabled-but-present cpus can be
	 * hotplugged later.
	 */
	prefill_possible_map();
#endif

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;

	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
			  size);
	for_each_cpu_mask(i, cpu_possible_map) {
		char *ptr;
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		/* prefer node-local memory; fall back if the node is absent */
		int node = early_cpu_to_node(i);
		if (!node_online(node) || !NODE_DATA(node))
			ptr = alloc_bootmem_pages(size);
		else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
#ifdef CONFIG_X86_64
		/* 64-bit keeps the percpu offset in the cpu's PDA */
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
#else
		__per_cpu_offset[i] = ptr - __per_cpu_start;
#endif
		/* seed the new area from the initial percpu section image */
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	/* setup percpu data maps early */
	setup_per_cpu_maps();
}
|
||||
|
||||
#endif
|
@ -85,83 +85,6 @@ static int __init nonx32_setup(char *str)
|
||||
}
|
||||
__setup("noexec32=", nonx32_setup);
|
||||
|
||||
/*
|
||||
* Copy data used in early init routines from the initial arrays to the
|
||||
* per cpu data areas. These arrays then become expendable and the
|
||||
* *_early_ptr's are zeroed indicating that the static arrays are gone.
|
||||
*/
|
||||
static void __init setup_per_cpu_maps(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
#ifdef CONFIG_SMP
|
||||
if (per_cpu_offset(cpu)) {
|
||||
#endif
|
||||
per_cpu(x86_cpu_to_apicid, cpu) =
|
||||
x86_cpu_to_apicid_init[cpu];
|
||||
per_cpu(x86_bios_cpu_apicid, cpu) =
|
||||
x86_bios_cpu_apicid_init[cpu];
|
||||
#ifdef CONFIG_NUMA
|
||||
per_cpu(x86_cpu_to_node_map, cpu) =
|
||||
x86_cpu_to_node_map_init[cpu];
|
||||
#endif
|
||||
#ifdef CONFIG_SMP
|
||||
}
|
||||
else
|
||||
printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
|
||||
cpu);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* indicate the early static arrays will soon be gone */
|
||||
x86_cpu_to_apicid_early_ptr = NULL;
|
||||
x86_bios_cpu_apicid_early_ptr = NULL;
|
||||
#ifdef CONFIG_NUMA
|
||||
x86_cpu_to_node_map_early_ptr = NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Great future plan:
|
||||
* Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
|
||||
* Always point %gs to its beginning
|
||||
*/
|
||||
void __init setup_per_cpu_areas(void)
|
||||
{
|
||||
int i;
|
||||
unsigned long size;
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
prefill_possible_map();
|
||||
#endif
|
||||
|
||||
/* Copy section for each CPU (we discard the original) */
|
||||
size = PERCPU_ENOUGH_ROOM;
|
||||
|
||||
printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
|
||||
for_each_cpu_mask (i, cpu_possible_map) {
|
||||
char *ptr;
|
||||
#ifndef CONFIG_NEED_MULTIPLE_NODES
|
||||
ptr = alloc_bootmem_pages(size);
|
||||
#else
|
||||
int node = early_cpu_to_node(i);
|
||||
|
||||
if (!node_online(node) || !NODE_DATA(node))
|
||||
ptr = alloc_bootmem_pages(size);
|
||||
else
|
||||
ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
|
||||
#endif
|
||||
if (!ptr)
|
||||
panic("Cannot allocate cpu data for CPU %d\n", i);
|
||||
cpu_pda(i)->data_offset = ptr - __per_cpu_start;
|
||||
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
|
||||
}
|
||||
|
||||
/* setup percpu data maps early */
|
||||
setup_per_cpu_maps();
|
||||
}
|
||||
|
||||
void pda_init(int cpu)
|
||||
{
|
||||
struct x8664_pda *pda = cpu_pda(cpu);
|
||||
|
@ -665,6 +665,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
|
||||
unmap_cpu_to_logical_apicid(cpu);
|
||||
cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
|
||||
cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
|
||||
cpu_clear(cpu, cpu_possible_map);
|
||||
cpucount--;
|
||||
} else {
|
||||
per_cpu(x86_cpu_to_apicid, cpu) = apicid;
|
||||
@ -743,6 +744,7 @@ EXPORT_SYMBOL(xquad_portio);
|
||||
|
||||
static void __init disable_smp(void)
|
||||
{
|
||||
cpu_possible_map = cpumask_of_cpu(0);
|
||||
smpboot_clear_io_apic_irqs();
|
||||
phys_cpu_present_map = physid_mask_of_physid(0);
|
||||
map_cpu_to_logical_apicid();
|
||||
|
Loading…
Reference in New Issue
Block a user