[S390] smp: delay idle task creation

Delay idle task creation until a cpu gets set online instead of
creating them for all possible cpus at system startup.
For one cpu system this should save more than 1 MB.
On my debug system with lots of debug stuff enabled this saves 2 MB.

Same as on x86.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
Heiko Carstens 2011-01-05 12:48:08 +01:00 committed by Martin Schwidefsky
parent 09a8e7adcf
commit f230886b0b

View File

@ -23,6 +23,7 @@
#define KMSG_COMPONENT "cpu" #define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/mm.h> #include <linux/mm.h>
@ -477,18 +478,20 @@ int __cpuinit start_secondary(void *cpuvoid)
return 0; return 0;
} }
static void __init smp_create_idle(unsigned int cpu) struct create_idle {
{ struct work_struct work;
struct task_struct *p; struct task_struct *idle;
struct completion done;
int cpu;
};
/* static void __cpuinit smp_fork_idle(struct work_struct *work)
* don't care about the psw and regs settings since we'll never {
* reschedule the forked task. struct create_idle *c_idle;
*/
p = fork_idle(cpu); c_idle = container_of(work, struct create_idle, work);
if (IS_ERR(p)) c_idle->idle = fork_idle(c_idle->cpu);
panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); complete(&c_idle->done);
current_set[cpu] = p;
} }
static int __cpuinit smp_alloc_lowcore(int cpu) static int __cpuinit smp_alloc_lowcore(int cpu)
@ -552,6 +555,7 @@ static void smp_free_lowcore(int cpu)
int __cpuinit __cpu_up(unsigned int cpu) int __cpuinit __cpu_up(unsigned int cpu)
{ {
struct _lowcore *cpu_lowcore; struct _lowcore *cpu_lowcore;
struct create_idle c_idle;
struct task_struct *idle; struct task_struct *idle;
struct stack_frame *sf; struct stack_frame *sf;
u32 lowcore; u32 lowcore;
@ -559,6 +563,18 @@ int __cpuinit __cpu_up(unsigned int cpu)
if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
return -EIO; return -EIO;
idle = current_set[cpu];
if (!idle) {
c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
c_idle.cpu = cpu;
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
if (IS_ERR(c_idle.idle))
return PTR_ERR(c_idle.idle);
idle = c_idle.idle;
current_set[cpu] = c_idle.idle;
}
if (smp_alloc_lowcore(cpu)) if (smp_alloc_lowcore(cpu))
return -ENOMEM; return -ENOMEM;
do { do {
@ -573,7 +589,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
udelay(10); udelay(10);
idle = current_set[cpu];
cpu_lowcore = lowcore_ptr[cpu]; cpu_lowcore = lowcore_ptr[cpu];
cpu_lowcore->kernel_stack = (unsigned long) cpu_lowcore->kernel_stack = (unsigned long)
task_stack_page(idle) + THREAD_SIZE; task_stack_page(idle) + THREAD_SIZE;
@ -685,7 +700,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#endif #endif
unsigned long async_stack, panic_stack; unsigned long async_stack, panic_stack;
struct _lowcore *lowcore; struct _lowcore *lowcore;
unsigned int cpu;
smp_detect_cpus(); smp_detect_cpus();
@ -720,9 +734,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
BUG(); BUG();
#endif #endif
for_each_possible_cpu(cpu)
if (cpu != smp_processor_id())
smp_create_idle(cpu);
} }
void __init smp_prepare_boot_cpu(void) void __init smp_prepare_boot_cpu(void)