s390: use init_thread_union aka initial stack for the first process

s390 is the only architecture which switches from the initial stack to a
different, later-allocated stack for the first process.
This is (at least) problematic for the stackleak feature, which instruments
functions to save the current stack pointer within the task structure of the
running process.

The stackleak code compares stack pointers of the current process - and
doesn't expect that the kernel stack of a task can change. Even though the
stackleak feature itself will not cause any harm, the assumption about
kernel stacks being consistent is there, and only s390 doesn't follow that.

Therefore switch back to use init_thread_union, just like all other
architectures.

Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
This commit is contained in:
Heiko Carstens 2023-03-27 11:37:24 +02:00 committed by Vasily Gorbik
parent cfea9bc78b
commit 944c78376a
2 changed files with 19 additions and 31 deletions

View File

@ -380,6 +380,12 @@ void stack_free(unsigned long stack)
#endif #endif
} }
void __init arch_call_rest_init(void)
{
smp_reinit_ipl_cpu();
rest_init();
}
int __init arch_early_irq_init(void) int __init arch_early_irq_init(void)
{ {
unsigned long stack; unsigned long stack;
@ -391,28 +397,21 @@ int __init arch_early_irq_init(void)
return 0; return 0;
} }
void __init arch_call_rest_init(void) static unsigned long __init stack_alloc_early(void)
{ {
unsigned long stack; unsigned long stack;
smp_reinit_ipl_cpu(); stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
stack = stack_alloc(); if (!stack) {
if (!stack) panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
panic("Couldn't allocate kernel stack"); __func__, THREAD_SIZE, THREAD_SIZE);
current->stack = (void *) stack; }
#ifdef CONFIG_VMAP_STACK return stack;
current->stack_vm_area = (void *) stack;
#endif
set_task_stack_end_magic(current);
stack += STACK_INIT_OFFSET;
S390_lowcore.kernel_stack = stack;
call_on_stack_noreturn(rest_init, stack);
} }
static void __init setup_lowcore(void) static void __init setup_lowcore(void)
{ {
struct lowcore *lc, *abs_lc; struct lowcore *lc, *abs_lc;
unsigned long mcck_stack;
/* /*
* Setup lowcore for boot cpu * Setup lowcore for boot cpu
@ -436,7 +435,6 @@ static void __init setup_lowcore(void)
lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->io_new_psw.addr = (unsigned long) io_int_handler; lc->io_new_psw.addr = (unsigned long) io_int_handler;
lc->clock_comparator = clock_comparator_max; lc->clock_comparator = clock_comparator_max;
lc->nodat_stack = ((unsigned long)&init_thread_union) + STACK_INIT_OFFSET;
lc->current_task = (unsigned long)&init_task; lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC; lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags; lc->machine_flags = S390_lowcore.machine_flags;
@ -449,17 +447,14 @@ static void __init setup_lowcore(void)
lc->steal_timer = S390_lowcore.steal_timer; lc->steal_timer = S390_lowcore.steal_timer;
lc->last_update_timer = S390_lowcore.last_update_timer; lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock; lc->last_update_clock = S390_lowcore.last_update_clock;
/* /*
* Allocate the global restart stack which is the same for * Allocate the global restart stack which is the same for
* all CPUs in cast *one* of them does a PSW restart. * all CPUs in case *one* of them does a PSW restart.
*/ */
restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE); restart_stack = (void *)(stack_alloc_early() + STACK_INIT_OFFSET);
if (!restart_stack) lc->mcck_stack = stack_alloc_early() + STACK_INIT_OFFSET;
panic("%s: Failed to allocate %lu bytes align=0x%lx\n", lc->nodat_stack = stack_alloc_early() + STACK_INIT_OFFSET;
__func__, THREAD_SIZE, THREAD_SIZE); lc->kernel_stack = S390_lowcore.kernel_stack;
restart_stack += STACK_INIT_OFFSET;
/* /*
* Set up PSW restart to call ipl.c:do_restart(). Copy the relevant * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
* restart data to the absolute zero lowcore. This is necessary if * restart data to the absolute zero lowcore. This is necessary if
@ -470,13 +465,6 @@ static void __init setup_lowcore(void)
lc->restart_data = 0; lc->restart_data = 0;
lc->restart_source = -1U; lc->restart_source = -1U;
__ctl_store(lc->cregs_save_area, 0, 15); __ctl_store(lc->cregs_save_area, 0, 15);
mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
if (!mcck_stack)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, THREAD_SIZE, THREAD_SIZE);
lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
lc->spinlock_lockval = arch_spin_lockval(0); lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0; lc->spinlock_index = 0;
arch_spin_lock_setup(0); arch_spin_lock_setup(0);

View File

@ -1295,7 +1295,7 @@ int __init smp_reinit_ipl_cpu(void)
free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER); free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE); memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE);
memblock_free_late(__pa(lc_ipl->nodat_stack - STACK_INIT_OFFSET), THREAD_SIZE);
memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl)); memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl));
return 0; return 0;
} }