From bdb8c9353ead1a4176e9ba034ea7ceb210c8a59c Mon Sep 17 00:00:00 2001
From: Alexander Gordeev
Date: Thu, 6 May 2021 20:06:00 +0200
Subject: [PATCH] s390/mm: ensure switch_mm() is executed with interrupts
 disabled

Architecture callback switch_mm() is allowed to be called with
enabled interrupts. However, our implementation of switch_mm()
does not expect that. Let's follow other architectures and make
sure switch_mm() is always executed with interrupts disabled,
regardless of what happens with the generic kernel code in the
future.

Signed-off-by: Alexander Gordeev
Reviewed-by: Heiko Carstens
Signed-off-by: Vasily Gorbik
---
 arch/s390/include/asm/mmu_context.h | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index e7cffc7b5c2f..c7937f369e62 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -70,8 +70,8 @@ static inline int init_new_context(struct task_struct *tsk,
 	return 0;
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+				      struct task_struct *tsk)
 {
 	int cpu = smp_processor_id();
 
@@ -85,6 +85,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (prev != next)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
 
 static inline void finish_arch_post_lock_switch(void)
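
For context on the new "#define switch_mm_irqs_off switch_mm_irqs_off" line:
the generic mm code only uses an architecture's irqs-off variant when that
macro is defined, and otherwise falls back to plain switch_mm(). A minimal
sketch of that fallback, paraphrasing the #ifndef guard in
include/linux/mmu_context.h (the exact comment wording there may differ):

	/*
	 * If an architecture does not provide switch_mm_irqs_off(),
	 * callers that already run with interrupts disabled (such as
	 * the scheduler's context switch path) get plain switch_mm().
	 */
	#ifndef switch_mm_irqs_off
	# define switch_mm_irqs_off switch_mm
	#endif

With the define in place, those callers reach the s390 irqs-off variant
directly, while switch_mm() itself keeps its old contract of being callable
with interrupts enabled by wrapping the helper in local_irq_save() and
local_irq_restore().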