asm-generic: Conditionally enable do_softirq_own_stack() via Kconfig.

Remove the CONFIG_PREEMPT_RT symbol from the ifdef around
do_softirq_own_stack() and move it to Kconfig instead.

Enable softirq stacks based on SOFTIRQ_ON_OWN_STACK which depends on
HAVE_SOFTIRQ_ON_OWN_STACK and its default value is set to !PREEMPT_RT.
This ensures that softirq stacks are not used on PREEMPT_RT and avoids
a 'select' statement on an option which has a 'depends' statement.

Link: https://lore.kernel.org/YvN5E%2FPrHfUhggr7@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
This commit is contained in:
Sebastian Andrzej Siewior 2022-08-25 10:25:05 +02:00 committed by Arnd Bergmann
parent b90cb10531
commit 8cbb2b50ee
10 changed files with 13 additions and 10 deletions

View File

@@ -923,6 +923,9 @@ config HAVE_SOFTIRQ_ON_OWN_STACK
 	  Architecture provides a function to run __do_softirq() on a
 	  separate stack.
 
+config SOFTIRQ_ON_OWN_STACK
+	def_bool HAVE_SOFTIRQ_ON_OWN_STACK && !PREEMPT_RT
+
 config ALTERNATE_USER_ADDRESS_SPACE
 	bool
 	help

View File

@@ -70,7 +70,7 @@ static void __init init_irq_stacks(void)
 	}
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 static void ____do_softirq(void *arg)
 {
 	__do_softirq();

View File

@@ -480,7 +480,7 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
 	*irq_stack_in_use = 1;
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
 	execute_on_irq_stack(__do_softirq, 0);

View File

@@ -199,7 +199,7 @@ static inline void check_stack_overflow(unsigned long sp)
 	}
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 static __always_inline void call_do_softirq(const void *sp)
 {
 	/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
@@ -335,7 +335,7 @@ void *mcheckirq_ctx[NR_CPUS] __read_mostly;
 void *softirq_ctx[NR_CPUS] __read_mostly;
 void *hardirq_ctx[NR_CPUS] __read_mostly;
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
 	call_do_softirq(softirq_ctx[smp_processor_id()]);

View File

@@ -5,7 +5,7 @@
 #include <asm/lowcore.h>
 #include <asm/stacktrace.h>
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 static inline void do_softirq_own_stack(void)
 {
 	call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);

View File

@@ -149,7 +149,7 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
 	struct thread_info *curctx;

View File

@@ -855,7 +855,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
 	void *orig_sp, *sp = softirq_stack[smp_processor_id()];

View File

@@ -203,7 +203,7 @@
 		      IRQ_CONSTRAINTS, regs, vector);		\
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 /*
  * Macro to invoke __do_softirq on the irq stack. This is only called from
  * task context when bottom halves are about to be reenabled and soft

View File

@@ -132,7 +132,7 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 	return 0;
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
 	struct irq_stack *irqstk;

View File

@@ -2,7 +2,7 @@
 #ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
 #define __ASM_GENERIC_SOFTIRQ_STACK_H
 
-#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void);
 #else
 static inline void do_softirq_own_stack(void)