mirror of
https://github.com/torvalds/linux.git
0366a1c70b
Nowadays, irq_exit() calls __do_softirq() pretty much directly instead of calling do_softirq(), which switches to the dedicated softirq stack. This has led to observed stack overflows on powerpc, since we call irq_enter() and irq_exit() outside of the scope that switches to the irq stack. This fixes it by moving the stack switching up a level, making irq_enter() and irq_exit() run off the irq stack.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
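The fix hinges on do_IRQ() switching to the per-cpu hard irq stack before any of the irq_enter()/irq_exit() accounting runs, with the actual handling split out into __do_irq(). Below is a minimal sketch of that shape, built only from the hardirq_ctx, call_do_irq() and __do_irq() declarations in the header that follows; it is not the in-tree arch/powerpc/kernel/irq.c, which does additional thread_info and flags housekeeping, and ppc_md.get_irq() merely stands in for the platform's interrupt fetch.

/* Sketch only -- the real arch/powerpc/kernel/irq.c does more housekeeping. */
#include <linux/irq.h>
#include <linux/hardirq.h>
#include <linux/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>

void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	/* After the rework, irq_enter()/irq_exit() already run on the irq stack */
	irq_enter();

	irq = ppc_md.get_irq();		/* platform hook returning the virq number */
	if (irq)
		generic_handle_irq(irq);

	irq_exit();
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct thread_info *curtp = current_thread_info();
	struct thread_info *irqtp = hardirq_ctx[raw_smp_processor_id()];

	if (unlikely(curtp == irqtp)) {
		/* Already on the irq stack (nested case): handle it in place */
		__do_irq(regs);
	} else {
		/* Switch to the per-cpu hard irq stack, then call __do_irq() */
		irqtp->task = curtp->task;
		call_do_irq(regs, irqtp);	/* asm helper: swap stacks, call __do_irq() */
		irqtp->task = NULL;
	}

	set_irq_regs(old_regs);
}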
80 lines
1.9 KiB
C
#ifdef __KERNEL__
#ifndef _ASM_POWERPC_IRQ_H
#define _ASM_POWERPC_IRQ_H

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/irqdomain.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/radix-tree.h>

#include <asm/types.h>
#include <linux/atomic.h>


extern atomic_t ppc_n_lost_interrupts;

/* This number is used when no interrupt has been assigned */
#define NO_IRQ (0)

/* Total number of virq in the platform */
#define NR_IRQS CONFIG_NR_IRQS

/* Same thing, used by the generic IRQ code */
#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS

extern irq_hw_number_t virq_to_hw(unsigned int virq);

/**
 * irq_early_init - Init irq remapping subsystem
 */
extern void irq_early_init(void);

static __inline__ int irq_canonicalize(int irq)
{
	return irq;
}

extern int distribute_irqs;

struct irqaction;
struct pt_regs;

#define __ARCH_HAS_DO_SOFTIRQ

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/*
 * Per-cpu stacks for handling critical, debug and machine check
 * level interrupts.
 */
extern struct thread_info *critirq_ctx[NR_CPUS];
extern struct thread_info *dbgirq_ctx[NR_CPUS];
extern struct thread_info *mcheckirq_ctx[NR_CPUS];
extern void exc_lvl_ctx_init(void);
#else
#define exc_lvl_ctx_init()
#endif

/*
 * Per-cpu stacks for handling hard and soft interrupts.
 */
extern struct thread_info *hardirq_ctx[NR_CPUS];
extern struct thread_info *softirq_ctx[NR_CPUS];

extern void irq_ctx_init(void);
extern void call_do_softirq(struct thread_info *tp);
extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
extern void do_IRQ(struct pt_regs *regs);
extern void __do_irq(struct pt_regs *regs);

int irq_choose_cpu(const struct cpumask *mask);

#endif /* _ASM_IRQ_H */
#endif /* __KERNEL__ */
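Because the header defines __ARCH_HAS_DO_SOFTIRQ, the generic softirq code expects the architecture to run softirqs on its own stack. The following is a minimal sketch of that hook, assuming the do_softirq_own_stack() interface and using only the softirq_ctx and call_do_softirq() declarations above; the actual in-tree version also copies thread_info flags back to the task stack.

/* Sketch only: run softirqs on the per-cpu softirq stack. */
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <asm/irq.h>

void do_softirq_own_stack(void)
{
	struct thread_info *curtp = current_thread_info();
	struct thread_info *irqtp = softirq_ctx[smp_processor_id()];

	irqtp->task = curtp->task;	/* make "current" look right on the irq stack */
	call_do_softirq(irqtp);		/* asm helper: swap stacks and run __do_softirq() */
	irqtp->task = NULL;
}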