mirror of
https://github.com/torvalds/linux.git
synced 2024-12-22 10:56:40 +00:00
bf18525fd7
By default, IRQ work is run from the tick interrupt (see irq_work_run() in update_process_times()). When we're in full NOHZ mode, restarting the tick requires the use of IRQ work and if the only place we run IRQ work is in the tick interrupt we have an unbreakable cycle. Implement arch_irq_work_raise() via self IPIs to break this cycle and get the tick started again. Note that we implement this via IPIs which are only available on SMP builds. This shouldn't be a problem because full NOHZ is only supported on SMP builds anyway. Signed-off-by: Stephen Boyd <sboyd@codeaurora.org> Reviewed-by: Kevin Hilman <khilman@linaro.org> Cc: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
33 lines
709 B
C
33 lines
709 B
C
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/irq.h>

/* Number of distinct inter-processor interrupt types counted per CPU. */
#define NR_IPI	7

/*
 * Per-CPU interrupt statistics.  ____cacheline_aligned places each
 * CPU's instance on its own cache line (presumably to avoid false
 * sharing between CPUs updating their counters concurrently).
 */
typedef struct {
	unsigned int __softirq_pending;
#ifdef CONFIG_SMP
	/* One counter per IPI type; only meaningful on SMP builds. */
	unsigned int ipi_irqs[NR_IPI];
#endif
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

/*
 * Non-atomic accessors for the per-CPU stats; callers are expected to
 * provide any serialization needed (e.g. IRQs disabled on that CPU).
 */
#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)

#ifdef CONFIG_SMP
u64 smp_irq_stat_cpu(unsigned int cpu);
#else
#define smp_irq_stat_cpu(cpu)	0
#endif

/* Report IPI counts as this architecture's arch-specific IRQ stats. */
#define arch_irq_stat_cpu	smp_irq_stat_cpu

/* Tell the core that irq_exit() runs with interrupts disabled here. */
#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1

#endif /* __ASM_HARDIRQ_H */