2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
|
|
|
|
*
|
|
|
|
* This file contains the lowest level x86-specific interrupt
|
|
|
|
* entry, irq-stacks and irq statistics code. All the remaining
|
|
|
|
* irq logic is done by the generic kernel/irq/ code and
|
|
|
|
* by the x86-specific irq controller code. (e.g. i8259.c and
|
|
|
|
* io_apic.c.)
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/seq_file.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/kernel_stat.h>
|
2005-06-25 21:54:50 +00:00
|
|
|
#include <linux/notifier.h>
|
|
|
|
#include <linux/cpu.h>
|
|
|
|
#include <linux/delay.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-02-16 09:27:58 +00:00
|
|
|
#include <asm/apic.h>
|
|
|
|
#include <asm/uaccess.h>
|
|
|
|
|
2007-07-19 08:48:13 +00:00
|
|
|
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
|
2005-04-16 22:20:36 +00:00
|
|
|
EXPORT_PER_CPU_SYMBOL(irq_stat);
|
|
|
|
|
2007-05-02 17:27:16 +00:00
|
|
|
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
|
|
|
|
EXPORT_PER_CPU_SYMBOL(irq_regs);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* 'what should we do if we get a hw irq event on an illegal vector'.
|
|
|
|
* each architecture has to answer this themselves.
|
|
|
|
*/
|
|
|
|
void ack_bad_irq(unsigned int irq)
|
|
|
|
{
|
2007-02-16 09:27:58 +00:00
|
|
|
printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
|
|
|
/*
|
|
|
|
* Currently unexpected vectors happen only on SMP and APIC.
|
|
|
|
* We _must_ ack these because every local APIC has only N
|
|
|
|
* irq slots per priority level, and a 'hanging, unacked' IRQ
|
|
|
|
* holds up an irq slot - in excessive cases (when multiple
|
|
|
|
* unexpected vectors occur) that might lock up the APIC
|
|
|
|
* completely.
|
|
|
|
* But only ack when the APIC is enabled -AK
|
|
|
|
*/
|
|
|
|
if (cpu_has_apic)
|
|
|
|
ack_APIC_irq();
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
2007-02-16 09:27:58 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-05-05 13:58:15 +00:00
|
|
|
#ifdef CONFIG_DEBUG_STACKOVERFLOW

/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	/* Mask the current stack pointer down to its offset within the
	 * THREAD_SIZE-aligned stack. */
	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	/* Low offsets mean we are close to the thread_info at the base. */
	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
}

#else
/* Overflow debugging disabled: compile to nothing. */
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#ifdef CONFIG_4KSTACKS
|
|
|
|
/*
|
|
|
|
* per-CPU IRQ handling contexts (thread information and stack)
|
|
|
|
*/
|
|
|
|
union irq_ctx {
|
|
|
|
struct thread_info tinfo;
|
|
|
|
u32 stack[THREAD_SIZE/sizeof(u32)];
|
|
|
|
};
|
|
|
|
|
2006-06-23 09:05:30 +00:00
|
|
|
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
|
|
|
|
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-05-05 16:13:50 +00:00
|
|
|
static char softirq_stack[NR_CPUS * THREAD_SIZE]
|
|
|
|
__attribute__((__section__(".bss.page_aligned")));
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-05-05 16:13:50 +00:00
|
|
|
static char hardirq_stack[NR_CPUS * THREAD_SIZE]
|
|
|
|
__attribute__((__section__(".bss.page_aligned")));
|
2006-06-28 11:26:43 +00:00
|
|
|
|
2008-05-05 16:13:50 +00:00
|
|
|
static void call_on_stack(void *func, void *stack)
|
2008-05-05 10:36:38 +00:00
|
|
|
{
|
2008-05-05 16:13:50 +00:00
|
|
|
asm volatile("xchgl %%ebx,%%esp \n"
|
|
|
|
"call *%%edi \n"
|
|
|
|
"movl %%ebx,%%esp \n"
|
|
|
|
: "=b" (stack)
|
|
|
|
: "0" (stack),
|
|
|
|
"D"(func)
|
|
|
|
: "memory", "cc", "edx", "ecx", "eax");
|
2008-05-05 10:36:38 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-05-05 13:58:15 +00:00
|
|
|
static inline int
|
|
|
|
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
|
|
|
|
{
|
|
|
|
union irq_ctx *curctx, *irqctx;
|
2008-05-05 16:13:50 +00:00
|
|
|
u32 *isp, arg1, arg2;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
curctx = (union irq_ctx *) current_thread_info();
|
|
|
|
irqctx = hardirq_ctx[smp_processor_id()];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* this is where we switch to the IRQ stack. However, if we are
|
|
|
|
* already using the IRQ stack (because we interrupted a hardirq
|
|
|
|
* handler) we can't do that and just have to keep using the
|
|
|
|
* current stack (which is the irq stack already after all)
|
|
|
|
*/
|
2008-05-05 13:58:15 +00:00
|
|
|
if (unlikely(curctx == irqctx))
|
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-05-05 13:58:15 +00:00
|
|
|
/* build the stack frame on the IRQ stack */
|
|
|
|
isp = (u32 *) ((char*)irqctx + sizeof(*irqctx));
|
|
|
|
irqctx->tinfo.task = curctx->tinfo.task;
|
|
|
|
irqctx->tinfo.previous_esp = current_stack_pointer;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-05-05 13:58:15 +00:00
|
|
|
/*
|
|
|
|
* Copy the softirq bits in preempt_count so that the
|
|
|
|
* softirq checks work in the hardirq context.
|
|
|
|
*/
|
|
|
|
irqctx->tinfo.preempt_count =
|
|
|
|
(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
|
|
|
|
(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
|
|
|
|
|
|
|
|
if (unlikely(overflow))
|
2008-05-05 16:13:50 +00:00
|
|
|
call_on_stack(print_stack_overflow, isp);
|
|
|
|
|
|
|
|
asm volatile("xchgl %%ebx,%%esp \n"
|
|
|
|
"call *%%edi \n"
|
|
|
|
"movl %%ebx,%%esp \n"
|
|
|
|
: "=a" (arg1), "=d" (arg2), "=b" (isp)
|
|
|
|
: "0" (irq), "1" (desc), "2" (isp),
|
|
|
|
"D" (desc->handle_irq)
|
|
|
|
: "memory", "cc", "ecx");
|
2005-04-16 22:20:36 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* allocate per-cpu stacks for hardirq and for softirq processing
|
|
|
|
*/
|
2008-05-05 16:13:50 +00:00
|
|
|
void __cpuinit irq_ctx_init(int cpu)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
union irq_ctx *irqctx;
|
|
|
|
|
|
|
|
if (hardirq_ctx[cpu])
|
|
|
|
return;
|
|
|
|
|
|
|
|
irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
|
2008-05-05 16:13:50 +00:00
|
|
|
irqctx->tinfo.task = NULL;
|
|
|
|
irqctx->tinfo.exec_domain = NULL;
|
|
|
|
irqctx->tinfo.cpu = cpu;
|
|
|
|
irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
|
|
|
|
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
hardirq_ctx[cpu] = irqctx;
|
|
|
|
|
|
|
|
irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
|
2008-05-05 16:13:50 +00:00
|
|
|
irqctx->tinfo.task = NULL;
|
|
|
|
irqctx->tinfo.exec_domain = NULL;
|
|
|
|
irqctx->tinfo.cpu = cpu;
|
|
|
|
irqctx->tinfo.preempt_count = 0;
|
|
|
|
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
softirq_ctx[cpu] = irqctx;
|
|
|
|
|
2008-05-05 16:13:50 +00:00
|
|
|
printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
|
|
|
|
cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2005-06-25 21:54:56 +00:00
|
|
|
void irq_ctx_exit(int cpu)
|
|
|
|
{
|
|
|
|
hardirq_ctx[cpu] = NULL;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
asmlinkage void do_softirq(void)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
struct thread_info *curctx;
|
|
|
|
union irq_ctx *irqctx;
|
|
|
|
u32 *isp;
|
|
|
|
|
|
|
|
if (in_interrupt())
|
|
|
|
return;
|
|
|
|
|
|
|
|
local_irq_save(flags);
|
|
|
|
|
|
|
|
if (local_softirq_pending()) {
|
|
|
|
curctx = current_thread_info();
|
|
|
|
irqctx = softirq_ctx[smp_processor_id()];
|
|
|
|
irqctx->tinfo.task = curctx->task;
|
|
|
|
irqctx->tinfo.previous_esp = current_stack_pointer;
|
|
|
|
|
|
|
|
/* build the stack frame on the softirq stack */
|
|
|
|
isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
|
|
|
|
|
2008-05-05 16:13:50 +00:00
|
|
|
call_on_stack(__do_softirq, isp);
|
2006-07-03 07:24:43 +00:00
|
|
|
/*
|
|
|
|
* Shouldnt happen, we returned above if in_interrupt():
|
2008-05-05 16:13:50 +00:00
|
|
|
*/
|
2006-07-03 07:24:43 +00:00
|
|
|
WARN_ON_ONCE(softirq_count());
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
local_irq_restore(flags);
|
|
|
|
}
|
2008-05-05 16:13:50 +00:00
|
|
|
|
|
|
|
#else
/* Without 4K stacks there is no separate irq stack: always run in place. */
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif
|
|
|
|
|
2008-05-05 16:13:50 +00:00
|
|
|
/*
|
|
|
|
* do_IRQ handles all normal device IRQ's (the special
|
|
|
|
* SMP cross-CPU interrupts have their own specific
|
|
|
|
* handlers).
|
|
|
|
*/
|
|
|
|
unsigned int do_IRQ(struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
struct pt_regs *old_regs;
|
|
|
|
/* high bit used in ret_from_ code */
|
|
|
|
int overflow, irq = ~regs->orig_ax;
|
|
|
|
struct irq_desc *desc = irq_desc + irq;
|
|
|
|
|
|
|
|
if (unlikely((unsigned)irq >= NR_IRQS)) {
|
|
|
|
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
|
|
|
|
__func__, irq);
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
|
|
|
|
old_regs = set_irq_regs(regs);
|
|
|
|
irq_enter();
|
|
|
|
|
|
|
|
overflow = check_stack_overflow();
|
|
|
|
|
|
|
|
if (!execute_on_irq_stack(overflow, desc, irq)) {
|
|
|
|
if (unlikely(overflow))
|
|
|
|
print_stack_overflow();
|
|
|
|
desc->handle_irq(irq, desc);
|
|
|
|
}
|
|
|
|
|
|
|
|
irq_exit();
|
|
|
|
set_irq_regs(old_regs);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Interrupt statistics:
|
|
|
|
*/
|
|
|
|
|
|
|
|
atomic_t irq_err_count;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* /proc/interrupts printing:
|
|
|
|
*/
|
|
|
|
|
|
|
|
int show_interrupts(struct seq_file *p, void *v)
|
|
|
|
{
|
|
|
|
int i = *(loff_t *) v, j;
|
|
|
|
struct irqaction * action;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
if (i == 0) {
|
|
|
|
seq_printf(p, " ");
|
2005-10-30 22:59:32 +00:00
|
|
|
for_each_online_cpu(j)
|
2006-06-26 11:59:23 +00:00
|
|
|
seq_printf(p, "CPU%-8d",j);
|
2005-04-16 22:20:36 +00:00
|
|
|
seq_putc(p, '\n');
|
|
|
|
}
|
|
|
|
|
|
|
|
if (i < NR_IRQS) {
|
2007-10-17 16:04:40 +00:00
|
|
|
unsigned any_count = 0;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
spin_lock_irqsave(&irq_desc[i].lock, flags);
|
2007-10-17 16:04:40 +00:00
|
|
|
#ifndef CONFIG_SMP
|
|
|
|
any_count = kstat_irqs(i);
|
|
|
|
#else
|
|
|
|
for_each_online_cpu(j)
|
|
|
|
any_count |= kstat_cpu(j).irqs[i];
|
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
action = irq_desc[i].action;
|
2007-10-17 16:04:40 +00:00
|
|
|
if (!action && !any_count)
|
2005-04-16 22:20:36 +00:00
|
|
|
goto skip;
|
|
|
|
seq_printf(p, "%3d: ",i);
|
|
|
|
#ifndef CONFIG_SMP
|
|
|
|
seq_printf(p, "%10u ", kstat_irqs(i));
|
|
|
|
#else
|
2005-10-30 22:59:32 +00:00
|
|
|
for_each_online_cpu(j)
|
2005-06-25 21:54:50 +00:00
|
|
|
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
2006-10-04 09:16:26 +00:00
|
|
|
seq_printf(p, " %8s", irq_desc[i].chip->name);
|
2006-10-17 07:10:03 +00:00
|
|
|
seq_printf(p, "-%-8s", irq_desc[i].name);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-10-17 16:04:40 +00:00
|
|
|
if (action) {
|
|
|
|
seq_printf(p, " %s", action->name);
|
|
|
|
while ((action = action->next) != NULL)
|
|
|
|
seq_printf(p, ", %s", action->name);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
seq_putc(p, '\n');
|
|
|
|
skip:
|
|
|
|
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
|
|
|
|
} else if (i == NR_IRQS) {
|
|
|
|
seq_printf(p, "NMI: ");
|
2005-10-30 22:59:32 +00:00
|
|
|
for_each_online_cpu(j)
|
2005-06-25 21:54:50 +00:00
|
|
|
seq_printf(p, "%10u ", nmi_count(j));
|
2007-10-17 16:04:40 +00:00
|
|
|
seq_printf(p, " Non-maskable interrupts\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
|
|
|
seq_printf(p, "LOC: ");
|
2005-10-30 22:59:32 +00:00
|
|
|
for_each_online_cpu(j)
|
2005-06-25 21:54:50 +00:00
|
|
|
seq_printf(p, "%10u ",
|
|
|
|
per_cpu(irq_stat,j).apic_timer_irqs);
|
2007-10-17 16:04:40 +00:00
|
|
|
seq_printf(p, " Local timer interrupts\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif
|
2007-10-17 16:04:40 +00:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
seq_printf(p, "RES: ");
|
|
|
|
for_each_online_cpu(j)
|
|
|
|
seq_printf(p, "%10u ",
|
|
|
|
per_cpu(irq_stat,j).irq_resched_count);
|
|
|
|
seq_printf(p, " Rescheduling interrupts\n");
|
|
|
|
seq_printf(p, "CAL: ");
|
|
|
|
for_each_online_cpu(j)
|
|
|
|
seq_printf(p, "%10u ",
|
|
|
|
per_cpu(irq_stat,j).irq_call_count);
|
|
|
|
seq_printf(p, " function call interrupts\n");
|
|
|
|
seq_printf(p, "TLB: ");
|
|
|
|
for_each_online_cpu(j)
|
|
|
|
seq_printf(p, "%10u ",
|
|
|
|
per_cpu(irq_stat,j).irq_tlb_count);
|
|
|
|
seq_printf(p, " TLB shootdowns\n");
|
|
|
|
#endif
|
2008-05-12 13:44:41 +00:00
|
|
|
#ifdef CONFIG_X86_MCE
|
2007-10-17 16:04:40 +00:00
|
|
|
seq_printf(p, "TRM: ");
|
|
|
|
for_each_online_cpu(j)
|
|
|
|
seq_printf(p, "%10u ",
|
|
|
|
per_cpu(irq_stat,j).irq_thermal_count);
|
|
|
|
seq_printf(p, " Thermal event interrupts\n");
|
2008-05-12 13:44:41 +00:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
2007-10-17 16:04:40 +00:00
|
|
|
seq_printf(p, "SPU: ");
|
|
|
|
for_each_online_cpu(j)
|
|
|
|
seq_printf(p, "%10u ",
|
|
|
|
per_cpu(irq_stat,j).irq_spurious_count);
|
|
|
|
seq_printf(p, " Spurious interrupts\n");
|
2008-05-12 13:44:41 +00:00
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
|
|
|
|
#if defined(CONFIG_X86_IO_APIC)
|
|
|
|
seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2005-06-25 21:54:50 +00:00
|
|
|
|
2008-05-12 13:44:41 +00:00
|
|
|
/*
|
|
|
|
* /proc/stat helpers
|
|
|
|
*/
|
|
|
|
u64 arch_irq_stat_cpu(unsigned int cpu)
|
|
|
|
{
|
|
|
|
u64 sum = nmi_count(cpu);
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
|
|
|
sum += per_cpu(irq_stat, cpu).apic_timer_irqs;
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
sum += per_cpu(irq_stat, cpu).irq_resched_count;
|
|
|
|
sum += per_cpu(irq_stat, cpu).irq_call_count;
|
|
|
|
sum += per_cpu(irq_stat, cpu).irq_tlb_count;
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_X86_MCE
|
|
|
|
sum += per_cpu(irq_stat, cpu).irq_thermal_count;
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
|
|
|
sum += per_cpu(irq_stat, cpu).irq_spurious_count;
|
|
|
|
#endif
|
|
|
|
return sum;
|
|
|
|
}
|
|
|
|
|
|
|
|
u64 arch_irq_stat(void)
|
|
|
|
{
|
|
|
|
u64 sum = atomic_read(&irq_err_count);
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_IO_APIC
|
|
|
|
sum += atomic_read(&irq_mis_count);
|
|
|
|
#endif
|
|
|
|
return sum;
|
|
|
|
}
|
|
|
|
|
2005-06-25 21:54:50 +00:00
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

/*
 * Re-target every irq away from CPUs that are not in @map (called when
 * a CPU goes offline), then give pending interrupts a moment to drain.
 */
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for (irq = 0; irq < NR_IRQS; irq++) {
		cpumask_t mask;

		/* irq 2 is the cascade - leave it alone */
		if (irq == 2)
			continue;

		cpus_and(mask, irq_desc[irq].affinity, map);
		if (any_online_cpu(mask) == NR_CPUS) {
			/* No surviving CPU in the affinity set: widen it. */
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].chip->set_affinity)
			irq_desc[irq].chip->set_affinity(irq, mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

#if 0
	barrier();
	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
	   [note the nop - the interrupt-enable boundary on x86 is two
	   instructions from sti] - to flush out pending hardirqs and
	   IPIs. After this point nothing is supposed to reach this CPU." */
	__asm__ __volatile__("sti; nop; cli");
	barrier();
#else
	/* That doesn't seem sufficient. Give it 1ms. */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
#endif
}
#endif
|
|
|
|
|