/*
 * IRQ subsystem internal functions and variables:
 */
#include <linux/irqdesc.h>

/* Set on the command line via "noirqdebug"; disables spurious-IRQ detection. */
extern int noirqdebug;

/* Map an embedded irq_data back to its containing irq_desc. */
#define irq_data_to_desc(data)	container_of(data, struct irq_desc, irq_data)
2006-06-29 09:24:51 +00:00
|
|
|
/* Set default functions for irq_chip structures: */
|
|
|
|
extern void irq_chip_set_defaults(struct irq_chip *chip);
|
|
|
|
|
|
|
|
/* Set default handler: */
|
|
|
|
extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
|
|
|
|
|
2008-10-01 21:46:18 +00:00
|
|
|
extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
|
|
|
|
unsigned long flags);
|
2009-03-16 21:33:49 +00:00
|
|
|
extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
|
|
|
|
extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
|
2008-10-01 21:46:18 +00:00
|
|
|
|
2008-12-11 08:15:01 +00:00
|
|
|
extern struct lock_class_key irq_desc_lock_class;
|
2009-04-28 01:00:38 +00:00
|
|
|
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
|
2009-02-09 00:18:03 +00:00
|
|
|
extern void clear_kstat_irqs(struct irq_desc *desc);
|
2009-11-17 15:46:45 +00:00
|
|
|
extern raw_spinlock_t sparse_irq_lock;
|
2009-01-11 06:24:06 +00:00
|
|
|
|
2010-10-01 14:03:45 +00:00
|
|
|
/* Resending of interrupts :*/
|
|
|
|
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
|
|
|
|
|
2009-01-11 06:24:06 +00:00
|
|
|
#ifdef CONFIG_SPARSE_IRQ
|
2010-02-10 09:20:34 +00:00
|
|
|
void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
|
2009-01-11 06:24:06 +00:00
|
|
|
#endif
|
2008-12-11 08:15:01 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#ifdef CONFIG_PROC_FS
|
2008-08-20 03:50:11 +00:00
|
|
|
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
|
2005-04-16 22:20:36 +00:00
|
|
|
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
|
|
|
|
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
|
|
|
|
#else
|
2008-08-20 03:50:11 +00:00
|
|
|
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
|
2005-04-16 22:20:36 +00:00
|
|
|
static inline void register_handler_proc(unsigned int irq,
|
|
|
|
struct irqaction *action) { }
|
|
|
|
static inline void unregister_handler_proc(unsigned int irq,
|
|
|
|
struct irqaction *action) { }
|
|
|
|
#endif
|
|
|
|
|
2008-11-07 12:18:30 +00:00
|
|
|
extern int irq_select_affinity_usr(unsigned int irq);
|
|
|
|
|
2009-07-21 09:09:39 +00:00
|
|
|
extern void irq_set_thread_affinity(struct irq_desc *desc);
|
2009-04-28 00:59:53 +00:00
|
|
|
|
2010-10-01 13:17:14 +00:00
|
|
|
#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
|
|
|
|
static inline void irq_end(unsigned int irq, struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
if (desc->irq_data.chip && desc->irq_data.chip->end)
|
|
|
|
desc->irq_data.chip->end(irq);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
static inline void irq_end(unsigned int irq, struct irq_desc *desc) { }
|
|
|
|
#endif
|
|
|
|
|
2009-08-13 10:17:48 +00:00
|
|
|
/* Inline functions for support of irq chips on slow busses */
|
2010-09-27 12:44:35 +00:00
|
|
|
static inline void chip_bus_lock(struct irq_desc *desc)
|
2009-08-13 10:17:48 +00:00
|
|
|
{
|
2010-09-27 12:44:35 +00:00
|
|
|
if (unlikely(desc->irq_data.chip->irq_bus_lock))
|
|
|
|
desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
|
2009-08-13 10:17:48 +00:00
|
|
|
}
|
|
|
|
|
2010-09-27 12:44:35 +00:00
|
|
|
static inline void chip_bus_sync_unlock(struct irq_desc *desc)
|
2009-08-13 10:17:48 +00:00
|
|
|
{
|
2010-09-27 12:44:35 +00:00
|
|
|
if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
|
|
|
|
desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
|
2009-08-13 10:17:48 +00:00
|
|
|
}
|
|
|
|
|
/*
 * Debugging printout:
 */

#include <linux/kallsyms.h>

/* Print the name of each status flag that is set on the descriptor. */
#define P(f) if (desc->status & f) printk("%14s set\n", #f)

/* Dump the full state of one irq descriptor to the kernel log. */
static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
		irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
	printk("->handle_irq(): %p, ", desc->handle_irq);
	print_symbol("%s\n", (unsigned long)desc->handle_irq);
	printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
	print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
	printk("->action(): %p\n", desc->action);
	if (desc->action) {
		printk("->action->handler(): %p, ", desc->action->handler);
		print_symbol("%s\n", (unsigned long)desc->action->handler);
	}

	P(IRQ_INPROGRESS);
	P(IRQ_DISABLED);
	P(IRQ_PENDING);
	P(IRQ_REPLAY);
	P(IRQ_AUTODETECT);
	P(IRQ_WAITING);
	P(IRQ_LEVEL);
	P(IRQ_MASKED);
#ifdef CONFIG_IRQ_PER_CPU
	P(IRQ_PER_CPU);
#endif
	P(IRQ_NOPROBE);
	P(IRQ_NOREQUEST);
	P(IRQ_NOAUTOEN);
}

#undef P
/* Stuff below will be cleaned up after the sparse allocator is done */

#ifdef CONFIG_SMP
/**
 * alloc_desc_masks - allocate cpumasks for irq_desc
 * @desc:	pointer to irq_desc struct
 * @node:	node which will be handling the cpumasks
 * @boot:	true if need bootmem
 *
 * Allocates affinity and pending_mask cpumask if required.
 * Returns true if successful (or not required).
 */
static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
				    bool boot)
{
	/* During early boot the slab allocator may not sleep. */
	gfp_t gfp = boot ? GFP_NOWAIT : GFP_ATOMIC;

#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return false;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		/* Unwind the affinity allocation on partial failure. */
		free_cpumask_var(desc->irq_data.affinity);
		return false;
	}
#endif
#endif
	return true;
}
static inline void init_desc_masks(struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
cpumask_setall(desc->irq_data.affinity);
|
|
|
|
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
|
|
|
cpumask_clear(desc->pending_mask);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc:	pointer to old irq_desc struct
 * @new_desc:	pointer to new irq_desc struct
 *
 * Ensures affinity and pending_masks are copied to new irq_desc.
 * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
 * irq_desc struct so the copy is redundant.
 */
static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
#endif
#endif
}
static inline void free_desc_masks(struct irq_desc *old_desc,
|
|
|
|
struct irq_desc *new_desc)
|
|
|
|
{
|
|
|
|
free_cpumask_var(old_desc->irq_data.affinity);
|
|
|
|
|
|
|
|
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
|
|
|
free_cpumask_var(old_desc->pending_mask);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
#else /* !CONFIG_SMP */
|
|
|
|
|
|
|
|
static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
|
|
|
|
bool boot)
|
|
|
|
{
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void init_desc_masks(struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void init_copy_desc_masks(struct irq_desc *old_desc,
|
|
|
|
struct irq_desc *new_desc)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void free_desc_masks(struct irq_desc *old_desc,
|
|
|
|
struct irq_desc *new_desc)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_SMP */
|