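/*
 * Generic deferred irq affinity migration: an affinity change that could
 * not be applied immediately is recorded in desc->pending_mask and
 * carried out here once the interrupt can safely be reprogrammed.
 */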
#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"
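
/*
 * irq_move_masked_irq - perform a pending affinity change for @idata.
 *
 * The caller must hold desc->lock and must have masked the interrupt
 * line; see the comment above the irq_do_set_affinity() call below.
 */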
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = desc->irq_data.chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;
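
	/*
	 * Consume the pending request up front; below it is either
	 * applied or dropped (per-cpu irq, empty mask, offline target
	 * CPUs, or a chip without an irq_set_affinity() callback).
	 */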
	irqd_clr_move_pending(&desc->irq_data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid mask to work with, do the
	 * disable, re-program, enable sequence. This is *not*
	 * particularly important for level-triggered interrupts,
	 * but in an edge-triggered case we might be writing the
	 * RTE while an active trigger is coming in, which could
	 * cause some ioapics to malfunction. Being paranoid,
	 * I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
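
	/*
	 * cpumask_any_and() returns >= nr_cpu_ids when the two masks do
	 * not intersect, so a pending mask that names only offline CPUs
	 * is dropped rather than applied.
	 */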
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

	cpumask_clear(desc->pending_mask);
}
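
/*
 * irq_move_irq - as irq_move_masked_irq(), but for callers that have not
 * masked the line themselves: the interrupt is masked around the actual
 * migration unless it was masked already.
 *
 * A typical call site is an irq chip's ack path, which runs while the
 * interrupt is being handled. Hypothetical sketch (the my_chip_* names
 * are made up for illustration, not a real driver):
 *
 *	static void my_chip_irq_ack(struct irq_data *data)
 *	{
 *		irq_move_irq(data);	// honour a pending affinity change
 *		my_chip_hw_ack(data);	// then ack the hardware
 *	}
 */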
void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * enabled; the lookup is compiled away when it is disabled, so an
	 * "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" is not needed here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);

	irq_move_masked_irq(idata);

	if (!masked)
		idata->chip->irq_unmask(idata);
}