genirq: Separate activation and startup
Activation of an interrupt and startup are currently a combined
functionality. That works so far, but upcoming changes require a strict
separation because the activation can fail in the future.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Yu Chen <yu.c.chen@intel.com>
Acked-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Rui Zhang <rui.zhang@intel.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Len Brown <lenb@kernel.org>
Link: https://lkml.kernel.org/r/20170913213152.754334077@linutronix.de
commit c942cee46b (parent 239306fee8)
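Put differently: activation associates the resources an interrupt needs (for example a vector) and can fail, while startup unmasks an interrupt that is assumed to be activated already and cannot. A minimal, self-contained sketch of that split follows; it is an illustration of the idea only, not kernel code, and every name in it (toy_irq, toy_activate, toy_startup) is invented for the example.

/*
 * Illustrative sketch only -- not kernel code. It models the split the
 * commit introduces: "activate" associates resources and may fail,
 * while "startup" assumes a successfully activated interrupt and does
 * not fail.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_irq {
	bool resources_available;	/* e.g. a free vector exists */
	bool activated;			/* resources associated */
	bool started;			/* unmasked and delivering */
};

/* The fallible step: callers (cf. request_irq()) must check the result. */
static int toy_activate(struct toy_irq *irq)
{
	if (!irq->resources_available)
		return -1;		/* association failed */
	irq->activated = true;
	return 0;
}

/* The infallible step: only legal on an already activated interrupt. */
static void toy_startup(struct toy_irq *irq)
{
	if (!irq->activated) {	/* mirrors the WARN_ON_ONCE() in __irq_startup() */
		fprintf(stderr, "startup on non-activated irq\n");
		return;
	}
	irq->started = true;
}

int main(void)
{
	struct toy_irq irq = { .resources_available = true };

	if (toy_activate(&irq))	/* handle the failure here... */
		return 1;
	toy_startup(&irq);	/* ...so startup itself cannot fail */
	printf("started: %d\n", irq.started);
	return 0;
}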
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -53,7 +53,7 @@ unsigned long probe_irq_on(void)
 			if (desc->irq_data.chip->irq_set_type)
 				desc->irq_data.chip->irq_set_type(&desc->irq_data,
 							 IRQ_TYPE_PROBE);
-			irq_startup(desc, IRQ_NORESEND, IRQ_START_FORCE);
+			irq_activate_and_startup(desc, IRQ_NORESEND);
 		}
 		raw_spin_unlock_irq(&desc->lock);
 	}
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -207,20 +207,19 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
 		 * Catch code which fiddles with enable_irq() on a managed
 		 * and potentially shutdown IRQ. Chained interrupt
 		 * installment or irq auto probing should not happen on
-		 * managed irqs either. Emit a warning, break the affinity
-		 * and start it up as a normal interrupt.
+		 * managed irqs either.
 		 */
 		if (WARN_ON_ONCE(force))
-			return IRQ_STARTUP_NORMAL;
+			return IRQ_STARTUP_ABORT;
 		/*
 		 * The interrupt was requested, but there is no online CPU
 		 * in it's affinity mask. Put it into managed shutdown
 		 * state and let the cpu hotplug mechanism start it up once
 		 * a CPU in the mask becomes available.
 		 */
-		irqd_set_managed_shutdown(d);
 		return IRQ_STARTUP_ABORT;
 	}
+	irq_domain_activate_irq(d);
 	return IRQ_STARTUP_MANAGED;
 }
 #else
@@ -236,7 +235,9 @@ static int __irq_startup(struct irq_desc *desc)
 	struct irq_data *d = irq_desc_get_irq_data(desc);
 	int ret = 0;
 
-	irq_domain_activate_irq(d);
+	/* Warn if this interrupt is not activated but try nevertheless */
+	WARN_ON_ONCE(!irqd_is_activated(d));
+
 	if (d->chip->irq_startup) {
 		ret = d->chip->irq_startup(d);
 		irq_state_clr_disabled(desc);
@@ -269,6 +270,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
 			irq_set_affinity_locked(d, aff, false);
 			break;
 		case IRQ_STARTUP_ABORT:
+			irqd_set_managed_shutdown(d);
 			return 0;
 		}
 	}
@@ -278,6 +280,22 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
 	return ret;
 }
 
+int irq_activate(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+
+	if (!irqd_affinity_is_managed(d))
+		irq_domain_activate_irq(d);
+	return 0;
+}
+
+void irq_activate_and_startup(struct irq_desc *desc, bool resend)
+{
+	if (WARN_ON(irq_activate(desc)))
+		return;
+	irq_startup(desc, resend, IRQ_START_FORCE);
+}
+
 static void __irq_disable(struct irq_desc *desc, bool mask);
 
 void irq_shutdown(struct irq_desc *desc)
@@ -953,7 +971,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 		irq_settings_set_norequest(desc);
 		irq_settings_set_nothread(desc);
 		desc->action = &chained_action;
-		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
+		irq_activate_and_startup(desc, IRQ_RESEND);
 	}
 }
 
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -74,6 +74,8 @@ extern void __enable_irq(struct irq_desc *desc);
 #define IRQ_START_FORCE	true
 #define IRQ_START_COND	false
 
+extern int irq_activate(struct irq_desc *desc);
+extern void irq_activate_and_startup(struct irq_desc *desc, bool resend);
 extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
 
 extern void irq_shutdown(struct irq_desc *desc);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -519,7 +519,7 @@ void __enable_irq(struct irq_desc *desc)
 		 * time. If it was already started up, then irq_startup()
 		 * will invoke irq_enable() under the hood.
 		 */
-		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
 		break;
 	}
 	default:
@@ -1325,6 +1325,21 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		goto out_unlock;
 	}
 
+	/*
+	 * Activate the interrupt. That activation must happen
+	 * independently of IRQ_NOAUTOEN. request_irq() can fail
+	 * and the callers are supposed to handle
+	 * that. enable_irq() of an interrupt requested with
+	 * IRQ_NOAUTOEN is not supposed to fail. The activation
+	 * keeps it in shutdown mode, it merily associates
+	 * resources if necessary and if that's not possible it
+	 * fails. Interrupts which are in managed shutdown mode
+	 * will simply ignore that activation request.
+	 */
+	ret = irq_activate(desc);
+	if (ret)
+		goto out_unlock;
+
 	desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
 			  IRQS_ONESHOT | IRQS_WAITING);
 	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
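As the comment added to __setup_irq() above notes, callers of request_irq() are already expected to handle failure, so an activation error simply propagates as one more request_irq() error code. A hypothetical driver-side sketch follows; the handler name, function name and device cookie are placeholders invented for illustration, not part of this commit.

#include <linux/interrupt.h>

/* Hypothetical handler and device cookie, for illustration only. */
static irqreturn_t mydev_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int mydev_setup_irq(int irq, void *mydev)
{
	int ret;

	/*
	 * If activation fails inside __setup_irq() (no resources could
	 * be associated), the error comes back here like any other
	 * request_irq() failure and must be handled by the driver.
	 */
	ret = request_irq(irq, mydev_irq_handler, 0, "mydev", mydev);
	if (ret)
		return ret;

	return 0;
}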