forked from Minki/linux
91552ab8ff
Core changes: - Provide IRQF_NO_AUTOEN as a flag for request*_irq() so drivers can be cleaned up which either use a seperate mechanism to prevent auto-enable at request time or have a racy mechanism which disables the interrupt right after request. - Get rid of the last usage of irq_create_identity_mapping() and remove the interface. - An overhaul of tasklet_disable(). Most usage sites of tasklet_disable() are in task context and usually in cleanup, teardown code pathes. tasklet_disable() spinwaits for a tasklet which is currently executed. That's not only a problem for PREEMPT_RT where this can lead to a live lock when the disabling task preempts the softirq thread. It's also problematic in context of virtualization when the vCPU which runs the tasklet is scheduled out and the disabling code has to spin wait until it's scheduled back in. Though there are a few code pathes which invoke tasklet_disable() from non-sleepable context. For these a new disable variant which still spinwaits is provided which allows to switch tasklet_disable() to a sleep wait mechanism. For the atomic use cases this does not solve the live lock issue on PREEMPT_RT. That is mitigated by blocking on the RT specific softirq lock. - The PREEMPT_RT specific implementation of softirq processing and local_bh_disable/enable(). On RT enabled kernels soft interrupt processing happens always in task context and all interrupt handlers, which are not explicitly marked to be invoked in hard interrupt context are forced into task context as well. This allows to protect against softirq processing with a per CPU lock, which in turn allows to make BH disabled regions preemptible. Most of the softirq handling code is still shared. The RT/non-RT specific differences are addressed with a set of inline functions which provide the context specific functionality. The local_bh_disable() / local_bh_enable() mechanism are obviously seperate. 
- The usual set of small improvements and cleanups Driver changes: - New drivers for Nuvoton WPCM450 and DT 79rc3243x interrupt controllers - Extended functionality for MStar, STM32 and SC7280 irq chips - Enhanced robustness for ARM GICv3/4.1 drivers - The usual set of cleanups and improvements all over the place -----BEGIN PGP SIGNATURE----- iQJHBAABCgAxFiEEQp8+kY+LLUocC4bMphj1TA10mKEFAmCGh5wTHHRnbHhAbGlu dXRyb25peC5kZQAKCRCmGPVMDXSYoZ+/EACWBpQ/2ZHizEw1bzjaDzJrR8U228xu wNi7nSP92Y07nJ3cCX7a6TJ53mqd0n3RT+DprlsOuqSN0D7Ktr/x44V/aZtm0d3N GkFOlpeGCRnHusLaUTwk7a8289LuoQ7OhSxIB409n1I4nLI96ZK41D1tYonMYl6E nxDiGADASfjaciBWbjwJO/mlwmiW/VRpSTxswx0wzakFfbIx9iKyKv1bCJQZ5JK+ lHmf0jxpDIs1EVK/ElJ9Ky6TMBlEmZyiX7n6rujtwJ1W+Jc/uL/y8pLJvGwooVmI yHTYsLMqzviCbAMhJiB3h1qs3GbCGlM78prgJTnOd0+xEUOCcopCRQlsTXVBq8Nb OS+HNkYmYXRfiSH6lINJsIok8Xis28bAw/qWz2Ho+8wLq0TI8crK38roD1fPndee FNJRhsPPOBkscpIldJ0Cr0X5lclkJFiAhAxORPHoseKvQSm7gBMB7H99xeGRffTn yB3XqeTJMvPNmAHNN4Brv6ey3OjwnEWBgwcnIM2LtbIlRtlmxTYuR+82OPOgEvzk fSrjFFJqu0LEMLEOXS4pYN824PawjV//UAy4IaG8AodmUUCSGHgw1gTVa4sIf72t tXY54HqWfRWRpujhVRgsZETqBUtZkL6yvpoe8f6H7P91W5tAfv3oj4ch9RkhUo+Z b0/u9T0+Fpbg+w== =id4G -----END PGP SIGNATURE----- Merge tag 'irq-core-2021-04-26' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip Pull irq updates from Thomas Gleixner: "The usual updates from the irq departement: Core changes: - Provide IRQF_NO_AUTOEN as a flag for request*_irq() so drivers can be cleaned up which either use a seperate mechanism to prevent auto-enable at request time or have a racy mechanism which disables the interrupt right after request. - Get rid of the last usage of irq_create_identity_mapping() and remove the interface. - An overhaul of tasklet_disable(). Most usage sites of tasklet_disable() are in task context and usually in cleanup, teardown code pathes. tasklet_disable() spinwaits for a tasklet which is currently executed. 
That's not only a problem for PREEMPT_RT where this can lead to a live lock when the disabling task preempts the softirq thread. It's also problematic in context of virtualization when the vCPU which runs the tasklet is scheduled out and the disabling code has to spin wait until it's scheduled back in. There are a few code pathes which invoke tasklet_disable() from non-sleepable context. For these a new disable variant which still spinwaits is provided which allows to switch tasklet_disable() to a sleep wait mechanism. For the atomic use cases this does not solve the live lock issue on PREEMPT_RT. That is mitigated by blocking on the RT specific softirq lock. - The PREEMPT_RT specific implementation of softirq processing and local_bh_disable/enable(). On RT enabled kernels soft interrupt processing happens always in task context and all interrupt handlers, which are not explicitly marked to be invoked in hard interrupt context are forced into task context as well. This allows to protect against softirq processing with a per CPU lock, which in turn allows to make BH disabled regions preemptible. Most of the softirq handling code is still shared. The RT/non-RT specific differences are addressed with a set of inline functions which provide the context specific functionality. The local_bh_disable() / local_bh_enable() mechanism are obviously seperate. 
- The usual set of small improvements and cleanups Driver changes: - New drivers for Nuvoton WPCM450 and DT 79rc3243x interrupt controllers - Extended functionality for MStar, STM32 and SC7280 irq chips - Enhanced robustness for ARM GICv3/4.1 drivers - The usual set of cleanups and improvements all over the place" * tag 'irq-core-2021-04-26' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (53 commits) irqchip/xilinx: Expose Kconfig option for Zynq/ZynqMP irqchip/gic-v3: Do not enable irqs when handling spurious interrups dt-bindings: interrupt-controller: Add IDT 79RC3243x Interrupt Controller irqchip: Add support for IDT 79rc3243x interrupt controller irqdomain: Drop references to recusive irqdomain setup irqdomain: Get rid of irq_create_strict_mappings() irqchip/jcore-aic: Kill use of irq_create_strict_mappings() ARM: PXA: Kill use of irq_create_strict_mappings() irqchip/gic-v4.1: Disable vSGI upon (GIC CPUIF < v4.1) detection irqchip/tb10x: Use 'fallthrough' to eliminate a warning genirq: Reduce irqdebug cacheline bouncing kernel: Initialize cpumask before parsing irqchip/wpcm450: Drop COMPILE_TEST irqchip/irq-mst: Support polarity configuration irqchip: Add driver for WPCM450 interrupt controller dt-bindings: interrupt-controller: Add nuvoton, wpcm450-aic dt-bindings: qcom,pdc: Add compatible for sc7280 irqchip/stm32: Add usart instances exti direct event support irqchip/gic-v3: Fix OF_BAD_ADDR error handling irqchip/sifive-plic: Mark two global variables __ro_after_init ...
251 lines
6.2 KiB
C
251 lines
6.2 KiB
C
// SPDX-License-Identifier: GPL-2.0+
|
|
/*
|
|
* Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl>
|
|
* Copyright (C) 2020 Bartosz Golaszewski <bgolaszewski@baylibre.com>
|
|
*/
|
|
|
|
#include <linux/irq.h>
|
|
#include <linux/irq_sim.h>
|
|
#include <linux/irq_work.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/slab.h>
|
|
|
|
/*
 * Per-simulator state shared by all interrupt lines of one domain.
 * Allocated in irq_domain_create_sim() and stored as the domain's host_data.
 */
struct irq_sim_work_ctx {
	struct irq_work work;		/* defers delivery of pending interrupts */
	int irq_base;			/* NOTE(review): not referenced anywhere in this file — confirm it is still needed */
	unsigned int irq_count;		/* number of simulated lines / size of @pending in bits */
	unsigned long *pending;		/* bitmap of lines waiting to be fired */
	struct irq_domain *domain;	/* linear domain owning the simulated lines */
};
|
|
|
|
/*
 * Per-line state, installed as chip data for each mapped virq in
 * irq_sim_domain_map() and freed in irq_sim_domain_unmap().
 */
struct irq_sim_irq_ctx {
	int irqnum;			/* NOTE(review): not referenced anywhere in this file — confirm it is still needed */
	bool enabled;			/* true while the line is unmasked */
	struct irq_sim_work_ctx *work_ctx;	/* back-pointer to the owning simulator */
};
|
|
|
|
static void irq_sim_irqmask(struct irq_data *data)
|
|
{
|
|
struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
|
|
|
|
irq_ctx->enabled = false;
|
|
}
|
|
|
|
static void irq_sim_irqunmask(struct irq_data *data)
|
|
{
|
|
struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
|
|
|
|
irq_ctx->enabled = true;
|
|
}
|
|
|
|
static int irq_sim_set_type(struct irq_data *data, unsigned int type)
|
|
{
|
|
/* We only support rising and falling edge trigger types. */
|
|
if (type & ~IRQ_TYPE_EDGE_BOTH)
|
|
return -EINVAL;
|
|
|
|
irqd_set_trigger_type(data, type);
|
|
|
|
return 0;
|
|
}
|
|
|
|
/*
 * irq_chip ->irq_get_irqchip_state callback.
 *
 * Only IRQCHIP_STATE_PENDING is supported; *state reflects the pending
 * bitmap, and is only written for an enabled (unmasked) line.
 */
static int irq_sim_get_irqchip_state(struct irq_data *data,
				     enum irqchip_irq_state which, bool *state)
{
	struct irq_sim_irq_ctx *ctx = irq_data_get_irq_chip_data(data);
	irq_hw_number_t hwirq = irqd_to_hwirq(data);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (ctx->enabled)
		*state = test_bit(hwirq, ctx->work_ctx->pending);

	return 0;
}
|
|
|
|
/*
 * irq_chip ->irq_set_irqchip_state callback.
 *
 * Only IRQCHIP_STATE_PENDING is supported. Setting the pending state on an
 * enabled line queues the irq_work which fires the simulated interrupt;
 * masked lines silently ignore the request.
 */
static int irq_sim_set_irqchip_state(struct irq_data *data,
				     enum irqchip_irq_state which, bool state)
{
	struct irq_sim_irq_ctx *ctx = irq_data_get_irq_chip_data(data);
	irq_hw_number_t hwirq = irqd_to_hwirq(data);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (ctx->enabled) {
		assign_bit(hwirq, ctx->work_ctx->pending, state);
		if (state)
			irq_work_queue(&ctx->work_ctx->work);
	}

	return 0;
}
|
|
|
|
/* Minimal irq_chip implementation backing each simulated interrupt line. */
static struct irq_chip irq_sim_irqchip = {
	.name = "irq_sim",
	.irq_mask = irq_sim_irqmask,
	.irq_unmask = irq_sim_irqunmask,
	.irq_set_type = irq_sim_set_type,
	.irq_get_irqchip_state = irq_sim_get_irqchip_state,
	.irq_set_irqchip_state = irq_sim_set_irqchip_state,
};
|
|
|
|
static void irq_sim_handle_irq(struct irq_work *work)
|
|
{
|
|
struct irq_sim_work_ctx *work_ctx;
|
|
unsigned int offset = 0;
|
|
int irqnum;
|
|
|
|
work_ctx = container_of(work, struct irq_sim_work_ctx, work);
|
|
|
|
while (!bitmap_empty(work_ctx->pending, work_ctx->irq_count)) {
|
|
offset = find_next_bit(work_ctx->pending,
|
|
work_ctx->irq_count, offset);
|
|
clear_bit(offset, work_ctx->pending);
|
|
irqnum = irq_find_mapping(work_ctx->domain, offset);
|
|
handle_simple_irq(irq_to_desc(irqnum));
|
|
}
|
|
}
|
|
|
|
/*
 * irq_domain ->map callback: allocate and install per-line state for a
 * newly mapped virq.
 *
 * Returns 0 on success, -ENOMEM if the per-line context allocation fails.
 */
static int irq_sim_domain_map(struct irq_domain *domain,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct irq_sim_work_ctx *work_ctx = domain->host_data;
	struct irq_sim_irq_ctx *irq_ctx;

	irq_ctx = kzalloc(sizeof(*irq_ctx), GFP_KERNEL);
	if (!irq_ctx)
		return -ENOMEM;

	/*
	 * Fully initialize the context before publishing it as chip data:
	 * once irq_set_chip_data() runs, the irqchip callbacks may
	 * dereference ->work_ctx.
	 */
	irq_ctx->work_ctx = work_ctx;

	irq_set_chip(virq, &irq_sim_irqchip);
	irq_set_chip_data(virq, irq_ctx);
	irq_set_handler(virq, handle_simple_irq);
	irq_modify_status(virq, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);

	return 0;
}
|
|
|
|
static void irq_sim_domain_unmap(struct irq_domain *domain, unsigned int virq)
|
|
{
|
|
struct irq_sim_irq_ctx *irq_ctx;
|
|
struct irq_data *irqd;
|
|
|
|
irqd = irq_domain_get_irq_data(domain, virq);
|
|
irq_ctx = irq_data_get_irq_chip_data(irqd);
|
|
|
|
irq_set_handler(virq, NULL);
|
|
irq_domain_reset_irq_data(irqd);
|
|
kfree(irq_ctx);
|
|
}
|
|
|
|
static const struct irq_domain_ops irq_sim_domain_ops = {
|
|
.map = irq_sim_domain_map,
|
|
.unmap = irq_sim_domain_unmap,
|
|
};
|
|
|
|
/**
|
|
* irq_domain_create_sim - Create a new interrupt simulator irq_domain and
|
|
* allocate a range of dummy interrupts.
|
|
*
|
|
* @fwnode: struct fwnode_handle to be associated with this domain.
|
|
* @num_irqs: Number of interrupts to allocate.
|
|
*
|
|
* On success: return a new irq_domain object.
|
|
* On failure: a negative errno wrapped with ERR_PTR().
|
|
*/
|
|
struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
|
|
unsigned int num_irqs)
|
|
{
|
|
struct irq_sim_work_ctx *work_ctx;
|
|
|
|
work_ctx = kmalloc(sizeof(*work_ctx), GFP_KERNEL);
|
|
if (!work_ctx)
|
|
goto err_out;
|
|
|
|
work_ctx->pending = bitmap_zalloc(num_irqs, GFP_KERNEL);
|
|
if (!work_ctx->pending)
|
|
goto err_free_work_ctx;
|
|
|
|
work_ctx->domain = irq_domain_create_linear(fwnode, num_irqs,
|
|
&irq_sim_domain_ops,
|
|
work_ctx);
|
|
if (!work_ctx->domain)
|
|
goto err_free_bitmap;
|
|
|
|
work_ctx->irq_count = num_irqs;
|
|
init_irq_work(&work_ctx->work, irq_sim_handle_irq);
|
|
|
|
return work_ctx->domain;
|
|
|
|
err_free_bitmap:
|
|
bitmap_free(work_ctx->pending);
|
|
err_free_work_ctx:
|
|
kfree(work_ctx);
|
|
err_out:
|
|
return ERR_PTR(-ENOMEM);
|
|
}
|
|
EXPORT_SYMBOL_GPL(irq_domain_create_sim);
|
|
|
|
/**
 * irq_domain_remove_sim - Deinitialize the interrupt simulator domain: free
 *                         the interrupt descriptors and allocated memory.
 *
 * @domain: The interrupt simulator domain to tear down.
 */
void irq_domain_remove_sim(struct irq_domain *domain)
{
	struct irq_sim_work_ctx *work_ctx = domain->host_data;

	/*
	 * Wait for any in-flight irq_work before freeing the state it
	 * dereferences; order matters here.
	 */
	irq_work_sync(&work_ctx->work);
	bitmap_free(work_ctx->pending);
	kfree(work_ctx);

	irq_domain_remove(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove_sim);
|
|
|
|
/* devm action callback: tear down the simulator domain on device detach. */
static void devm_irq_domain_remove_sim(void *data)
{
	irq_domain_remove_sim(data);
}
|
|
|
|
/**
|
|
* devm_irq_domain_create_sim - Create a new interrupt simulator for
|
|
* a managed device.
|
|
*
|
|
* @dev: Device to initialize the simulator object for.
|
|
* @fwnode: struct fwnode_handle to be associated with this domain.
|
|
* @num_irqs: Number of interrupts to allocate
|
|
*
|
|
* On success: return a new irq_domain object.
|
|
* On failure: a negative errno wrapped with ERR_PTR().
|
|
*/
|
|
struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
|
|
struct fwnode_handle *fwnode,
|
|
unsigned int num_irqs)
|
|
{
|
|
struct irq_domain *domain;
|
|
int ret;
|
|
|
|
domain = irq_domain_create_sim(fwnode, num_irqs);
|
|
if (IS_ERR(domain))
|
|
return domain;
|
|
|
|
ret = devm_add_action_or_reset(dev, devm_irq_domain_remove_sim, domain);
|
|
if (ret)
|
|
return ERR_PTR(ret);
|
|
|
|
return domain;
|
|
}
|
|
EXPORT_SYMBOL_GPL(devm_irq_domain_create_sim);
|