// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl>
 * Copyright (C) 2020 Bartosz Golaszewski <bgolaszewski@baylibre.com>
 */

#include <linux/irq.h>
#include <linux/irq_sim.h>
#include <linux/irq_work.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct irq_sim_work_ctx {
	struct irq_work		work;
	int			irq_base;
	unsigned int		irq_count;
	unsigned long		*pending;
	struct irq_domain	*domain;
};

struct irq_sim_irq_ctx {
	int			irqnum;
	bool			enabled;
	struct irq_sim_work_ctx	*work_ctx;
};

static void irq_sim_irqmask(struct irq_data *data)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);

	irq_ctx->enabled = false;
}

static void irq_sim_irqunmask(struct irq_data *data)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);

	irq_ctx->enabled = true;
}

static int irq_sim_set_type(struct irq_data *data, unsigned int type)
{
	/* We only support rising and falling edge trigger types. */
	if (type & ~IRQ_TYPE_EDGE_BOTH)
		return -EINVAL;

	irqd_set_trigger_type(data, type);

	return 0;
}

static int irq_sim_get_irqchip_state(struct irq_data *data,
				     enum irqchip_irq_state which, bool *state)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
	irq_hw_number_t hwirq = irqd_to_hwirq(data);

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		if (irq_ctx->enabled)
			*state = test_bit(hwirq, irq_ctx->work_ctx->pending);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int irq_sim_set_irqchip_state(struct irq_data *data,
				     enum irqchip_irq_state which, bool state)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
	irq_hw_number_t hwirq = irqd_to_hwirq(data);

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		if (irq_ctx->enabled) {
			assign_bit(hwirq, irq_ctx->work_ctx->pending, state);
			if (state)
				irq_work_queue(&irq_ctx->work_ctx->work);
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct irq_chip irq_sim_irqchip = {
	.name			= "irq_sim",
	.irq_mask		= irq_sim_irqmask,
	.irq_unmask		= irq_sim_irqunmask,
	.irq_set_type		= irq_sim_set_type,
	.irq_get_irqchip_state	= irq_sim_get_irqchip_state,
	.irq_set_irqchip_state	= irq_sim_set_irqchip_state,
};

static void irq_sim_handle_irq(struct irq_work *work)
{
	struct irq_sim_work_ctx *work_ctx;
	unsigned int offset = 0;
	int irqnum;

	work_ctx = container_of(work, struct irq_sim_work_ctx, work);

	while (!bitmap_empty(work_ctx->pending, work_ctx->irq_count)) {
		offset = find_next_bit(work_ctx->pending,
				       work_ctx->irq_count, offset);
		clear_bit(offset, work_ctx->pending);
		irqnum = irq_find_mapping(work_ctx->domain, offset);
		handle_simple_irq(irq_to_desc(irqnum));
	}
}

static int irq_sim_domain_map(struct irq_domain *domain,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct irq_sim_work_ctx *work_ctx = domain->host_data;
	struct irq_sim_irq_ctx *irq_ctx;

	irq_ctx = kzalloc(sizeof(*irq_ctx), GFP_KERNEL);
	if (!irq_ctx)
		return -ENOMEM;

	irq_set_chip(virq, &irq_sim_irqchip);
	irq_set_chip_data(virq, irq_ctx);
	irq_set_handler(virq, handle_simple_irq);
	irq_modify_status(virq, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	irq_ctx->work_ctx = work_ctx;

	return 0;
}

static void irq_sim_domain_unmap(struct irq_domain *domain, unsigned int virq)
{
	struct irq_sim_irq_ctx *irq_ctx;
	struct irq_data *irqd;

	irqd = irq_domain_get_irq_data(domain, virq);
	irq_ctx = irq_data_get_irq_chip_data(irqd);

	irq_set_handler(virq, NULL);
	irq_domain_reset_irq_data(irqd);
	kfree(irq_ctx);
}

static const struct irq_domain_ops irq_sim_domain_ops = {
	.map		= irq_sim_domain_map,
	.unmap		= irq_sim_domain_unmap,
};

/**
 * irq_domain_create_sim - Create a new interrupt simulator irq_domain and
 *                         allocate a range of dummy interrupts.
 *
 * @fwnode:     struct fwnode_handle to be associated with this domain.
 * @num_irqs:   Number of interrupts to allocate.
 *
 * On success: return a new irq_domain object.
 * On failure: a negative errno wrapped with ERR_PTR().
 */
struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
					 unsigned int num_irqs)
{
	struct irq_sim_work_ctx *work_ctx;

	work_ctx = kmalloc(sizeof(*work_ctx), GFP_KERNEL);
	if (!work_ctx)
		goto err_out;

	work_ctx->pending = bitmap_zalloc(num_irqs, GFP_KERNEL);
	if (!work_ctx->pending)
		goto err_free_work_ctx;

	work_ctx->domain = irq_domain_create_linear(fwnode, num_irqs,
						    &irq_sim_domain_ops,
						    work_ctx);
	if (!work_ctx->domain)
		goto err_free_bitmap;

	work_ctx->irq_count = num_irqs;
	work_ctx->work = IRQ_WORK_INIT_HARD(irq_sim_handle_irq);

	return work_ctx->domain;

err_free_bitmap:
	bitmap_free(work_ctx->pending);
err_free_work_ctx:
	kfree(work_ctx);
err_out:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(irq_domain_create_sim);
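
/*
 * Example usage of irq_domain_create_sim() (a minimal sketch rather than an
 * in-tree consumer; test_handler, the "sim-test" label and the hwirq number
 * 3 below are made up for illustration):
 *
 *	domain = irq_domain_create_sim(NULL, 8);
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *
 *	virq = irq_create_mapping(domain, 3);
 *	ret = request_irq(virq, test_handler, 0, "sim-test", NULL);
 *
 * The simulated interrupt can then be fired from process context with
 * irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, true): this ends up in
 * irq_sim_set_irqchip_state(), which sets the pending bit and queues the
 * irq_work that delivers the interrupt to test_handler(). When no longer
 * needed, the domain is torn down with irq_domain_remove_sim(domain).
 */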

/**
 * irq_domain_remove_sim - Deinitialize the interrupt simulator domain: free
 *                         the interrupt descriptors and allocated memory.
 *
 * @domain:     The interrupt simulator domain to tear down.
 */
void irq_domain_remove_sim(struct irq_domain *domain)
{
	struct irq_sim_work_ctx *work_ctx = domain->host_data;

	irq_work_sync(&work_ctx->work);
	bitmap_free(work_ctx->pending);
	kfree(work_ctx);

	irq_domain_remove(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove_sim);

static void devm_irq_domain_remove_sim(void *data)
{
	struct irq_domain *domain = data;

	irq_domain_remove_sim(domain);
}

/**
 * devm_irq_domain_create_sim - Create a new interrupt simulator for
 *                              a managed device.
 *
 * @dev:        Device to initialize the simulator object for.
 * @fwnode:     struct fwnode_handle to be associated with this domain.
 * @num_irqs:   Number of interrupts to allocate
 *
 * On success: return a new irq_domain object.
 * On failure: a negative errno wrapped with ERR_PTR().
 */
struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
					      struct fwnode_handle *fwnode,
					      unsigned int num_irqs)
{
	struct irq_domain *domain;
	int ret;

	domain = irq_domain_create_sim(fwnode, num_irqs);
	if (IS_ERR(domain))
		return domain;

	ret = devm_add_action_or_reset(dev, devm_irq_domain_remove_sim, domain);
	if (ret)
		return ERR_PTR(ret);

	return domain;
}
EXPORT_SYMBOL_GPL(devm_irq_domain_create_sim);
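
/*
 * Example usage of devm_irq_domain_create_sim() (a minimal sketch; the probe
 * function below is hypothetical):
 *
 *	static int my_driver_probe(struct platform_device *pdev)
 *	{
 *		struct irq_domain *domain;
 *
 *		domain = devm_irq_domain_create_sim(&pdev->dev, NULL, 4);
 *		if (IS_ERR(domain))
 *			return PTR_ERR(domain);
 *
 *		...
 *	}
 *
 * No explicit cleanup is needed: the devm action registered above calls
 * irq_domain_remove_sim() when the device is unbound.
 */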