irqdomain: Move revmap_trees_mutex to struct irq_domain
The revmap_trees_mutex protects domain->revmap_tree. There is no need to make it global, because the revmap_tree of two different domains may be modified concurrently. That said, the global mutex is not an actual bottleneck, since interrupt map/unmap operations are infrequent. Rather, the motivation is to tidy up the code from a data-structure point of view.

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent c94fb639d5
commit f1d7835854
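For the gist of the change before reading the diff: the patch replaces the single global revmap_trees_mutex with a mutex embedded in each struct irq_domain, so updates to the reverse maps of two different domains no longer serialize against each other. Below is a minimal userspace sketch of that per-object locking pattern. It is not kernel code: the names (demo_domain, demo_domain_set_mapping, REVMAP_SLOTS) are made up for illustration, pthreads stand in for the kernel mutex API, and a plain array stands in for the radix tree.

/*
 * Illustrative userspace sketch (not kernel code) of the locking pattern
 * this patch establishes: each domain embeds its own mutex protecting its
 * own reverse map, so updates to two different domains never contend.
 * All names here are hypothetical and exist only for this example.
 */
#include <pthread.h>
#include <stdio.h>

#define REVMAP_SLOTS 16

struct demo_domain {
	/* Per-domain lock, analogous to irq_domain::revmap_tree_mutex. */
	pthread_mutex_t revmap_mutex;
	/* Stand-in for the radix tree: a tiny fixed-size reverse map. */
	int revmap[REVMAP_SLOTS];
};

static void demo_domain_init(struct demo_domain *d)
{
	pthread_mutex_init(&d->revmap_mutex, NULL);
	for (int i = 0; i < REVMAP_SLOTS; i++)
		d->revmap[i] = 0;
}

/*
 * Analogue of irq_domain_set_mapping(): take only this domain's lock.
 * The caller keeps hwirq within [0, REVMAP_SLOTS).
 */
static void demo_domain_set_mapping(struct demo_domain *d, int hwirq, int virq)
{
	pthread_mutex_lock(&d->revmap_mutex);
	d->revmap[hwirq] = virq;
	pthread_mutex_unlock(&d->revmap_mutex);
}

struct worker_arg {
	struct demo_domain *domain;
	int base;
};

/* Each thread maps interrupts in its own domain; no cross-domain contention. */
static void *worker(void *p)
{
	struct worker_arg *arg = p;

	for (int hwirq = 0; hwirq < REVMAP_SLOTS; hwirq++)
		demo_domain_set_mapping(arg->domain, hwirq, arg->base + hwirq);
	return NULL;
}

int main(void)
{
	struct demo_domain a, b;
	struct worker_arg wa = { .domain = &a, .base = 100 };
	struct worker_arg wb = { .domain = &b, .base = 200 };
	pthread_t ta, tb;

	demo_domain_init(&a);
	demo_domain_init(&b);

	pthread_create(&ta, NULL, worker, &wa);
	pthread_create(&tb, NULL, worker, &wb);
	pthread_join(ta, NULL);
	pthread_join(tb, NULL);

	printf("a.revmap[0]=%d b.revmap[0]=%d\n", a.revmap[0], b.revmap[0]);
	return 0;
}

Built with gcc -pthread, the two worker threads update their own domain's map under their own lock, mirroring how irq_domain_set_mapping() now takes domain->revmap_tree_mutex instead of a single global mutex.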
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -32,6 +32,7 @@
 #include <linux/types.h>
 #include <linux/irqhandler.h>
 #include <linux/of.h>
+#include <linux/mutex.h>
 #include <linux/radix-tree.h>
 
 struct device_node;
@@ -176,6 +177,7 @@ struct irq_domain {
 	unsigned int revmap_direct_max_irq;
 	unsigned int revmap_size;
 	struct radix_tree_root revmap_tree;
+	struct mutex revmap_tree_mutex;
 	unsigned int linear_revmap[];
 };
 
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -21,7 +21,6 @@
 static LIST_HEAD(irq_domain_list);
 static DEFINE_MUTEX(irq_domain_mutex);
 
-static DEFINE_MUTEX(revmap_trees_mutex);
 static struct irq_domain *irq_default_domain;
 
 static void irq_domain_check_hierarchy(struct irq_domain *domain);
@@ -211,6 +210,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
 
 	/* Fill structure */
 	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
+	mutex_init(&domain->revmap_tree_mutex);
 	domain->ops = ops;
 	domain->host_data = host_data;
 	domain->hwirq_max = hwirq_max;
@@ -462,9 +462,9 @@ static void irq_domain_clear_mapping(struct irq_domain *domain,
 	if (hwirq < domain->revmap_size) {
 		domain->linear_revmap[hwirq] = 0;
 	} else {
-		mutex_lock(&revmap_trees_mutex);
+		mutex_lock(&domain->revmap_tree_mutex);
 		radix_tree_delete(&domain->revmap_tree, hwirq);
-		mutex_unlock(&revmap_trees_mutex);
+		mutex_unlock(&domain->revmap_tree_mutex);
 	}
 }
 
@@ -475,9 +475,9 @@ static void irq_domain_set_mapping(struct irq_domain *domain,
 	if (hwirq < domain->revmap_size) {
 		domain->linear_revmap[hwirq] = irq_data->irq;
 	} else {
-		mutex_lock(&revmap_trees_mutex);
+		mutex_lock(&domain->revmap_tree_mutex);
 		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
-		mutex_unlock(&revmap_trees_mutex);
+		mutex_unlock(&domain->revmap_tree_mutex);
 	}
 }
 
@@ -1459,11 +1459,11 @@ static void irq_domain_fix_revmap(struct irq_data *d)
 		return; /* Not using radix tree. */
 
 	/* Fix up the revmap. */
-	mutex_lock(&revmap_trees_mutex);
+	mutex_lock(&d->domain->revmap_tree_mutex);
 	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
 	if (slot)
 		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
-	mutex_unlock(&revmap_trees_mutex);
+	mutex_unlock(&d->domain->revmap_tree_mutex);
 }
 
 /**