irqchip/gic-v4.1: Ensure mutual exclusion between vPE affinity change and RD access
Before GICv4.1, all operations would be serialized with the affinity
changes by virtue of using the same ITS command queue. With v4.1, things
change, as invalidations (and a number of other operations) are issued
using the redistributor MMIO frame.

We must thus make sure that these redistributor accesses cannot race
against the affinity change, or we may end up talking to the wrong
redistributor.

To ensure this, we expand the irq_to_cpuid() helper to take a spinlock
(a new per-vPE lock) when the LPI is mapped to a vLPI, on each operation
that requires mutual exclusion.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Link: https://lore.kernel.org/r/20200304203330.4967-4-maz@kernel.org
parent 28d160de51
commit f3a059219b
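The essence of the change: vpe->col_idx (the redistributor a vPE is currently routed to) may now only be sampled or updated while holding the new per-vPE raw spinlock, so a direct redistributor access can never race with its_vpe_set_affinity() moving the vPE. The sketch below models that pattern in standalone userspace C so it can be compiled and run; the demo_* names and the pthread mutex are stand-ins of my own invention (the driver uses raw_spinlock_t and the vpe_to_cpuid_lock()/vpe_to_cpuid_unlock() helpers added by this commit), not kernel API.

/*
 * Userspace analogue of the locking pattern introduced by this commit.
 * All demo_* names are illustrative only; the real code lives in
 * drivers/irqchip/irq-gic-v3-its.c and uses a raw_spinlock_t.
 */
#include <pthread.h>
#include <stdio.h>

struct demo_vpe {
        pthread_mutex_t vpe_lock;       /* models vpe->vpe_lock */
        int col_idx;                    /* models vpe->col_idx (target RD) */
};

/* Models a vLPI path that must talk to the vPE's current redistributor. */
static void demo_rd_access(struct demo_vpe *vpe)
{
        pthread_mutex_lock(&vpe->vpe_lock);     /* vpe_to_cpuid_lock() */
        int cpu = vpe->col_idx;                 /* sample the target RD */
        printf("issuing invalidation on the RD of CPU%d\n", cpu);
        /* ... MMIO write + wait_for_syncr() would happen here ... */
        pthread_mutex_unlock(&vpe->vpe_lock);   /* vpe_to_cpuid_unlock() */
}

/* Models its_vpe_set_affinity(): col_idx only ever changes under the lock. */
static void demo_set_affinity(struct demo_vpe *vpe, int cpu)
{
        pthread_mutex_lock(&vpe->vpe_lock);
        if (vpe->col_idx != cpu)
                vpe->col_idx = cpu;     /* no RD access can race with this */
        pthread_mutex_unlock(&vpe->vpe_lock);
}

int main(void)
{
        struct demo_vpe vpe = {
                .vpe_lock = PTHREAD_MUTEX_INITIALIZER,
                .col_idx = 0,
        };

        demo_rd_access(&vpe);           /* targets CPU0 */
        demo_set_affinity(&vpe, 3);     /* move the vPE */
        demo_rd_access(&vpe);           /* now guaranteed to target CPU3 */
        return 0;
}

Build with something like `cc -pthread demo.c` (hypothetical file name). The point is simply that the reader and the updater of col_idx go through the same lock, which is exactly what the hunks below enforce.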
drivers/irqchip/irq-gic-v3-its.c
@@ -239,15 +239,41 @@ static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
 	return NULL;
 }
 
-static int irq_to_cpuid(struct irq_data *d)
+static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
+{
+	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
+	return vpe->col_idx;
+}
+
+static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
+}
+
+static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
+{
+	struct its_vlpi_map *map = get_vlpi_map(d);
+	int cpu;
+
+	if (map) {
+		cpu = vpe_to_cpuid_lock(map->vpe, flags);
+	} else {
+		/* Physical LPIs are already locked via the irq_desc lock */
+		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+		cpu = its_dev->event_map.col_map[its_get_event_id(d)];
+		/* Keep GCC quiet... */
+		*flags = 0;
+	}
+
+	return cpu;
+}
+
+static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
 {
-	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	struct its_vlpi_map *map = get_vlpi_map(d);
 
 	if (map)
-		return map->vpe->col_idx;
-
-	return its_dev->event_map.col_map[its_get_event_id(d)];
+		vpe_to_cpuid_unlock(map->vpe, flags);
 }
 
 static struct its_collection *valid_col(struct its_collection *col)
@@ -1329,7 +1355,9 @@ static void direct_lpi_inv(struct irq_data *d)
 {
 	struct its_vlpi_map *map = get_vlpi_map(d);
 	void __iomem *rdbase;
+	unsigned long flags;
 	u64 val;
+	int cpu;
 
 	if (map) {
 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1344,10 +1372,12 @@ static void direct_lpi_inv(struct irq_data *d)
 	}
 
 	/* Target the redistributor this LPI is currently routed to */
-	rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base;
+	cpu = irq_to_cpuid_lock(d, &flags);
+	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
 	gic_write_lpir(val, rdbase + GICR_INVLPIR);
 
 	wait_for_syncr(rdbase);
+	irq_to_cpuid_unlock(d, flags);
 }
 
 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
@@ -3486,17 +3516,25 @@ static int its_vpe_set_affinity(struct irq_data *d,
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 	int from, cpu = cpumask_first(mask_val);
+	unsigned long flags;
 
 	/*
 	 * Changing affinity is mega expensive, so let's be as lazy as
 	 * we can and only do it if we really have to. Also, if mapped
 	 * into the proxy device, we need to move the doorbell
 	 * interrupt to its new location.
+	 *
+	 * Another thing is that changing the affinity of a vPE affects
+	 * *other interrupts* such as all the vLPIs that are routed to
+	 * this vPE. This means that the irq_desc lock is not enough to
+	 * protect us, and that we must ensure nobody samples vpe->col_idx
+	 * during the update, hence the lock below which must also be
+	 * taken on any vLPI handling path that evaluates vpe->col_idx.
 	 */
-	if (vpe->col_idx == cpu)
+	from = vpe_to_cpuid_lock(vpe, &flags);
+	if (from == cpu)
 		goto out;
 
-	from = vpe->col_idx;
 	vpe->col_idx = cpu;
 
 	/*
@@ -3512,6 +3550,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
 
 out:
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+	vpe_to_cpuid_unlock(vpe, flags);
 
 	return IRQ_SET_MASK_OK_DONE;
 }
@@ -3855,6 +3894,7 @@ static int its_vpe_init(struct its_vpe *vpe)
 		return -ENOMEM;
 	}
 
+	raw_spin_lock_init(&vpe->vpe_lock);
 	vpe->vpe_id = vpe_id;
 	vpe->vpt_page = vpt_page;
 	if (gic_rdists->has_rvpeid)
include/linux/irqchip/arm-gic-v4.h
@@ -53,6 +53,11 @@ struct its_vpe {
 		};
 	};
 
+	/*
+	 * Ensures mutual exclusion between affinity setting of the
+	 * vPE and vLPI operations using vpe->col_idx.
+	 */
+	raw_spinlock_t		vpe_lock;
 	/*
 	 * This collection ID is used to indirect the target
 	 * redistributor for this VPE. The ID itself isn't involved in