KVM: arm64: vgic-its: Get rid of the lpi_list_lock
The last genuine use case for the lpi_list_lock was the global LPI
translation cache, which has been removed in favor of a per-ITS xarray.
Remove a layer from the locking puzzle by getting rid of it.

vgic_add_lpi() still has a critical section that needs to protect
against the insertion of other LPIs; change it to take the LPI xarray's
xa_lock to retain this property.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240422200158.2606761-13-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent ec39bbfd55
commit 481c9ee846
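For readers unfamiliar with the xarray API: the structure embeds its own spinlock, reachable through the xa_lock()/xa_unlock() family, which is what makes the external lock redundant. A minimal sketch of the before/after pattern, where dist_xa is an illustrative stand-in for vgic_dist::lpi_xa:

#include <linux/xarray.h>

/* Sketch only: dist_xa stands in for the real vgic_dist::lpi_xa field. */
static void serialize_insertions(struct xarray *dist_xa)
{
	unsigned long flags;

	/* Before: raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); */
	xa_lock_irqsave(dist_xa, flags);

	/* ... insertions here are serialized against other CPUs ... */

	/* Before: raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); */
	xa_unlock_irqrestore(dist_xa, flags);
}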
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -53,7 +53,6 @@ void kvm_vgic_early_init(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	raw_spin_lock_init(&dist->lpi_list_lock);
 	xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
 }
 
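One note on the surviving line: XA_FLAGS_LOCK_IRQ declares that the xarray's internal lock participates in IRQ context, so the xarray disables interrupts whenever it takes the lock internally. A minimal initialization sketch, with my_dist as a hypothetical stand-in for struct vgic_dist:

#include <linux/xarray.h>

struct my_dist {
	struct xarray lpi_xa;	/* hypothetical stand-in for vgic_dist */
};

static void my_early_init(struct my_dist *dist)
{
	/*
	 * XA_FLAGS_LOCK_IRQ: the xarray's embedded xa_lock will be taken
	 * with IRQs disabled, matching callers that use xa_lock_irqsave().
	 * No separate raw_spin_lock_init() is needed; the lock lives
	 * inside the xarray itself.
	 */
	xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
}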
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -69,7 +69,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	irq->target_vcpu = vcpu;
 	irq->group = 1;
 
-	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	xa_lock_irqsave(&dist->lpi_xa, flags);
 
 	/*
 	 * There could be a race with another vgic_add_lpi(), so we need to
@@ -84,14 +84,14 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 		goto out_unlock;
 	}
 
-	ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0));
+	ret = xa_err(__xa_store(&dist->lpi_xa, intid, irq, 0));
 	if (ret) {
 		xa_release(&dist->lpi_xa, intid);
 		kfree(irq);
 	}
 
 out_unlock:
-	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	xa_unlock_irqrestore(&dist->lpi_xa, flags);
 
 	if (ret)
 		return ERR_PTR(ret);
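The switch from xa_store() to __xa_store() follows the locked-variant convention: plain xa_store() takes xa_lock itself, so calling it with the lock already held would deadlock, while the double-underscore variant expects the caller to hold it. The GFP argument of 0 works because the function reserves the slot with xa_reserve_irq() before entering the critical section. A hedged sketch of the overall shape (my_add_lpi and my_dist are illustrative, not the kernel's actual code):

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct my_dist { struct xarray lpi_xa; };	/* illustrative */

static void *my_add_lpi(struct my_dist *dist, u32 intid, void *irq)
{
	unsigned long flags;
	void *old;
	int ret;

	/* Reserve the slot up front, while allocation is still legal. */
	ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	xa_lock_irqsave(&dist->lpi_xa, flags);

	/*
	 * A concurrent my_add_lpi() may have installed an entry for this
	 * intid already; if so, return it (the caller frees its own irq).
	 */
	old = xa_load(&dist->lpi_xa, intid);
	if (old)
		goto out_unlock;

	/* GFP of 0: the reservation above guarantees no allocation here. */
	ret = xa_err(__xa_store(&dist->lpi_xa, intid, irq, 0));
	if (ret)
		xa_release(&dist->lpi_xa, intid);	/* drop unused slot */

out_unlock:
	xa_unlock_irqrestore(&dist->lpi_xa, flags);

	if (ret)
		return ERR_PTR(ret);
	return old ?: irq;
}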
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -29,9 +29,8 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
 * its->cmd_lock (mutex)
 *   its->its_lock (mutex)
 *     vgic_cpu->ap_list_lock		must be taken with IRQs disabled
-*       kvm->lpi_list_lock		must be taken with IRQs disabled
-*         vgic_dist->lpi_xa.xa_lock	must be taken with IRQs disabled
-*           vgic_irq->irq_lock		must be taken with IRQs disabled
+*       vgic_dist->lpi_xa.xa_lock	must be taken with IRQs disabled
+*         vgic_irq->irq_lock		must be taken with IRQs disabled
 *
 * As the ap_list_lock might be taken from the timer interrupt handler,
 * we have to disable IRQs before taking this lock and everything lower
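In practice the documented order means any path that needs both locks must take xa_lock first, with interrupts disabled, and nest the per-IRQ lock inside it. An illustrative (non-verbatim) nesting, with my_irq standing in for struct vgic_irq:

#include <linux/spinlock.h>
#include <linux/xarray.h>

struct my_irq {
	raw_spinlock_t irq_lock;	/* stand-in for vgic_irq::irq_lock */
	/* ... */
};

static void my_update_lpi(struct xarray *lpi_xa, unsigned long intid)
{
	struct my_irq *irq;
	unsigned long flags;

	/* Outer level: lpi_xa.xa_lock, taken with IRQs disabled. */
	xa_lock_irqsave(lpi_xa, flags);

	irq = xa_load(lpi_xa, intid);
	if (irq) {
		/*
		 * Inner level: irq_lock nests below xa_lock in the order
		 * documented above. IRQs are already off, so the plain
		 * raw_spin_lock() variant suffices here.
		 */
		raw_spin_lock(&irq->irq_lock);
		/* ... update per-IRQ state ... */
		raw_spin_unlock(&irq->irq_lock);
	}

	xa_unlock_irqrestore(lpi_xa, flags);
}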
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -280,9 +280,6 @@ struct vgic_dist {
 	 */
 	u64			propbaser;
 
-	/* Protects the lpi_list. */
-	raw_spinlock_t		lpi_list_lock;
-
 #define LPI_XA_MARK_DEBUG_ITER	XA_MARK_0
 	struct xarray		lpi_xa;
 
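LPI_XA_MARK_DEBUG_ITER aliases XA_MARK_0, one of the xarray's per-entry mark bits; marked entries can be found without walking the whole array. A small usage sketch under the same IRQ-safe locking discipline (function names are illustrative):

#include <linux/xarray.h>

#define LPI_XA_MARK_DEBUG_ITER	XA_MARK_0

/* Tag one entry so a debug iterator can pick it up later. */
static void my_mark_lpi(struct xarray *lpi_xa, unsigned long intid)
{
	unsigned long flags;

	xa_lock_irqsave(lpi_xa, flags);
	__xa_set_mark(lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
	xa_unlock_irqrestore(lpi_xa, flags);
}

/* Visit only the marked entries; unmarked ones are skipped. */
static void my_walk_marked(struct xarray *lpi_xa)
{
	unsigned long intid;
	void *entry;

	xa_for_each_marked(lpi_xa, intid, entry, LPI_XA_MARK_DEBUG_ITER) {
		/* ... inspect entry ... */
	}
}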