KVM: arm64: vgic-its: Get rid of vgic_copy_lpi_list()

The last user has been transitioned to walking the LPI xarray directly.
Cut the wart off, and get rid of the now unneeded lpi_count while doing
so.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240422200158.2606761-7-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
Author: Oliver Upton <oliver.upton@linux.dev>
Date:   2024-04-22 20:01:45 +00:00
Committed-by: Marc Zyngier <maz@kernel.org>
parent 85d3ccc8b7
commit 30a0ce9c49
4 changed files with 0 additions and 51 deletions
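
For reference, the direct walk that replaced the snapshot looks roughly like the sketch below. It is modeled on how callers such as vgic_its_invall() were converted earlier in this series; the helper name vgic_walk_lpis() and the vCPU filter are illustrative, not part of this commit.

/*
 * Illustrative sketch only: iterate the LPI xarray in place, taking a
 * reference on each IRQ via vgic_get_irq() instead of snapshotting the
 * INTIDs into a kmalloc'ed array. The per-LPI work (update_lpi_config()
 * here) depends on the caller.
 */
static int vgic_walk_lpis(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long intid;

	xa_for_each(&dist->lpi_xa, intid, irq) {
		/* Take a reference; the entry may be freed concurrently. */
		irq = vgic_get_irq(kvm, NULL, intid);
		if (!irq)
			continue;

		if (!vcpu || irq->target_vcpu == vcpu)
			update_lpi_config(kvm, irq, vcpu, false);

		vgic_put_irq(kvm, irq);
	}

	return 0;
}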

arch/arm64/kvm/vgic/vgic-its.c

@@ -86,11 +86,8 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	if (ret) {
 		xa_release(&dist->lpi_xa, intid);
 		kfree(irq);
 		goto out_unlock;
 	}
-
-	atomic_inc(&dist->lpi_count);
-
 out_unlock:
 	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
@@ -316,51 +313,6 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 	return 0;
 }
 
-/*
- * Create a snapshot of the current LPIs targeting @vcpu, so that we can
- * enumerate those LPIs without holding any lock.
- * Returns their number and puts the kmalloc'ed array into intid_ptr.
- */
-int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
-{
-	struct vgic_dist *dist = &kvm->arch.vgic;
-	XA_STATE(xas, &dist->lpi_xa, GIC_LPI_OFFSET);
-	struct vgic_irq *irq;
-	unsigned long flags;
-	u32 *intids;
-	int irq_count, i = 0;
-
-	/*
-	 * There is an obvious race between allocating the array and LPIs
-	 * being mapped/unmapped. If we ended up here as a result of a
-	 * command, we're safe (locks are held, preventing another
-	 * command). If coming from another path (such as enabling LPIs),
-	 * we must be careful not to overrun the array.
-	 */
-	irq_count = atomic_read(&dist->lpi_count);
-	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
-	if (!intids)
-		return -ENOMEM;
-
-	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
-	rcu_read_lock();
-
-	xas_for_each(&xas, irq, VGIC_LPI_MAX_INTID) {
-		if (i == irq_count)
-			break;
-		/* We don't need to "get" the IRQ, as we hold the list lock. */
-		if (vcpu && irq->target_vcpu != vcpu)
-			continue;
-		intids[i++] = irq->intid;
-	}
-
-	rcu_read_unlock();
-	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
-
-	*intid_ptr = intids;
-	return i;
-}
-
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
 	int ret = 0;

arch/arm64/kvm/vgic/vgic.c

@@ -126,7 +126,6 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 	__xa_erase(&dist->lpi_xa, irq->intid);
 	xa_unlock_irqrestore(&dist->lpi_xa, flags);
 
-	atomic_dec(&dist->lpi_count);
 	kfree_rcu(irq, rcu);
 }
 

arch/arm64/kvm/vgic/vgic.h

@@ -331,7 +331,6 @@ static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
 }
 
 bool vgic_lpis_enabled(struct kvm_vcpu *vcpu);
-int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
 int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
 			 u32 devid, u32 eventid, struct vgic_irq **irq);
 struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);

include/kvm/arm_vgic.h

@@ -279,7 +279,6 @@ struct vgic_dist {
 #define LPI_XA_MARK_DEBUG_ITER	XA_MARK_0
 	struct xarray		lpi_xa;
-	atomic_t		lpi_count;
 
 	/* LPI translation cache */
 	struct list_head	lpi_translation_cache;
 