KVM: arm64: Destroy mpidr_data for 'late' vCPU creation

A particularly annoying userspace could create a vCPU after KVM has
computed mpidr_data for the VM, either by racing against VGIC
initialization or by using a userspace irqchip.

In any case, this means mpidr_data no longer fully describes the VM, and
attempts to find the new vCPU with kvm_mpidr_to_vcpu() will fail. The
fix is to discard mpidr_data altogether, as it is only a performance
optimization and not required for correctness. In all likelihood KVM
will recompute the mappings when KVM_RUN is called on the new vCPU.

Note that reads of mpidr_data are not guarded by a lock; promote to RCU
to cope with the possibility of mpidr_data being invalidated at runtime.
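
For context, this is the standard RCU publish/unpublish pattern: readers
dereference the pointer inside an RCU read-side critical section, while the
updater clears the pointer and waits for a grace period before freeing. A
minimal, self-contained sketch of that pattern follows (kernel-style C, not
the KVM code itself; the cache/cache_lock/lookup_cache/invalidate_cache
names are made up for illustration):

  #include <linux/mutex.h>
  #include <linux/rcupdate.h>
  #include <linux/slab.h>

  struct cache { int val; };

  static struct cache __rcu *cache;
  static DEFINE_MUTEX(cache_lock);

  /* Reader: the object stays valid until rcu_read_unlock(). */
  static int lookup_cache(void)
  {
          struct cache *c;
          int ret = -1;

          rcu_read_lock();
          c = rcu_dereference(cache);
          if (c)
                  ret = c->val;
          rcu_read_unlock();
          return ret;
  }

  /* Updater: unpublish the pointer, wait out readers, then free. */
  static void invalidate_cache(void)
  {
          struct cache *c;

          mutex_lock(&cache_lock);
          c = rcu_dereference_protected(cache, lockdep_is_held(&cache_lock));
          if (c) {
                  rcu_assign_pointer(cache, NULL);
                  synchronize_rcu();
                  kfree(c);
          }
          mutex_unlock(&cache_lock);
  }

Because the updater only frees after synchronize_rcu(), a reader that
observes a NULL pointer can safely fall back to iterating the vCPUs, which
is what kvm_mpidr_to_vcpu() does in the diff below.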

Fixes: 54a8006d0b ("KVM: arm64: Fast-track kvm_mpidr_to_vcpu() when mpidr_data is available")
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240508071952.2035422-1-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>

@@ -195,6 +195,23 @@ void kvm_arch_create_vm_debugfs(struct kvm *kvm)
 	kvm_sys_regs_create_debugfs(kvm);
 }
 
+static void kvm_destroy_mpidr_data(struct kvm *kvm)
+{
+	struct kvm_mpidr_data *data;
+
+	mutex_lock(&kvm->arch.config_lock);
+
+	data = rcu_dereference_protected(kvm->arch.mpidr_data,
+					 lockdep_is_held(&kvm->arch.config_lock));
+	if (data) {
+		rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
+		synchronize_rcu();
+		kfree(data);
+	}
+
+	mutex_unlock(&kvm->arch.config_lock);
+}
+
 /**
  * kvm_arch_destroy_vm - destroy the VM data structure
  * @kvm: pointer to the KVM struct
@@ -209,7 +226,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	if (is_protected_kvm_enabled())
 		pkvm_destroy_hyp_vm(kvm);
 
-	kfree(kvm->arch.mpidr_data);
+	kvm_destroy_mpidr_data(kvm);
+
 	kfree(kvm->arch.sysreg_masks);
 	kvm_destroy_vcpus(kvm);
@@ -395,6 +413,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
 
+	/*
+	 * This vCPU may have been created after mpidr_data was initialized.
+	 * Throw out the pre-computed mappings if that is the case which forces
+	 * KVM to fall back to iteratively searching the vCPUs.
+	 */
+	kvm_destroy_mpidr_data(vcpu->kvm);
+
 	err = kvm_vgic_vcpu_init(vcpu);
 	if (err)
 		return err;
@@ -594,7 +619,8 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
 	mutex_lock(&kvm->arch.config_lock);
 
-	if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
+	if (rcu_access_pointer(kvm->arch.mpidr_data) ||
+	    atomic_read(&kvm->online_vcpus) == 1)
 		goto out;
 
 	kvm_for_each_vcpu(c, vcpu, kvm) {
@@ -631,7 +657,7 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
 		data->cmpidr_to_idx[index] = c;
 	}
 
-	kvm->arch.mpidr_data = data;
+	rcu_assign_pointer(kvm->arch.mpidr_data, data);
 out:
 	mutex_unlock(&kvm->arch.config_lock);
 }
@@ -2470,22 +2496,28 @@ out_err:
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
 {
-	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *vcpu = NULL;
+	struct kvm_mpidr_data *data;
 	unsigned long i;
 
 	mpidr &= MPIDR_HWID_BITMASK;
 
-	if (kvm->arch.mpidr_data) {
-		u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
+	rcu_read_lock();
+	data = rcu_dereference(kvm->arch.mpidr_data);
 
-		vcpu = kvm_get_vcpu(kvm,
-				    kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
+	if (data) {
+		u16 idx = kvm_mpidr_index(data, mpidr);
+
+		vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
 		if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
 			vcpu = NULL;
+	}
 
-		return vcpu;
-	}
+	rcu_read_unlock();
+
+	if (vcpu)
+		return vcpu;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
 			return vcpu;