Merge branch kvm-arm64/misc-6.10 into kvmarm-master/next

* kvm-arm64/misc-6.10:
  : .
  : Misc fixes and updates targeting 6.10
  :
  : - Improve boot-time diagnostics when the sysreg tables
  :   are not correctly sorted
  :
  : - Allow FFA_MSG_SEND_DIRECT_REQ in the FFA proxy
  :
  : - Fix duplicate XNX field in the ID_AA64MMFR1_EL1
  :   writeable mask
  :
  : - Allocate PPIs and SGIs outside of the vcpu structure, allowing
  :   for smaller EL2 mapping and some flexibility in implementing
  :   more or less than 32 private IRQs.
  :
  : - Use bitmap_gather() instead of its open-coded equivalent
  :
  : - Make protected mode use hVHE if available
  :
  : - Purge stale mpidr_data if a vcpu is created after the MPIDR
  :   map has been created
  : .
  KVM: arm64: Destroy mpidr_data for 'late' vCPU creation
  KVM: arm64: Use hVHE in pKVM by default on CPUs with VHE support
  KVM: arm64: Fix hvhe/nvhe early alias parsing
  KVM: arm64: Convert kvm_mpidr_index() to bitmap_gather()
  KVM: arm64: vgic: Allocate private interrupts on demand
  KVM: arm64: Remove duplicated AA64MMFR1_EL1 XNX
  KVM: arm64: Remove FFA_MSG_SEND_DIRECT_REQ from the denylist
  KVM: arm64: Improve out-of-order sysreg table diagnostics

Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
Marc Zyngier 2024-05-08 16:41:50 +01:00
commit e28157060c
7 changed files with 115 additions and 49 deletions

View File

@@ -221,20 +221,10 @@ struct kvm_mpidr_data {
static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr) static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
{ {
unsigned long mask = data->mpidr_mask; unsigned long index = 0, mask = data->mpidr_mask;
u64 aff = mpidr & MPIDR_HWID_BITMASK; unsigned long aff = mpidr & MPIDR_HWID_BITMASK;
int nbits, bit, bit_idx = 0;
u16 index = 0;
/* bitmap_gather(&index, &aff, &mask, fls(mask));
* If this looks like RISC-V's BEXT or x86's PEXT
* instructions, it isn't by accident.
*/
nbits = fls(mask);
for_each_set_bit(bit, &mask, nbits) {
index |= (aff & BIT(bit)) >> (bit - bit_idx);
bit_idx++;
}
return index; return index;
} }

View File

@@ -209,8 +209,8 @@ static const struct {
char alias[FTR_ALIAS_NAME_LEN]; char alias[FTR_ALIAS_NAME_LEN];
char feature[FTR_ALIAS_OPTION_LEN]; char feature[FTR_ALIAS_OPTION_LEN];
} aliases[] __initconst = { } aliases[] __initconst = {
{ "kvm_arm.mode=nvhe", "id_aa64mmfr1.vh=0" }, { "kvm_arm.mode=nvhe", "arm64_sw.hvhe=0 id_aa64mmfr1.vh=0" },
{ "kvm_arm.mode=protected", "id_aa64mmfr1.vh=0" }, { "kvm_arm.mode=protected", "arm64_sw.hvhe=1" },
{ "arm64.nosve", "id_aa64pfr0.sve=0" }, { "arm64.nosve", "id_aa64pfr0.sve=0" },
{ "arm64.nosme", "id_aa64pfr1.sme=0" }, { "arm64.nosme", "id_aa64pfr1.sme=0" },
{ "arm64.nobti", "id_aa64pfr1.bt=0" }, { "arm64.nobti", "id_aa64pfr1.bt=0" },

View File

@@ -218,6 +218,23 @@ void kvm_arch_create_vm_debugfs(struct kvm *kvm)
kvm_sys_regs_create_debugfs(kvm); kvm_sys_regs_create_debugfs(kvm);
} }
static void kvm_destroy_mpidr_data(struct kvm *kvm)
{
struct kvm_mpidr_data *data;
mutex_lock(&kvm->arch.config_lock);
data = rcu_dereference_protected(kvm->arch.mpidr_data,
lockdep_is_held(&kvm->arch.config_lock));
if (data) {
rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
synchronize_rcu();
kfree(data);
}
mutex_unlock(&kvm->arch.config_lock);
}
/** /**
* kvm_arch_destroy_vm - destroy the VM data structure * kvm_arch_destroy_vm - destroy the VM data structure
* @kvm: pointer to the KVM struct * @kvm: pointer to the KVM struct
@@ -232,7 +249,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
if (is_protected_kvm_enabled()) if (is_protected_kvm_enabled())
pkvm_destroy_hyp_vm(kvm); pkvm_destroy_hyp_vm(kvm);
kfree(kvm->arch.mpidr_data); kvm_destroy_mpidr_data(kvm);
kfree(kvm->arch.sysreg_masks); kfree(kvm->arch.sysreg_masks);
kvm_destroy_vcpus(kvm); kvm_destroy_vcpus(kvm);
@@ -450,6 +468,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
/*
* This vCPU may have been created after mpidr_data was initialized.
* Throw out the pre-computed mappings if that is the case which forces
* KVM to fall back to iteratively searching the vCPUs.
*/
kvm_destroy_mpidr_data(vcpu->kvm);
err = kvm_vgic_vcpu_init(vcpu); err = kvm_vgic_vcpu_init(vcpu);
if (err) if (err)
return err; return err;
@@ -687,7 +712,8 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
mutex_lock(&kvm->arch.config_lock); mutex_lock(&kvm->arch.config_lock);
if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1) if (rcu_access_pointer(kvm->arch.mpidr_data) ||
atomic_read(&kvm->online_vcpus) == 1)
goto out; goto out;
kvm_for_each_vcpu(c, vcpu, kvm) { kvm_for_each_vcpu(c, vcpu, kvm) {
@@ -724,7 +750,7 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
data->cmpidr_to_idx[index] = c; data->cmpidr_to_idx[index] = c;
} }
kvm->arch.mpidr_data = data; rcu_assign_pointer(kvm->arch.mpidr_data, data);
out: out:
mutex_unlock(&kvm->arch.config_lock); mutex_unlock(&kvm->arch.config_lock);
} }
@@ -2562,22 +2588,28 @@ out_err:
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{ {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu = NULL;
struct kvm_mpidr_data *data;
unsigned long i; unsigned long i;
mpidr &= MPIDR_HWID_BITMASK; mpidr &= MPIDR_HWID_BITMASK;
if (kvm->arch.mpidr_data) { rcu_read_lock();
u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr); data = rcu_dereference(kvm->arch.mpidr_data);
vcpu = kvm_get_vcpu(kvm, if (data) {
kvm->arch.mpidr_data->cmpidr_to_idx[idx]); u16 idx = kvm_mpidr_index(data, mpidr);
vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu)) if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
vcpu = NULL; vcpu = NULL;
return vcpu;
} }
rcu_read_unlock();
if (vcpu)
return vcpu;
kvm_for_each_vcpu(i, vcpu, kvm) { kvm_for_each_vcpu(i, vcpu, kvm) {
if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu)) if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
return vcpu; return vcpu;

View File

@@ -600,7 +600,6 @@ static bool ffa_call_supported(u64 func_id)
case FFA_MSG_POLL: case FFA_MSG_POLL:
case FFA_MSG_WAIT: case FFA_MSG_WAIT:
/* 32-bit variants of 64-bit calls */ /* 32-bit variants of 64-bit calls */
case FFA_MSG_SEND_DIRECT_REQ:
case FFA_MSG_SEND_DIRECT_RESP: case FFA_MSG_SEND_DIRECT_RESP:
case FFA_RXTX_MAP: case FFA_RXTX_MAP:
case FFA_MEM_DONATE: case FFA_MEM_DONATE:

View File

@@ -2338,7 +2338,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64MMFR0_EL1_TGRAN16_2)), ID_AA64MMFR0_EL1_TGRAN16_2)),
ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 | ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
ID_AA64MMFR1_EL1_HCX | ID_AA64MMFR1_EL1_HCX |
ID_AA64MMFR1_EL1_XNX |
ID_AA64MMFR1_EL1_TWED | ID_AA64MMFR1_EL1_TWED |
ID_AA64MMFR1_EL1_XNX | ID_AA64MMFR1_EL1_XNX |
ID_AA64MMFR1_EL1_VH | ID_AA64MMFR1_EL1_VH |
@@ -3069,12 +3068,14 @@ static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
if (!is_32 && table[i].reg && !table[i].reset) { if (!is_32 && table[i].reg && !table[i].reset) {
kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i); kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
&table[i], i, table[i].name);
return false; return false;
} }
if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) { if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1); kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
&table[i], i, table[i - 1].name, table[i].name);
return false; return false;
} }
} }

View File

@@ -180,27 +180,22 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
return 0; return 0;
} }
/** static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu)
* kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
* structures and register VCPU-specific KVM iodevs
*
* @vcpu: pointer to the VCPU being created and initialized
*
* Only do initialization, but do not actually enable the
* VGIC CPU interface
*/
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{ {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
int ret = 0;
int i; int i;
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF; lockdep_assert_held(&vcpu->kvm->arch.config_lock);
INIT_LIST_HEAD(&vgic_cpu->ap_list_head); if (vgic_cpu->private_irqs)
raw_spin_lock_init(&vgic_cpu->ap_list_lock); return 0;
atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
vgic_cpu->private_irqs = kcalloc(VGIC_NR_PRIVATE_IRQS,
sizeof(struct vgic_irq),
GFP_KERNEL_ACCOUNT);
if (!vgic_cpu->private_irqs)
return -ENOMEM;
/* /*
* Enable and configure all SGIs to be edge-triggered and * Enable and configure all SGIs to be edge-triggered and
@@ -225,9 +220,48 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
} }
} }
return 0;
}
static int vgic_allocate_private_irqs(struct kvm_vcpu *vcpu)
{
int ret;
mutex_lock(&vcpu->kvm->arch.config_lock);
ret = vgic_allocate_private_irqs_locked(vcpu);
mutex_unlock(&vcpu->kvm->arch.config_lock);
return ret;
}
/**
* kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
* structures and register VCPU-specific KVM iodevs
*
* @vcpu: pointer to the VCPU being created and initialized
*
* Only do initialization, but do not actually enable the
* VGIC CPU interface
*/
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
int ret = 0;
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
raw_spin_lock_init(&vgic_cpu->ap_list_lock);
atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
if (!irqchip_in_kernel(vcpu->kvm)) if (!irqchip_in_kernel(vcpu->kvm))
return 0; return 0;
ret = vgic_allocate_private_irqs(vcpu);
if (ret)
return ret;
/* /*
* If we are creating a VCPU with a GICv3 we must also register the * If we are creating a VCPU with a GICv3 we must also register the
* KVM io device for the redistributor that belongs to this VCPU. * KVM io device for the redistributor that belongs to this VCPU.
@@ -283,10 +317,13 @@ int vgic_init(struct kvm *kvm)
/* Initialize groups on CPUs created before the VGIC type was known */ /* Initialize groups on CPUs created before the VGIC type was known */
kvm_for_each_vcpu(idx, vcpu, kvm) { kvm_for_each_vcpu(idx, vcpu, kvm) {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; ret = vgic_allocate_private_irqs_locked(vcpu);
if (ret)
goto out;
for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) { for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, i);
switch (dist->vgic_model) { switch (dist->vgic_model) {
case KVM_DEV_TYPE_ARM_VGIC_V3: case KVM_DEV_TYPE_ARM_VGIC_V3:
irq->group = 1; irq->group = 1;
@@ -298,8 +335,12 @@ int vgic_init(struct kvm *kvm)
break; break;
default: default:
ret = -EINVAL; ret = -EINVAL;
goto out;
} }
vgic_put_irq(kvm, irq);
if (ret)
goto out;
} }
} }
@@ -373,6 +414,9 @@ static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_flush_pending_lpis(vcpu); vgic_flush_pending_lpis(vcpu);
INIT_LIST_HEAD(&vgic_cpu->ap_list_head); INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
kfree(vgic_cpu->private_irqs);
vgic_cpu->private_irqs = NULL;
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
vgic_unregister_redist_iodev(vcpu); vgic_unregister_redist_iodev(vcpu);
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF; vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;

View File

@@ -331,7 +331,7 @@ struct vgic_cpu {
struct vgic_v3_cpu_if vgic_v3; struct vgic_v3_cpu_if vgic_v3;
}; };
struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; struct vgic_irq *private_irqs;
raw_spinlock_t ap_list_lock; /* Protects the ap_list */ raw_spinlock_t ap_list_lock; /* Protects the ap_list */