KVM: arm64: PMU: Move the ID_AA64DFR0_EL1.PMUver limit to VM creation

As further patches will enable the selection of a PMU revision
from userspace, sample the supported PMU revision at VM creation
time, rather than building it each time the ID_AA64DFR0_EL1 register
is accessed.

This shouldn't result in any change in behaviour.

Reviewed-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221113163832.3154370-11-maz@kernel.org
Marc Zyngier 2022-11-13 16:38:26 +00:00
parent 26d2d0594d
commit 3d0dba5764
5 changed files with 55 additions and 8 deletions

arch/arm64/include/asm/kvm_host.h

@@ -163,6 +163,10 @@ struct kvm_arch {
 	u8 pfr0_csv2;
 	u8 pfr0_csv3;
+	struct {
+		u8 imp:4;
+		u8 unimp:4;
+	} dfr0_pmuver;
 
 	/* Hypercall features firmware registers' descriptor */
 	struct kvm_smccc_features smccc_feat;

arch/arm64/kvm/arm.c

@@ -164,6 +164,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	set_default_spectre(kvm);
 	kvm_arm_init_hypercalls(kvm);
 
+	/*
+	 * Initialise the default PMUver before there is a chance to
+	 * create an actual PMU.
+	 */
+	kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
+
 	return ret;
 
 out_free_stage2_pgd:
 	kvm_free_stage2_pgd(&kvm->arch.mmu);

arch/arm64/kvm/pmu-emul.c

@@ -1047,3 +1047,14 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 	return -ENXIO;
 }
+
+u8 kvm_arm_pmu_get_pmuver_limit(void)
+{
+	u64 tmp;
+
+	tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+	tmp = cpuid_feature_cap_perfmon_field(tmp,
+					      ID_AA64DFR0_EL1_PMUVer_SHIFT,
+					      ID_AA64DFR0_EL1_PMUVer_V3P4);
+	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
+}
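
For reference, the capping that kvm_arm_pmu_get_pmuver_limit() performs can be modelled outside the kernel. The snippet below is a minimal userspace sketch: the constants and the cap_pmuver() helper are local stand-ins written for illustration, not the kernel's cpuid_feature_cap_perfmon_field().

/*
 * Minimal userspace sketch of the capping done by
 * kvm_arm_pmu_get_pmuver_limit(); helpers and constants below are
 * local stand-ins, not the kernel's implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define PMUVER_SHIFT	8			/* ID_AA64DFR0_EL1.PMUVer is bits [11:8] */
#define PMUVER_MASK	(0xfULL << PMUVER_SHIFT)
#define PMUVER_V3P4	0x5			/* PMUv3 for Armv8.4 */
#define PMUVER_V3P5	0x6			/* PMUv3 for Armv8.5 */
#define PMUVER_IMP_DEF	0xf			/* IMPLEMENTATION DEFINED */

/* Stand-in for cpuid_feature_cap_perfmon_field(): treat IMP_DEF as 0, then cap */
static uint64_t cap_pmuver(uint64_t reg, uint8_t cap)
{
	uint8_t ver = (reg & PMUVER_MASK) >> PMUVER_SHIFT;

	if (ver == PMUVER_IMP_DEF)
		ver = 0;
	if (ver > cap)
		ver = cap;

	return (reg & ~PMUVER_MASK) | ((uint64_t)ver << PMUVER_SHIFT);
}

int main(void)
{
	/* Pretend the host implements PMUv3 for Armv8.5 */
	uint64_t dfr0 = (uint64_t)PMUVER_V3P5 << PMUVER_SHIFT;
	uint64_t capped = cap_pmuver(dfr0, PMUVER_V3P4);

	/* The VM-wide limit is the capped field value, i.e. 0x5 here */
	printf("pmuver limit = %#llx\n",
	       (unsigned long long)((capped & PMUVER_MASK) >> PMUVER_SHIFT));
	return 0;
}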

arch/arm64/kvm/sys_regs.c

@@ -1062,6 +1062,27 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_has_pmu(vcpu))
+		return vcpu->kvm->arch.dfr0_pmuver.imp;
+
+	return vcpu->kvm->arch.dfr0_pmuver.unimp;
+}
+
+static u8 pmuver_to_perfmon(u8 pmuver)
+{
+	switch (pmuver) {
+	case ID_AA64DFR0_EL1_PMUVer_IMP:
+		return ID_DFR0_PERFMON_8_0;
+	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
+		return ID_DFR0_PERFMON_IMP_DEF;
+	default:
+		/* Anything ARMv8.1+ and NI have the same value. For now. */
+		return pmuver;
+	}
+}
+
 /* Read a sanitised cpufeature ID register by sys_reg_desc */
 static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
 {
@@ -1111,18 +1132,17 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
 	case SYS_ID_AA64DFR0_EL1:
 		/* Limit debug to ARMv8.0 */
 		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
-		/* Limit guests to PMUv3 for ARMv8.4 */
-		val = cpuid_feature_cap_perfmon_field(val,
-						      ID_AA64DFR0_EL1_PMUVer_SHIFT,
-						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0);
+		/* Set PMUver to the required version */
+		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
+				  vcpu_pmuver(vcpu));
 		/* Hide SPE from guests */
 		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
 		break;
 	case SYS_ID_DFR0_EL1:
-		/* Limit guests to PMUv3 for ARMv8.4 */
-		val = cpuid_feature_cap_perfmon_field(val,
-						      ID_DFR0_PERFMON_SHIFT,
-						      kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
+		val &= ~ARM64_FEATURE_MASK(ID_DFR0_PERFMON);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_PERFMON),
+				  pmuver_to_perfmon(vcpu_pmuver(vcpu)));
 		break;
 	}
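
The pmuver_to_perfmon() translation introduced above works because ID_AA64DFR0_EL1.PMUVer and ID_DFR0_EL1.PerfMon use the same encodings for "not implemented", IMPLEMENTATION DEFINED and the Armv8.1+ variants; only the baseline PMUv3 encoding differs. A standalone sketch, with the field encodings written out as local constants for illustration only:

/*
 * Standalone illustration of the PMUVer -> PerfMon translation; the
 * constants below are local re-definitions of the architectural field
 * encodings, written out for readability.
 */
#include <stdint.h>
#include <stdio.h>

#define PMUVER_NI	0x0	/* no PMU */
#define PMUVER_IMP	0x1	/* baseline PMUv3 */
#define PMUVER_V3P1	0x4	/* PMUv3 for Armv8.1 */
#define PMUVER_IMP_DEF	0xf	/* IMPLEMENTATION DEFINED */

#define PERFMON_8_0	0x3	/* ID_DFR0_EL1.PerfMon encoding for baseline PMUv3 */
#define PERFMON_IMP_DEF	0xf

static uint8_t pmuver_to_perfmon(uint8_t pmuver)
{
	switch (pmuver) {
	case PMUVER_IMP:
		return PERFMON_8_0;
	case PMUVER_IMP_DEF:
		return PERFMON_IMP_DEF;
	default:
		/* NI and the Armv8.1+ variants use identical encodings in both fields */
		return pmuver;
	}
}

int main(void)
{
	const uint8_t pmuvers[] = { PMUVER_NI, PMUVER_IMP, PMUVER_V3P1, PMUVER_IMP_DEF };

	for (unsigned int i = 0; i < sizeof(pmuvers); i++)
		printf("PMUVer %#x -> PerfMon %#x\n",
		       (unsigned int)pmuvers[i],
		       (unsigned int)pmuver_to_perfmon(pmuvers[i]));
	return 0;
}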

include/kvm/arm_pmu.h

@@ -89,6 +89,8 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
 	} while (0)
 
+u8 kvm_arm_pmu_get_pmuver_limit(void);
+
 #else
 struct kvm_pmu {
 };
@@ -154,6 +156,10 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
+{
+	return 0;
+}
 
 #endif
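
Taken together, the change amounts to: sample the host limit once in kvm_arch_init_vm(), store it in kvm->arch.dfr0_pmuver, and have read_id_reg() consume the stored value instead of recomputing it. A minimal model of that flow follows; all names here (struct vm, vm_create, host_pmuver_limit, vm_pmuver) are invented for the sketch and only mirror the kernel structure.

/*
 * Minimal model of the resulting flow: the limit is sampled once at VM
 * creation and simply re-read on every ID register access.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vm {
	struct {
		uint8_t imp;	/* PMUVer exposed when the guest has a PMU */
		uint8_t unimp;	/* PMUVer exposed otherwise (0 for now) */
	} dfr0_pmuver;
	bool has_pmu;
};

/* Stand-in for kvm_arm_pmu_get_pmuver_limit(): assume the host caps at PMUv3 for Armv8.4 */
static uint8_t host_pmuver_limit(void)
{
	return 0x5;
}

static void vm_create(struct vm *vm, bool with_pmu)
{
	/* Sampled once here, instead of on every ID_AA64DFR0_EL1 access */
	vm->dfr0_pmuver.imp = host_pmuver_limit();
	vm->dfr0_pmuver.unimp = 0;
	vm->has_pmu = with_pmu;
}

/* Mirrors vcpu_pmuver(): pick the stored value depending on PMU presence */
static uint8_t vm_pmuver(const struct vm *vm)
{
	return vm->has_pmu ? vm->dfr0_pmuver.imp : vm->dfr0_pmuver.unimp;
}

int main(void)
{
	struct vm vm;

	vm_create(&vm, true);
	printf("guest sees PMUVer = %#x\n", (unsigned int)vm_pmuver(&vm));
	return 0;
}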