KVM: arm64: Make PMEVTYPER<n>_EL0.NSH RES0 if EL2 isn't advertised
The NSH bit, which filters event counting at EL2, is required by the
architecture if an implementation has EL2. Even though KVM doesn't
support nested virt yet, it makes no effort to hide the existence of EL2
from the ID registers. Userspace can, however, change the value of PFR0
to hide EL2.

Align KVM's sysreg emulation with the architecture and make NSH RES0 if
EL2 isn't advertised. Keep in mind the bit is ignored when constructing
the backing perf event.

While at it, build the event type mask using explicit field definitions
instead of relying on ARMV8_PMU_EVTYPE_MASK. KVM probably should've been
doing this in the first place, as it avoids changes to the aforementioned
mask affecting sysreg emulation.

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Link: https://lore.kernel.org/r/20231019185618.3442949-2-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit bc512d6a9b
parent 6465e260f4
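For illustration, the masking behaviour this patch implements can be
sketched as a standalone program. This is a minimal sketch, not kernel
code: the bit positions follow the PMEVTYPER<n>_EL0 layout (P is bit 31,
U bit 30, NSH bit 27, evtCount in the low bits), and the macro and
helper names below are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* PMEVTYPER<n>_EL0 filter bits, per the register layout */
#define PMU_EXCLUDE_EL1	(1ULL << 31)	/* P: don't count at EL1 */
#define PMU_EXCLUDE_EL0	(1ULL << 30)	/* U: don't count at EL0 */
#define PMU_INCLUDE_EL2	(1ULL << 27)	/* NSH: count at EL2 */
#define PMU_EVENT_MASK	0x3ffULL	/* evtCount, base PMUv3 event space */

/* Mirrors the shape of kvm_pmu_evtyper_mask(): NSH is writable only
 * when the VM advertises EL2 in ID_AA64PFR0_EL1. */
static uint64_t evtyper_mask(bool el2_advertised)
{
	uint64_t mask = PMU_EXCLUDE_EL1 | PMU_EXCLUDE_EL0 | PMU_EVENT_MASK;

	if (el2_advertised)
		mask |= PMU_INCLUDE_EL2;

	return mask;
}

int main(void)
{
	/* Guest writes NSH plus event 0x11 (CPU_CYCLES) */
	uint64_t data = PMU_INCLUDE_EL2 | 0x11;

	/* With EL2 hidden, NSH is RES0 and is stripped on the write */
	printf("EL2 hidden:     %#llx\n",
	       (unsigned long long)(data & evtyper_mask(false)));
	printf("EL2 advertised: %#llx\n",
	       (unsigned long long)(data & evtyper_mask(true)));
	return 0;
}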
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -60,6 +60,18 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
 	return __kvm_pmu_event_mask(pmuver);
 }
 
+u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
+{
+	u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
+		   kvm_pmu_event_mask(kvm);
+	u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
+
+	if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
+		mask |= ARMV8_PMU_INCLUDE_EL2;
+
+	return mask;
+}
+
 /**
  * kvm_pmc_is_64bit - determine if counter is 64bit
  * @pmc: counter context
@@ -657,18 +669,13 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 				    u64 select_idx)
 {
 	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
-	u64 reg, mask;
+	u64 reg;
 
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	mask  =  ARMV8_PMU_EVTYPE_MASK;
-	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
-	mask |= kvm_pmu_event_mask(vcpu->kvm);
-
 	reg = counter_index_to_evtreg(pmc->idx);
-
-	__vcpu_sys_reg(vcpu, reg) = data & mask;
+	__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
 
 	kvm_pmu_create_perf_event(pmc);
 }
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -746,8 +746,12 @@ static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 
 static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
+	/* This thing will UNDEF, who cares about the reset value? */
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
+	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -988,7 +992,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
 		kvm_vcpu_pmu_restore_guest(vcpu);
 	} else {
-		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
+		p->regval = __vcpu_sys_reg(vcpu, reg);
 	}
 
 	return true;
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -101,6 +101,7 @@ void kvm_vcpu_pmu_resync_el0(void);
 })
 
 u8 kvm_arm_pmu_get_pmuver_limit(void);
+u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
 
 #else
 struct kvm_pmu {
@@ -172,6 +173,10 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
 {
 	return 0;
 }
+static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
+{
+	return 0;
+}
 static inline void kvm_vcpu_pmu_resync_el0(void) {}
 
 #endif
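As the commit message notes, the new behaviour only kicks in when
userspace hides EL2 by rewriting ID_AA64PFR0_EL1 via KVM_SET_ONE_REG. A
rough sketch of what that could look like from a VMM; the helper name is
made up for the example, error handling is trimmed, and it assumes the
running kernel accepts the ID register write:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* pulls in asm/kvm.h for ARM64_SYS_REG() */

/* ID_AA64PFR0_EL1 is op0=3, op1=0, CRn=0, CRm=4, op2=0 */
#define PFR0_ID		ARM64_SYS_REG(3, 0, 0, 4, 0)
#define PFR0_EL2_MASK	(0xfULL << 8)	/* EL2 field, bits [11:8] */

/* Hypothetical helper: clear ID_AA64PFR0_EL1.EL2 so the vCPU stops
 * advertising EL2, which in turn makes PMEVTYPER<n>_EL0.NSH RES0. */
static int hide_el2(int vcpu_fd)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id	= PFR0_ID,
		.addr	= (uint64_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;

	val &= ~PFR0_EL2_MASK;

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}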