Merge branch kvm-arm64/pmu_pmcr_n into kvmarm/next
* kvm-arm64/pmu_pmcr_n:
  : User-defined PMC limit, courtesy Raghavendra Rao Ananta
  :
  : Certain VMMs may want to reserve some PMCs for host use while running a
  : KVM guest. This was a bit difficult before, as KVM advertised all
  : supported counters to the guest. Userspace can now limit the number of
  : advertised PMCs by writing to PMCR_EL0.N, as KVM's sysreg and PMU
  : emulation enforce the specified limit for handling guest accesses.
  KVM: selftests: aarch64: vPMU test for validating user accesses
  KVM: selftests: aarch64: vPMU register test for unimplemented counters
  KVM: selftests: aarch64: vPMU register test for implemented counters
  KVM: selftests: aarch64: Introduce vpmu_counter_access test
  tools: Import arm_pmuv3.h
  KVM: arm64: PMU: Allow userspace to limit PMCR_EL0.N for the guest
  KVM: arm64: Sanitize PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR} before first run
  KVM: arm64: Add {get,set}_user for PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR}
  KVM: arm64: PMU: Set PMCR_EL0.N for vCPU based on the associated PMU
  KVM: arm64: PMU: Add a helper to read a vCPU's PMCR_EL0
  KVM: arm64: Select default PMU in KVM_ARM_VCPU_INIT handler
  KVM: arm64: PMU: Introduce helpers to set the guest's PMU

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
This commit is contained in commit 123f42f0ad.
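Illustrative only, not part of the merge: a minimal userspace sketch of the knob described above, assuming a VMM that has already created a vCPU with the KVM_ARM_VCPU_PMU_V3 feature on an arm64 host. The helper name limit_guest_pmcr_n() and its error handling are hypothetical; the register encoding and the PMCR_EL0.N field layout follow the arm64 KVM UAPI.

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>		/* pulls in <asm/kvm.h> for ARM64_SYS_REG() on arm64 */

/* PMCR_EL0 is op0=3, op1=3, CRn=9, CRm=12, op2=0 (S3_3_C9_C12_0). */
#define PMCR_EL0_ID	ARM64_SYS_REG(3, 3, 9, 12, 0)
#define PMCR_N_SHIFT	11
#define PMCR_N_MASK	0x1f

/* Hypothetical helper: clamp the number of event counters advertised to the guest. */
static int limit_guest_pmcr_n(int vcpu_fd, uint8_t want_n)
{
	uint64_t pmcr;
	struct kvm_one_reg reg = {
		.id	= PMCR_EL0_ID,
		.addr	= (uint64_t)&pmcr,
	};

	/* Read the current PMCR_EL0 view, which has N set to the PMU's maximum. */
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;

	pmcr &= ~((uint64_t)PMCR_N_MASK << PMCR_N_SHIFT);
	pmcr |= (uint64_t)(want_n & PMCR_N_MASK) << PMCR_N_SHIFT;

	/* Write it back before the first KVM_RUN; only then does KVM honor N. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
```

As the set_pmcr() hunk below notes, KVM silently ignores a PMCR_EL0.N written after the VM has run, or one larger than what the host PMU implements, so a VMM would do this once at setup time.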
@@ -290,6 +290,9 @@ struct kvm_arch {
 
 	cpumask_var_t supported_cpus;
 
+	/* PMCR_EL0.N value for the guest */
+	u8 pmcr_n;
+
 	/* Hypercall features firmware registers' descriptor */
 	struct kvm_smccc_features smccc_feat;
 	struct maple_tree smccc_filter;
@@ -857,8 +857,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
 		}
 
 		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
-			kvm_pmu_handle_pmcr(vcpu,
-					    __vcpu_sys_reg(vcpu, PMCR_EL0));
+			kvm_vcpu_reload_pmu(vcpu);
 
 		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
 			kvm_vcpu_pmu_restore_guest(vcpu);
@@ -1319,6 +1318,21 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
 			     KVM_VCPU_MAX_FEATURES);
 }
 
+static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	int ret = 0;
+
+	/*
+	 * When the vCPU has a PMU, but no PMU is set for the guest
+	 * yet, set the default one.
+	 */
+	if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
+		ret = kvm_arm_set_default_pmu(kvm);
+
+	return ret;
+}
+
 static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 				 const struct kvm_vcpu_init *init)
 {
@@ -1334,6 +1348,10 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 
 	bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
 
+	ret = kvm_setup_vcpu(vcpu);
+	if (ret)
+		goto out_unlock;
+
 	/* Now we know what it is, we can reset it. */
 	kvm_reset_vcpu(vcpu);
 
@@ -89,7 +89,7 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
 
 static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
 {
-	u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);
+	u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
 
 	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
 	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
@@ -267,7 +267,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 {
-	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
+	u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
 
 	val &= ARMV8_PMU_PMCR_N_MASK;
 	if (val == 0)
@@ -289,7 +289,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
 		return;
 
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -341,7 +341,7 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
 	u64 reg = 0;
 
-	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
+	if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
@@ -443,7 +443,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 {
 	int i;
 
-	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
 		return;
 
 	/* Weed out disabled counters */
@@ -586,7 +586,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
 {
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
-	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
+	return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
 	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
 }
 
@@ -735,10 +735,9 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 	 * It is still necessary to get a valid cpu, though, to probe for the
 	 * default PMU instance as userspace is not required to specify a PMU
 	 * type. In order to uphold the preexisting behavior KVM selects the
-	 * PMU instance for the core where the first call to the
-	 * KVM_ARM_VCPU_PMU_V3_CTRL attribute group occurs. A dependent use case
-	 * would be a user with disdain of all things big.LITTLE that affines
-	 * the VMM to a particular cluster of cores.
+	 * PMU instance for the core during vcpu init. A dependent use
+	 * case would be a user with disdain of all things big.LITTLE that
+	 * affines the VMM to a particular cluster of cores.
 	 *
 	 * In any case, userspace should just do the sane thing and use the UAPI
 	 * to select a PMU type directly. But, be wary of the baggage being
@@ -804,6 +803,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 	return val & mask;
 }
 
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
+{
+	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+	kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
+
+	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
+	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
+	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+}
+
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
 	if (!kvm_vcpu_has_pmu(vcpu))
@@ -892,6 +902,52 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
 	return true;
 }
 
+/**
+ * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters.
+ * @kvm: The kvm pointer
+ */
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+	struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;
+
+	/*
+	 * The arm_pmu->num_events considers the cycle counter as well.
+	 * Ignore that and return only the general-purpose counters.
+	 */
+	return arm_pmu->num_events - 1;
+}
+
+static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
+{
+	lockdep_assert_held(&kvm->arch.config_lock);
+
+	kvm->arch.arm_pmu = arm_pmu;
+	kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm);
+}
+
+/**
+ * kvm_arm_set_default_pmu - No PMU set, get the default one.
+ * @kvm: The kvm pointer
+ *
+ * The observant among you will notice that the supported_cpus
+ * mask does not get updated for the default PMU even though it
+ * is quite possible the selected instance supports only a
+ * subset of cores in the system. This is intentional, and
+ * upholds the preexisting behavior on heterogeneous systems
+ * where vCPUs can be scheduled on any core but the guest
+ * counters could stop working.
+ */
+int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+	struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();
+
+	if (!arm_pmu)
+		return -ENODEV;
+
+	kvm_arm_set_pmu(kvm, arm_pmu);
+	return 0;
+}
+
 static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -911,7 +967,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
 			break;
 		}
 
-		kvm->arch.arm_pmu = arm_pmu;
+		kvm_arm_set_pmu(kvm, arm_pmu);
 		cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
 		ret = 0;
 		break;
@@ -934,23 +990,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 	if (vcpu->arch.pmu.created)
 		return -EBUSY;
 
-	if (!kvm->arch.arm_pmu) {
-		/*
-		 * No PMU set, get the default one.
-		 *
-		 * The observant among you will notice that the supported_cpus
-		 * mask does not get updated for the default PMU even though it
-		 * is quite possible the selected instance supports only a
-		 * subset of cores in the system. This is intentional, and
-		 * upholds the preexisting behavior on heterogeneous systems
-		 * where vCPUs can be scheduled on any core but the guest
-		 * counters could stop working.
-		 */
-		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
-		if (!kvm->arch.arm_pmu)
-			return -ENODEV;
-	}
-
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
 		int __user *uaddr = (int __user *)(long)attr->addr;
@@ -1090,3 +1129,15 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
 			  ID_AA64DFR0_EL1_PMUVer_V3P5);
 	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
 }
+
+/**
+ * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
+ * @vcpu: The vcpu pointer
+ */
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) &
+			~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+
+	return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+}
@@ -719,14 +719,9 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
 
 static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);
+	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
+	u8 n = vcpu->kvm->arch.pmcr_n;
 
-	/* No PMU available, any PMU reg may UNDEF... */
-	if (!kvm_arm_support_pmu_v3())
-		return 0;
-
-	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
-	n &= ARMV8_PMU_PMCR_N_MASK;
 	if (n)
 		mask |= GENMASK(n - 1, 0);
 
@@ -766,17 +761,15 @@ static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 
 static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-	u64 pmcr;
+	u64 pmcr = 0;
 
-	/* No PMU available, PMCR_EL0 may UNDEF... */
-	if (!kvm_arm_support_pmu_v3())
-		return 0;
-
-	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
-	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
 	if (!kvm_supports_32bit_el0())
 		pmcr |= ARMV8_PMU_PMCR_LC;
 
+	/*
+	 * The value of PMCR.N field is included when the
+	 * vCPU register is read via kvm_vcpu_read_pmcr().
+	 */
 	__vcpu_sys_reg(vcpu, r->reg) = pmcr;
 
 	return __vcpu_sys_reg(vcpu, r->reg);
@@ -826,7 +819,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * Only update writeable bits of PMCR (continuing into
 		 * kvm_pmu_handle_pmcr() as well)
 		 */
-		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
+		val = kvm_vcpu_read_pmcr(vcpu);
 		val &= ~ARMV8_PMU_PMCR_MASK;
 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
 		if (!kvm_supports_32bit_el0())
@@ -834,7 +827,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		kvm_pmu_handle_pmcr(vcpu, val);
 	} else {
 		/* PMCR.P & PMCR.C are RAZ */
-		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
+		val = kvm_vcpu_read_pmcr(vcpu)
 		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
 		p->regval = val;
 	}
@@ -883,7 +876,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
 {
 	u64 pmcr, val;
 
-	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
+	pmcr = kvm_vcpu_read_pmcr(vcpu);
 	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
 		kvm_inject_undefined(vcpu);
@@ -998,6 +991,39 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }
 
+static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
+{
+	bool set;
+
+	val &= kvm_pmu_valid_counter_mask(vcpu);
+
+	switch (r->reg) {
+	case PMOVSSET_EL0:
+		/* CRm[1] being set indicates a SET register, and CLR otherwise */
+		set = r->CRm & 2;
+		break;
+	default:
+		/* Op2[0] being set indicates a SET register, and CLR otherwise */
+		set = r->Op2 & 1;
+		break;
+	}
+
+	if (set)
+		__vcpu_sys_reg(vcpu, r->reg) |= val;
+	else
+		__vcpu_sys_reg(vcpu, r->reg) &= ~val;
+
+	return 0;
+}
+
+static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
+{
+	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
+	return 0;
+}
+
 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
 {
@@ -1107,6 +1133,51 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }
 
+static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+		    u64 *val)
+{
+	*val = kvm_vcpu_read_pmcr(vcpu);
+	return 0;
+}
+
+static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+		    u64 val)
+{
+	u8 new_n = (val >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+	struct kvm *kvm = vcpu->kvm;
+
+	mutex_lock(&kvm->arch.config_lock);
+
+	/*
+	 * The vCPU can't have more counters than the PMU hardware
+	 * implements. Ignore this error to maintain compatibility
+	 * with the existing KVM behavior.
+	 */
+	if (!kvm_vm_has_ran_once(kvm) &&
+	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
+		kvm->arch.pmcr_n = new_n;
+
+	mutex_unlock(&kvm->arch.config_lock);
+
+	/*
+	 * Ignore writes to RES0 bits, read only bits that are cleared on
+	 * vCPU reset, and writable bits that KVM doesn't support yet.
+	 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
+	 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
+	 * But, we leave the bit as it is here, as the vCPU's PMUver might
+	 * be changed later (NOTE: the bit will be cleared on first vCPU run
+	 * if necessary).
+	 */
+	val &= ARMV8_PMU_PMCR_MASK;
+
+	/* The LC bit is RES1 when AArch32 is not supported */
+	if (!kvm_supports_32bit_el0())
+		val |= ARMV8_PMU_PMCR_LC;
+
+	__vcpu_sys_reg(vcpu, r->reg) = val;
+	return 0;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
@@ -2188,9 +2259,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* PMBIDR_EL1 is not trapped */
 
 	{ PMU_SYS_REG(PMINTENSET_EL1),
-	  .access = access_pminten, .reg = PMINTENSET_EL1 },
+	  .access = access_pminten, .reg = PMINTENSET_EL1,
+	  .get_user = get_pmreg, .set_user = set_pmreg },
 	{ PMU_SYS_REG(PMINTENCLR_EL1),
-	  .access = access_pminten, .reg = PMINTENSET_EL1 },
+	  .access = access_pminten, .reg = PMINTENSET_EL1,
+	  .get_user = get_pmreg, .set_user = set_pmreg },
 	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
 
 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
@@ -2238,14 +2311,17 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
 	{ SYS_DESC(SYS_SVCR), undef_access },
 
-	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr,
-	  .reset = reset_pmcr, .reg = PMCR_EL0 },
+	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
+	  .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
 	{ PMU_SYS_REG(PMCNTENSET_EL0),
-	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+	  .access = access_pmcnten, .reg = PMCNTENSET_EL0,
+	  .get_user = get_pmreg, .set_user = set_pmreg },
 	{ PMU_SYS_REG(PMCNTENCLR_EL0),
-	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+	  .access = access_pmcnten, .reg = PMCNTENSET_EL0,
+	  .get_user = get_pmreg, .set_user = set_pmreg },
 	{ PMU_SYS_REG(PMOVSCLR_EL0),
-	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
+	  .access = access_pmovs, .reg = PMOVSSET_EL0,
+	  .get_user = get_pmreg, .set_user = set_pmreg },
 	/*
 	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
 	 * previously (and pointlessly) advertised in the past...
@@ -2273,7 +2349,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
 	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
 	{ PMU_SYS_REG(PMOVSSET_EL0),
-	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
+	  .access = access_pmovs, .reg = PMOVSSET_EL0,
+	  .get_user = get_pmreg, .set_user = set_pmreg },
 
 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
@@ -13,7 +13,6 @@
 #define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
 
 #if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
-
 struct kvm_pmc {
 	u8 idx;	/* index into the pmu->pmc array */
 	struct perf_event *perf_event;
@@ -63,6 +62,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 				    u64 select_idx);
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
 			    struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
@@ -102,7 +102,10 @@ void kvm_vcpu_pmu_resync_el0(void);
 
 u8 kvm_arm_pmu_get_pmuver_limit(void);
 u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
+int kvm_arm_set_default_pmu(struct kvm *kvm);
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
 
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
 #else
 struct kvm_pmu {
 };
@@ -169,6 +172,7 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
 static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
 {
 	return 0;
@@ -179,6 +183,21 @@ static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
 }
 static inline void kvm_vcpu_pmu_resync_el0(void) {}
 
+static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+	return -ENODEV;
+}
+
+static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+	return 0;
+}
+
+static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 #endif
 
 #endif
tools/include/perf/arm_pmuv3.h (new file, 308 lines)
@@ -0,0 +1,308 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*/
|
||||
|
||||
#ifndef __PERF_ARM_PMUV3_H
|
||||
#define __PERF_ARM_PMUV3_H
|
||||
|
||||
#include <assert.h>
|
||||
#include <asm/bug.h>
|
||||
|
||||
#define ARMV8_PMU_MAX_COUNTERS 32
|
||||
#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
|
||||
|
||||
/*
|
||||
* Common architectural and microarchitectural event numbers.
|
||||
*/
|
||||
#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x0000
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x0001
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x0002
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x0003
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x0004
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x0005
|
||||
#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x0006
|
||||
#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x0007
|
||||
#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x0008
|
||||
#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x0009
|
||||
#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x000A
|
||||
#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x000B
|
||||
#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x000C
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x000D
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x000E
|
||||
#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x000F
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x0010
|
||||
#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x0011
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x0012
|
||||
#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x0013
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x0014
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x0015
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x0016
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x0017
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x0018
|
||||
#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x0019
|
||||
#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x001A
|
||||
#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x001B
|
||||
#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x001C
|
||||
#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x001D
|
||||
#define ARMV8_PMUV3_PERFCTR_CHAIN 0x001E
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x001F
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x0020
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x0021
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x0022
|
||||
#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x0023
|
||||
#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x0024
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x0025
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x0026
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x0027
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x0028
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x0029
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x002A
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x002B
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x002C
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x002D
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x002E
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x002F
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x0030
|
||||
#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS 0x0031
|
||||
#define ARMV8_PMUV3_PERFCTR_LL_CACHE 0x0032
|
||||
#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS 0x0033
|
||||
#define ARMV8_PMUV3_PERFCTR_DTLB_WALK 0x0034
|
||||
#define ARMV8_PMUV3_PERFCTR_ITLB_WALK 0x0035
|
||||
#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x0036
|
||||
#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x0037
|
||||
#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x0038
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD 0x0039
|
||||
#define ARMV8_PMUV3_PERFCTR_OP_RETIRED 0x003A
|
||||
#define ARMV8_PMUV3_PERFCTR_OP_SPEC 0x003B
|
||||
#define ARMV8_PMUV3_PERFCTR_STALL 0x003C
|
||||
#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND 0x003D
|
||||
#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND 0x003E
|
||||
#define ARMV8_PMUV3_PERFCTR_STALL_SLOT 0x003F
|
||||
|
||||
/* Statistical profiling extension microarchitectural events */
|
||||
#define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000
|
||||
#define ARMV8_SPE_PERFCTR_SAMPLE_FEED 0x4001
|
||||
#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002
|
||||
#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003
|
||||
|
||||
/* AMUv1 architecture events */
|
||||
#define ARMV8_AMU_PERFCTR_CNT_CYCLES 0x4004
|
||||
#define ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM 0x4005
|
||||
|
||||
/* long-latency read miss events */
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS 0x4006
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD 0x4009
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS 0x400A
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD 0x400B
|
||||
|
||||
/* Trace buffer events */
|
||||
#define ARMV8_PMUV3_PERFCTR_TRB_WRAP 0x400C
|
||||
#define ARMV8_PMUV3_PERFCTR_TRB_TRIG 0x400E
|
||||
|
||||
/* Trace unit events */
|
||||
#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT0 0x4010
|
||||
#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT1 0x4011
|
||||
#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT2 0x4012
|
||||
#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT3 0x4013
|
||||
#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4 0x4018
|
||||
#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5 0x4019
|
||||
#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6 0x401A
|
||||
#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7 0x401B
|
||||
|
||||
/* additional latency from alignment events */
|
||||
#define ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT 0x4020
|
||||
#define ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT 0x4021
|
||||
#define ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT 0x4022
|
||||
|
||||
/* Armv8.5 Memory Tagging Extension events */
|
||||
#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED 0x4024
|
||||
#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD 0x4025
|
||||
#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR 0x4026
|
||||
|
||||
/* ARMv8 recommended implementation defined event types */
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x0040
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x0041
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x0042
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x0043
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x0044
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x0045
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x0046
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x0047
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x0048
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x004C
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x004D
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x004E
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x004F
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x0050
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x0051
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x0052
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x0053
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x0056
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x0057
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x0058
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x005C
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x005D
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x005E
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x005F
|
||||
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x0060
|
||||
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x0061
|
||||
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x0062
|
||||
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x0063
|
||||
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x0064
|
||||
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x0065
|
||||
#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x0066
|
||||
#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x0067
|
||||
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x0068
|
||||
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x0069
|
||||
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x006A
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x006C
|
||||
#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x006D
|
||||
#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x006E
|
||||
#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x006F
|
||||
#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x0070
|
||||
#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x0071
|
||||
#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x0072
|
||||
#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x0073
|
||||
#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x0074
|
||||
#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x0075
|
||||
#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x0076
|
||||
#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x0077
|
||||
#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x0078
|
||||
#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x0079
|
||||
#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x007A
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x007C
|
||||
#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x007D
|
||||
#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x007E
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x0081
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x0082
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x0083
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x0084
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x0086
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x0087
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x0088
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x008A
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x008B
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x008C
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x008D
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x008E
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x008F
|
||||
#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x0090
|
||||
#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x0091
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0x00A0
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0x00A1
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0x00A2
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0x00A3
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0x00A6
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0x00A7
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0x00A8
|
||||
|
||||
/*
|
||||
* Per-CPU PMCR: config reg
|
||||
*/
|
||||
#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */
|
||||
#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */
|
||||
#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */
|
||||
#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
|
||||
#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */
|
||||
#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
|
||||
#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
|
||||
#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */
|
||||
#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
|
||||
#define ARMV8_PMU_PMCR_N_MASK 0x1f
|
||||
#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */
|
||||
|
||||
/*
|
||||
* PMOVSR: counters overflow flag status reg
|
||||
*/
|
||||
#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
|
||||
#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
|
||||
|
||||
/*
|
||||
* PMXEVTYPER: Event selection reg
|
||||
*/
|
||||
#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
|
||||
#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
|
||||
|
||||
/*
|
||||
* Event filters for PMUv3
|
||||
*/
|
||||
#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
|
||||
#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
|
||||
#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
|
||||
|
||||
/*
|
||||
* PMUSERENR: user enable reg
|
||||
*/
|
||||
#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
|
||||
#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
|
||||
#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
|
||||
#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
|
||||
#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
|
||||
|
||||
/* PMMIR_EL1.SLOTS mask */
|
||||
#define ARMV8_PMU_SLOTS_MASK 0xff
|
||||
|
||||
#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
|
||||
#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
|
||||
#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
|
||||
#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
|
||||
|
||||
/*
|
||||
* This code is really good
|
||||
*/
|
||||
|
||||
#define PMEVN_CASE(n, case_macro) \
|
||||
case n: case_macro(n); break
|
||||
|
||||
#define PMEVN_SWITCH(x, case_macro) \
|
||||
do { \
|
||||
switch (x) { \
|
||||
PMEVN_CASE(0, case_macro); \
|
||||
PMEVN_CASE(1, case_macro); \
|
||||
PMEVN_CASE(2, case_macro); \
|
||||
PMEVN_CASE(3, case_macro); \
|
||||
PMEVN_CASE(4, case_macro); \
|
||||
PMEVN_CASE(5, case_macro); \
|
||||
PMEVN_CASE(6, case_macro); \
|
||||
PMEVN_CASE(7, case_macro); \
|
||||
PMEVN_CASE(8, case_macro); \
|
||||
PMEVN_CASE(9, case_macro); \
|
||||
PMEVN_CASE(10, case_macro); \
|
||||
PMEVN_CASE(11, case_macro); \
|
||||
PMEVN_CASE(12, case_macro); \
|
||||
PMEVN_CASE(13, case_macro); \
|
||||
PMEVN_CASE(14, case_macro); \
|
||||
PMEVN_CASE(15, case_macro); \
|
||||
PMEVN_CASE(16, case_macro); \
|
||||
PMEVN_CASE(17, case_macro); \
|
||||
PMEVN_CASE(18, case_macro); \
|
||||
PMEVN_CASE(19, case_macro); \
|
||||
PMEVN_CASE(20, case_macro); \
|
||||
PMEVN_CASE(21, case_macro); \
|
||||
PMEVN_CASE(22, case_macro); \
|
||||
PMEVN_CASE(23, case_macro); \
|
||||
PMEVN_CASE(24, case_macro); \
|
||||
PMEVN_CASE(25, case_macro); \
|
||||
PMEVN_CASE(26, case_macro); \
|
||||
PMEVN_CASE(27, case_macro); \
|
||||
PMEVN_CASE(28, case_macro); \
|
||||
PMEVN_CASE(29, case_macro); \
|
||||
PMEVN_CASE(30, case_macro); \
|
||||
default: \
|
||||
WARN(1, "Invalid PMEV* index\n"); \
|
||||
assert(0); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif
|
@@ -159,6 +159,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
 TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
+TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access
 TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
 TEST_GEN_PROGS_aarch64 += demand_paging_test
 TEST_GEN_PROGS_aarch64 += dirty_log_test
tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c (new file, 670 lines)
@@ -0,0 +1,670 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* vpmu_counter_access - Test vPMU event counter access
|
||||
*
|
||||
* Copyright (c) 2023 Google LLC.
|
||||
*
|
||||
* This test checks if the guest can see the same number of the PMU event
|
||||
* counters (PMCR_EL0.N) that userspace sets, if the guest can access
|
||||
* those counters, and if the guest is prevented from accessing any
|
||||
* other counters.
|
||||
* It also checks if the userspace accesses to the PMU regsisters honor the
|
||||
* PMCR.N value that's set for the guest.
|
||||
* This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
|
||||
*/
|
||||
#include <kvm_util.h>
|
||||
#include <processor.h>
|
||||
#include <test_util.h>
|
||||
#include <vgic.h>
|
||||
#include <perf/arm_pmuv3.h>
|
||||
#include <linux/bitfield.h>
|
||||
|
||||
/* The max number of the PMU event counters (excluding the cycle counter) */
|
||||
#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
|
||||
|
||||
/* The cycle counter bit position that's common among the PMU registers */
|
||||
#define ARMV8_PMU_CYCLE_IDX 31
|
||||
|
||||
struct vpmu_vm {
|
||||
struct kvm_vm *vm;
|
||||
struct kvm_vcpu *vcpu;
|
||||
int gic_fd;
|
||||
};
|
||||
|
||||
static struct vpmu_vm vpmu_vm;
|
||||
|
||||
struct pmreg_sets {
|
||||
uint64_t set_reg_id;
|
||||
uint64_t clr_reg_id;
|
||||
};
|
||||
|
||||
#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
|
||||
|
||||
static uint64_t get_pmcr_n(uint64_t pmcr)
|
||||
{
|
||||
return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
|
||||
}
|
||||
|
||||
static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
|
||||
{
|
||||
*pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
|
||||
*pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
|
||||
}
|
||||
|
||||
static uint64_t get_counters_mask(uint64_t n)
|
||||
{
|
||||
uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
|
||||
|
||||
if (n)
|
||||
mask |= GENMASK(n - 1, 0);
|
||||
return mask;
|
||||
}
|
||||
|
||||
/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
|
||||
static inline unsigned long read_sel_evcntr(int sel)
|
||||
{
|
||||
write_sysreg(sel, pmselr_el0);
|
||||
isb();
|
||||
return read_sysreg(pmxevcntr_el0);
|
||||
}
|
||||
|
||||
/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
|
||||
static inline void write_sel_evcntr(int sel, unsigned long val)
|
||||
{
|
||||
write_sysreg(sel, pmselr_el0);
|
||||
isb();
|
||||
write_sysreg(val, pmxevcntr_el0);
|
||||
isb();
|
||||
}
|
||||
|
||||
/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
|
||||
static inline unsigned long read_sel_evtyper(int sel)
|
||||
{
|
||||
write_sysreg(sel, pmselr_el0);
|
||||
isb();
|
||||
return read_sysreg(pmxevtyper_el0);
|
||||
}
|
||||
|
||||
/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
|
||||
static inline void write_sel_evtyper(int sel, unsigned long val)
|
||||
{
|
||||
write_sysreg(sel, pmselr_el0);
|
||||
isb();
|
||||
write_sysreg(val, pmxevtyper_el0);
|
||||
isb();
|
||||
}
|
||||
|
||||
static inline void enable_counter(int idx)
|
||||
{
|
||||
uint64_t v = read_sysreg(pmcntenset_el0);
|
||||
|
||||
write_sysreg(BIT(idx) | v, pmcntenset_el0);
|
||||
isb();
|
||||
}
|
||||
|
||||
static inline void disable_counter(int idx)
|
||||
{
|
||||
uint64_t v = read_sysreg(pmcntenset_el0);
|
||||
|
||||
write_sysreg(BIT(idx) | v, pmcntenclr_el0);
|
||||
isb();
|
||||
}
|
||||
|
||||
static void pmu_disable_reset(void)
|
||||
{
|
||||
uint64_t pmcr = read_sysreg(pmcr_el0);
|
||||
|
||||
/* Reset all counters, disabling them */
|
||||
pmcr &= ~ARMV8_PMU_PMCR_E;
|
||||
write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
|
||||
isb();
|
||||
}
|
||||
|
||||
#define RETURN_READ_PMEVCNTRN(n) \
|
||||
return read_sysreg(pmevcntr##n##_el0)
|
||||
static unsigned long read_pmevcntrn(int n)
|
||||
{
|
||||
PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define WRITE_PMEVCNTRN(n) \
|
||||
write_sysreg(val, pmevcntr##n##_el0)
|
||||
static void write_pmevcntrn(int n, unsigned long val)
|
||||
{
|
||||
PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
|
||||
isb();
|
||||
}
|
||||
|
||||
#define READ_PMEVTYPERN(n) \
|
||||
return read_sysreg(pmevtyper##n##_el0)
|
||||
static unsigned long read_pmevtypern(int n)
|
||||
{
|
||||
PMEVN_SWITCH(n, READ_PMEVTYPERN);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define WRITE_PMEVTYPERN(n) \
|
||||
write_sysreg(val, pmevtyper##n##_el0)
|
||||
static void write_pmevtypern(int n, unsigned long val)
|
||||
{
|
||||
PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
|
||||
isb();
|
||||
}
|
||||
|
||||
/*
|
||||
* The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
|
||||
* accessors that test cases will use. Each of the accessors will
|
||||
* either directly reads/writes PMEV{CNTR,TYPER}<n>_EL0
|
||||
* (i.e. {read,write}_pmev{cnt,type}rn()), or reads/writes them through
|
||||
* PMXEV{CNTR,TYPER}_EL0 (i.e. {read,write}_sel_ev{cnt,type}r()).
|
||||
*
|
||||
* This is used to test that combinations of those accessors provide
|
||||
* the consistent behavior.
|
||||
*/
|
||||
struct pmc_accessor {
|
||||
/* A function to be used to read PMEVTCNTR<n>_EL0 */
|
||||
unsigned long (*read_cntr)(int idx);
|
||||
/* A function to be used to write PMEVTCNTR<n>_EL0 */
|
||||
void (*write_cntr)(int idx, unsigned long val);
|
||||
/* A function to be used to read PMEVTYPER<n>_EL0 */
|
||||
unsigned long (*read_typer)(int idx);
|
||||
/* A function to be used to write PMEVTYPER<n>_EL0 */
|
||||
void (*write_typer)(int idx, unsigned long val);
|
||||
};
|
||||
|
||||
struct pmc_accessor pmc_accessors[] = {
|
||||
/* test with all direct accesses */
|
||||
{ read_pmevcntrn, write_pmevcntrn, read_pmevtypern, write_pmevtypern },
|
||||
/* test with all indirect accesses */
|
||||
{ read_sel_evcntr, write_sel_evcntr, read_sel_evtyper, write_sel_evtyper },
|
||||
/* read with direct accesses, and write with indirect accesses */
|
||||
{ read_pmevcntrn, write_sel_evcntr, read_pmevtypern, write_sel_evtyper },
|
||||
/* read with indirect accesses, and write with direct accesses */
|
||||
{ read_sel_evcntr, write_pmevcntrn, read_sel_evtyper, write_pmevtypern },
|
||||
};
|
||||
|
||||
/*
|
||||
* Convert a pointer of pmc_accessor to an index in pmc_accessors[],
|
||||
* assuming that the pointer is one of the entries in pmc_accessors[].
|
||||
*/
|
||||
#define PMC_ACC_TO_IDX(acc) (acc - &pmc_accessors[0])
|
||||
|
||||
#define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected) \
|
||||
{ \
|
||||
uint64_t _tval = read_sysreg(regname); \
|
||||
\
|
||||
if (set_expected) \
|
||||
__GUEST_ASSERT((_tval & mask), \
|
||||
"tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \
|
||||
_tval, mask, set_expected); \
|
||||
else \
|
||||
__GUEST_ASSERT(!(_tval & mask), \
|
||||
"tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \
|
||||
_tval, mask, set_expected); \
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers
|
||||
* are set or cleared as specified in @set_expected.
|
||||
*/
|
||||
static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected)
|
||||
{
|
||||
GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected);
|
||||
GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected);
|
||||
GUEST_ASSERT_BITMAP_REG(pmintenset_el1, mask, set_expected);
|
||||
GUEST_ASSERT_BITMAP_REG(pmintenclr_el1, mask, set_expected);
|
||||
GUEST_ASSERT_BITMAP_REG(pmovsset_el0, mask, set_expected);
|
||||
GUEST_ASSERT_BITMAP_REG(pmovsclr_el0, mask, set_expected);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if the bit in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers corresponding
|
||||
* to the specified counter (@pmc_idx) can be read/written as expected.
|
||||
* When @set_op is true, it tries to set the bit for the counter in
|
||||
* those registers by writing the SET registers (the bit won't be set
|
||||
* if the counter is not implemented though).
|
||||
* Otherwise, it tries to clear the bits in the registers by writing
|
||||
* the CLR registers.
|
||||
* Then, it checks if the values indicated in the registers are as expected.
|
||||
*/
|
||||
static void test_bitmap_pmu_regs(int pmc_idx, bool set_op)
|
||||
{
|
||||
uint64_t pmcr_n, test_bit = BIT(pmc_idx);
|
||||
bool set_expected = false;
|
||||
|
||||
if (set_op) {
|
||||
write_sysreg(test_bit, pmcntenset_el0);
|
||||
write_sysreg(test_bit, pmintenset_el1);
|
||||
write_sysreg(test_bit, pmovsset_el0);
|
||||
|
||||
/* The bit will be set only if the counter is implemented */
|
||||
pmcr_n = get_pmcr_n(read_sysreg(pmcr_el0));
|
||||
set_expected = (pmc_idx < pmcr_n) ? true : false;
|
||||
} else {
|
||||
write_sysreg(test_bit, pmcntenclr_el0);
|
||||
write_sysreg(test_bit, pmintenclr_el1);
|
||||
write_sysreg(test_bit, pmovsclr_el0);
|
||||
}
|
||||
check_bitmap_pmu_regs(test_bit, set_expected);
|
||||
}
|
||||
|
||||
/*
|
||||
* Tests for reading/writing registers for the (implemented) event counter
|
||||
* specified by @pmc_idx.
|
||||
*/
|
||||
static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
|
||||
{
|
||||
uint64_t write_data, read_data;
|
||||
|
||||
/* Disable all PMCs and reset all PMCs to zero. */
|
||||
pmu_disable_reset();
|
||||
|
||||
/*
|
||||
* Tests for reading/writing {PMCNTEN,PMINTEN,PMOVS}{SET,CLR}_EL1.
|
||||
*/
|
||||
|
||||
/* Make sure that the bit in those registers are set to 0 */
|
||||
test_bitmap_pmu_regs(pmc_idx, false);
|
||||
/* Test if setting the bit in those registers works */
|
||||
test_bitmap_pmu_regs(pmc_idx, true);
|
||||
/* Test if clearing the bit in those registers works */
|
||||
test_bitmap_pmu_regs(pmc_idx, false);
|
||||
|
||||
/*
|
||||
* Tests for reading/writing the event type register.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Set the event type register to an arbitrary value just for testing
|
||||
* of reading/writing the register.
|
||||
* Arm ARM says that for the event from 0x0000 to 0x003F,
|
||||
* the value indicated in the PMEVTYPER<n>_EL0.evtCount field is
|
||||
* the value written to the field even when the specified event
|
||||
* is not supported.
|
||||
*/
|
||||
write_data = (ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMUV3_PERFCTR_INST_RETIRED);
|
||||
acc->write_typer(pmc_idx, write_data);
|
||||
read_data = acc->read_typer(pmc_idx);
|
||||
__GUEST_ASSERT(read_data == write_data,
|
||||
"pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
|
||||
pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);
|
||||
|
||||
/*
|
||||
* Tests for reading/writing the event count register.
|
||||
*/
|
||||
|
||||
read_data = acc->read_cntr(pmc_idx);
|
||||
|
||||
/* The count value must be 0, as it is disabled and reset */
|
||||
__GUEST_ASSERT(read_data == 0,
|
||||
"pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx",
|
||||
pmc_idx, PMC_ACC_TO_IDX(acc), read_data);
|
||||
|
||||
write_data = read_data + pmc_idx + 0x12345;
|
||||
acc->write_cntr(pmc_idx, write_data);
|
||||
read_data = acc->read_cntr(pmc_idx);
|
||||
__GUEST_ASSERT(read_data == write_data,
|
||||
"pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
|
||||
pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);
|
||||
}
|
||||
|
||||
#define INVALID_EC (-1ul)
|
||||
uint64_t expected_ec = INVALID_EC;
|
||||
|
||||
static void guest_sync_handler(struct ex_regs *regs)
|
||||
{
|
||||
uint64_t esr, ec;
|
||||
|
||||
esr = read_sysreg(esr_el1);
|
||||
ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK;
|
||||
|
||||
__GUEST_ASSERT(expected_ec == ec,
|
||||
"PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx; EC expected: 0x%lx",
|
||||
regs->pc, esr, ec, expected_ec);
|
||||
|
||||
/* skip the trapping instruction */
|
||||
regs->pc += 4;
|
||||
|
||||
/* Use INVALID_EC to indicate an exception occurred */
|
||||
expected_ec = INVALID_EC;
|
||||
}
|
||||
|
||||
/*
|
||||
* Run the given operation that should trigger an exception with the
|
||||
* given exception class. The exception handler (guest_sync_handler)
|
||||
* will reset op_end_addr to 0, expected_ec to INVALID_EC, and skip
|
||||
* the instruction that trapped.
|
||||
*/
|
||||
#define TEST_EXCEPTION(ec, ops) \
|
||||
({ \
|
||||
GUEST_ASSERT(ec != INVALID_EC); \
|
||||
WRITE_ONCE(expected_ec, ec); \
|
||||
dsb(ish); \
|
||||
ops; \
|
||||
GUEST_ASSERT(expected_ec == INVALID_EC); \
|
||||
})
|
||||
|
||||
/*
|
||||
* Tests for reading/writing registers for the unimplemented event counter
|
||||
* specified by @pmc_idx (>= PMCR_EL0.N).
|
||||
*/
|
||||
static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
|
||||
{
|
||||
/*
|
||||
* Reading/writing the event count/type registers should cause
|
||||
* an UNDEFINED exception.
|
||||
*/
|
||||
TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_cntr(pmc_idx));
|
||||
TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_cntr(pmc_idx, 0));
|
||||
TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_typer(pmc_idx));
|
||||
TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_typer(pmc_idx, 0));
|
||||
/*
|
||||
* The bit corresponding to the (unimplemented) counter in
|
||||
* {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers should be RAZ.
|
||||
*/
|
||||
test_bitmap_pmu_regs(pmc_idx, 1);
|
||||
test_bitmap_pmu_regs(pmc_idx, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* The guest is configured with PMUv3 with @expected_pmcr_n number of
|
||||
* event counters.
|
||||
* Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and
|
||||
* if reading/writing PMU registers for implemented or unimplemented
|
||||
* counters works as expected.
|
||||
*/
|
||||
static void guest_code(uint64_t expected_pmcr_n)
|
||||
{
|
||||
uint64_t pmcr, pmcr_n, unimp_mask;
|
||||
int i, pmc;
|
||||
|
||||
__GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
|
||||
"Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx",
|
||||
expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);
|
||||
|
||||
pmcr = read_sysreg(pmcr_el0);
|
||||
pmcr_n = get_pmcr_n(pmcr);
|
||||
|
||||
/* Make sure that PMCR_EL0.N indicates the value userspace set */
|
||||
__GUEST_ASSERT(pmcr_n == expected_pmcr_n,
|
||||
"Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
|
||||
expected_pmcr_n, pmcr_n);
|
||||
|
||||
/*
|
||||
* Make sure that (RAZ) bits corresponding to unimplemented event
|
||||
* counters in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers are reset
|
||||
* to zero.
|
||||
* (NOTE: bits for implemented event counters are reset to UNKNOWN)
|
||||
*/
|
||||
unimp_mask = GENMASK_ULL(ARMV8_PMU_MAX_GENERAL_COUNTERS - 1, pmcr_n);
|
||||
check_bitmap_pmu_regs(unimp_mask, false);
|
||||
|
||||
/*
|
||||
* Tests for reading/writing PMU registers for implemented counters.
|
||||
* Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
|
||||
*/
|
||||
for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
|
||||
for (pmc = 0; pmc < pmcr_n; pmc++)
|
||||
test_access_pmc_regs(&pmc_accessors[i], pmc);
|
||||
}
|
||||
|
||||
/*
|
||||
* Tests for reading/writing PMU registers for unimplemented counters.
|
||||
* Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
|
||||
*/
|
||||
for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
|
||||
for (pmc = pmcr_n; pmc < ARMV8_PMU_MAX_GENERAL_COUNTERS; pmc++)
|
||||
test_access_invalid_pmc_regs(&pmc_accessors[i], pmc);
|
||||
}
|
||||
|
||||
GUEST_DONE();
|
||||
}
|
||||
|
||||
#define GICD_BASE_GPA 0x8000000ULL
|
||||
#define GICR_BASE_GPA 0x80A0000ULL
|
||||
|
||||
/* Create a VM that has one vCPU with PMUv3 configured. */
|
||||
static void create_vpmu_vm(void *guest_code)
|
||||
{
|
||||
struct kvm_vcpu_init init;
|
||||
uint8_t pmuver, ec;
|
||||
uint64_t dfr0, irq = 23;
|
||||
struct kvm_device_attr irq_attr = {
|
||||
.group = KVM_ARM_VCPU_PMU_V3_CTRL,
|
||||
.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
|
||||
.addr = (uint64_t)&irq,
|
||||
};
|
||||
struct kvm_device_attr init_attr = {
|
||||
.group = KVM_ARM_VCPU_PMU_V3_CTRL,
|
||||
.attr = KVM_ARM_VCPU_PMU_V3_INIT,
|
||||
};
|
||||
|
||||
/* The test creates the vpmu_vm multiple times. Ensure a clean state */
|
||||
memset(&vpmu_vm, 0, sizeof(vpmu_vm));
|
||||
|
||||
vpmu_vm.vm = vm_create(1);
|
||||
vm_init_descriptor_tables(vpmu_vm.vm);
|
||||
for (ec = 0; ec < ESR_EC_NUM; ec++) {
|
||||
vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
|
||||
guest_sync_handler);
|
||||
}
|
||||
|
||||
/* Create vCPU with PMUv3 */
|
||||
vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
|
||||
init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
|
||||
vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
|
||||
vcpu_init_descriptor_tables(vpmu_vm.vcpu);
|
||||
vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
|
||||
GICD_BASE_GPA, GICR_BASE_GPA);
|
||||
__TEST_REQUIRE(vpmu_vm.gic_fd >= 0,
|
||||
"Failed to create vgic-v3, skipping");
|
||||
|
||||
/* Make sure that PMUv3 support is indicated in the ID register */
|
||||
vcpu_get_reg(vpmu_vm.vcpu,
|
||||
KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
|
||||
pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
|
||||
TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
|
||||
pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
|
||||
"Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
|
||||
|
||||
/* Initialize vPMU */
|
||||
vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
|
||||
vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
|
||||
}
|
||||
|
||||
static void destroy_vpmu_vm(void)
|
||||
{
|
||||
close(vpmu_vm.gic_fd);
|
||||
kvm_vm_free(vpmu_vm.vm);
|
||||
}
|
||||
|
||||
static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
|
||||
{
|
||||
struct ucall uc;
|
||||
|
||||
vcpu_args_set(vcpu, 1, pmcr_n);
|
||||
vcpu_run(vcpu);
|
||||
switch (get_ucall(vcpu, &uc)) {
|
||||
case UCALL_ABORT:
|
||||
REPORT_GUEST_ASSERT(uc);
|
||||
break;
|
||||
case UCALL_DONE:
|
||||
break;
|
||||
default:
|
||||
TEST_FAIL("Unknown ucall %lu", uc.cmd);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
|
||||
{
|
||||
struct kvm_vcpu *vcpu;
|
||||
uint64_t pmcr, pmcr_orig;
|
||||
|
||||
create_vpmu_vm(guest_code);
|
||||
vcpu = vpmu_vm.vcpu;
|
||||
|
||||
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
|
||||
pmcr = pmcr_orig;
|
||||
|
||||
/*
|
||||
* Setting a larger value of PMCR.N should not modify the field, and
|
||||
* return a success.
|
||||
*/
|
||||
set_pmcr_n(&pmcr, pmcr_n);
|
||||
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
|
||||
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
|
||||
|
||||
if (expect_fail)
|
||||
TEST_ASSERT(pmcr_orig == pmcr,
|
||||
"PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx\n",
|
||||
pmcr, pmcr_n);
|
||||
else
|
||||
TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr),
|
||||
"Failed to update PMCR.N to %lu (received: %lu)\n",
|
||||
pmcr_n, get_pmcr_n(pmcr));
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
|
||||
* and run the test.
|
||||
*/
|
||||
static void run_access_test(uint64_t pmcr_n)
|
||||
{
|
||||
uint64_t sp;
|
||||
struct kvm_vcpu *vcpu;
|
||||
struct kvm_vcpu_init init;
|
||||
|
||||
pr_debug("Test with pmcr_n %lu\n", pmcr_n);
|
||||
|
||||
test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
|
||||
vcpu = vpmu_vm.vcpu;
|
||||
|
||||
/* Save the initial sp to restore them later to run the guest again */
|
||||
vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);
|
||||
|
||||
run_vcpu(vcpu, pmcr_n);
|
||||
|
||||
/*
|
||||
* Reset and re-initialize the vCPU, and run the guest code again to
|
||||
* check if PMCR_EL0.N is preserved.
|
||||
*/
|
||||
vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
|
||||
init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
|
||||
aarch64_vcpu_setup(vcpu, &init);
|
||||
vcpu_init_descriptor_tables(vcpu);
|
||||
vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
|
||||
vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
|
||||
|
||||
run_vcpu(vcpu, pmcr_n);
|
||||
|
||||
destroy_vpmu_vm();
|
||||
}
|
||||
|
||||
static struct pmreg_sets validity_check_reg_sets[] = {
|
||||
PMREG_SET(SYS_PMCNTENSET_EL0, SYS_PMCNTENCLR_EL0),
|
||||
PMREG_SET(SYS_PMINTENSET_EL1, SYS_PMINTENCLR_EL1),
|
||||
PMREG_SET(SYS_PMOVSSET_EL0, SYS_PMOVSCLR_EL0),
|
||||
};
|
||||
|
||||
/*
|
||||
* Create a VM, and check if KVM handles the userspace accesses of
|
||||
* the PMU register sets in @validity_check_reg_sets[] correctly.
|
||||
*/
|
||||
static void run_pmregs_validity_test(uint64_t pmcr_n)
|
||||
{
|
||||
int i;
|
||||
struct kvm_vcpu *vcpu;
|
||||
uint64_t set_reg_id, clr_reg_id, reg_val;
|
||||
uint64_t valid_counters_mask, max_counters_mask;
|
||||
|
||||
test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
|
||||
vcpu = vpmu_vm.vcpu;
|
||||
|
||||
valid_counters_mask = get_counters_mask(pmcr_n);
|
||||
max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(validity_check_reg_sets); i++) {
|
||||
set_reg_id = validity_check_reg_sets[i].set_reg_id;
|
||||
clr_reg_id = validity_check_reg_sets[i].clr_reg_id;
|
||||
|
||||
/*
|
||||
* Test if the 'set' and 'clr' variants of the registers
|
||||
* are initialized based on the number of valid counters.
|
||||
*/
|
||||
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), ®_val);
|
||||
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
|
||||
"Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
|
||||
KVM_ARM64_SYS_REG(set_reg_id), reg_val);
|
||||
|
||||
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), ®_val);
|
||||
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
|
||||
"Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
|
||||
KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
|
||||
|
||||
/*
|
||||
* Using the 'set' variant, force-set the register to the
|
||||
* max number of possible counters and test if KVM discards
|
||||
* the bits for unimplemented counters as it should.
|
||||
*/
|
||||
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), max_counters_mask);
|
||||
|
||||
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), ®_val);
|
||||
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
|
||||
"Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
|
||||
KVM_ARM64_SYS_REG(set_reg_id), reg_val);
|
||||
|
||||
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), ®_val);
|
||||
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
|
||||
"Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
|
||||
KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
|
||||
}
|
||||
|
||||
destroy_vpmu_vm();
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for
|
||||
* the vCPU to @pmcr_n, which is larger than the host value.
|
||||
* The attempt should fail as @pmcr_n is too big to set for the vCPU.
|
||||
*/
|
||||
static void run_error_test(uint64_t pmcr_n)
|
||||
{
|
||||
pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);
|
||||
|
||||
test_create_vpmu_vm_with_pmcr_n(pmcr_n, true);
|
||||
destroy_vpmu_vm();
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the default number of implemented PMU event counters excluding
|
||||
* the cycle counter (i.e. PMCR_EL0.N value) for the guest.
|
||||
*/
|
||||
static uint64_t get_pmcr_n_limit(void)
|
||||
{
|
||||
uint64_t pmcr;
|
||||
|
||||
create_vpmu_vm(guest_code);
|
||||
vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
|
||||
destroy_vpmu_vm();
|
||||
return get_pmcr_n(pmcr);
|
||||
}
|
||||
|
||||
int main(void)
|
||||
{
|
||||
uint64_t i, pmcr_n;
|
||||
|
||||
TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
|
||||
|
||||
pmcr_n = get_pmcr_n_limit();
|
||||
for (i = 0; i <= pmcr_n; i++) {
|
||||
run_access_test(i);
|
||||
run_pmregs_validity_test(i);
|
||||
}
|
||||
|
||||
for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++)
|
||||
run_error_test(i);
|
||||
|
||||
return 0;
|
||||
}
|
@@ -104,6 +104,7 @@ enum {
 #define ESR_EC_SHIFT		26
 #define ESR_EC_MASK		(ESR_EC_NUM - 1)
 
+#define ESR_EC_UNKNOWN		0x0
 #define ESR_EC_SVC64		0x15
 #define ESR_EC_IABT		0x21
 #define ESR_EC_DABT		0x25