KVM: PPC: Book3S HV P9: Switch PMU to guest as late as possible

This moves the PMU switch to the guest as late as possible on entry, and
the switch back to the host as early as possible on exit. This gives the
host as much perf coverage of the KVM entry/exit code as possible.

This is slightly suboptimal from an SPR scheduling point of view when the
PMU is enabled, but when perf is disabled there is no real difference.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-34-npiggin@gmail.com
commit 3e7b337902
parent 3f9e2966d1
Author:    Nicholas Piggin <npiggin@gmail.com>
Date:      2021-11-23 19:52:11 +10:00
Committer: Michael Ellerman <mpe@ellerman.id.au>

 2 files changed, 4 insertions(+), 8 deletions(-)

--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3833,8 +3833,6 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	s64 dec;
 	int trap;
 
-	switch_pmu_to_guest(vcpu, &host_os_sprs);
-
 	save_p9_host_os_sprs(&host_os_sprs);
 
 	/*
@@ -3897,9 +3895,11 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
 	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
 
+	switch_pmu_to_guest(vcpu, &host_os_sprs);
 	trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
 				  __pa(&vcpu->arch.regs));
 	kvmhv_restore_hv_return_state(vcpu, &hvregs);
+	switch_pmu_to_host(vcpu, &host_os_sprs);
 	vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
 	vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
 	vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
@@ -3918,8 +3918,6 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 
 	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
 
-	switch_pmu_to_host(vcpu, &host_os_sprs);
-
 	return trap;
 }
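
For the nested (H_ENTER_NESTED) path, the net effect is that the PMU
changes hands only for the duration of the hypercall itself. A simplified
sketch of the resulting ordering, keeping only the calls visible in the
hunks above (everything else elided):

    /* Sketch only: guest SPR setup and exit handling elided. */
    save_p9_host_os_sprs(&host_os_sprs);
    /* ... set up guest state (DAR, DSISR, hvregs, ...) ... */
    switch_pmu_to_guest(vcpu, &host_os_sprs);       /* now as late as possible */
    trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
                              __pa(&vcpu->arch.regs));
    kvmhv_restore_hv_return_state(vcpu, &hvregs);
    switch_pmu_to_host(vcpu, &host_os_sprs);        /* now as early as possible */
    /* ... save guest exit state, restore host state ... */
    restore_p9_host_os_sprs(vcpu, &host_os_sprs);
    return trap;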

--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -601,8 +601,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
 	local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
 
-	switch_pmu_to_guest(vcpu, &host_os_sprs);
-
 	save_p9_host_os_sprs(&host_os_sprs);
 
 	/*
@@ -744,7 +742,9 @@ tm_return_to_guest:
 
 	accumulate_time(vcpu, &vcpu->arch.guest_time);
 
+	switch_pmu_to_guest(vcpu, &host_os_sprs);
 	kvmppc_p9_enter_guest(vcpu);
+	switch_pmu_to_host(vcpu, &host_os_sprs);
 
 	accumulate_time(vcpu, &vcpu->arch.rm_intr);
@@ -955,8 +955,6 @@ tm_return_to_guest:
 		asm volatile(PPC_CP_ABORT);
 
 out:
-	switch_pmu_to_host(vcpu, &host_os_sprs);
-
 	end_timing(vcpu);
 
 	return trap;
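
The non-nested path follows the same pattern. A simplified sketch of
kvmhv_vcpu_entry_p9 after this change, again keeping only the calls that
appear in the hunks above, so the PMU is switched immediately around the
actual guest entry rather than around the whole function:

    /* Sketch only: interrupt handling and most SPR work elided. */
    local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
    local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
    save_p9_host_os_sprs(&host_os_sprs);
    /* ... load guest state ... */
    switch_pmu_to_guest(vcpu, &host_os_sprs);   /* now as late as possible */
    kvmppc_p9_enter_guest(vcpu);                /* the actual guest entry */
    switch_pmu_to_host(vcpu, &host_os_sprs);    /* now as early as possible */
    /* ... handle the exit, restore host state ... */
    end_timing(vcpu);
    return trap;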