KVM: PPC: Book3S HV P9: Comment and fix MMU context switching code
Tighten up partition switching code synchronisation and comments.

In particular, hwsync ; isync is required after the last access that is
performed in the context of a partition, before the partition is switched
away from.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-40-npiggin@gmail.com
parent 5236756d04
commit cf3b16cfa6
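The ordering rule the message states can be sketched in plain C. This is a
minimal model, not kernel code: hwsync() and isync() below are mere compiler
barriers standing in for the real POWER9 sync/isync instructions, and
mtspr_lpid()/mtspr_pid() are hypothetical stubs for the mtspr moves.

	#include <stdint.h>

	/* Compiler barriers standing in for the real sync/isync instructions. */
	#define hwsync()	asm volatile("" ::: "memory")
	#define isync()		asm volatile("" ::: "memory")

	/* Hypothetical SPR shadows; the kernel uses mtspr(SPRN_LPID/SPRN_PID). */
	static uint32_t spr_lpid, spr_pid;
	static void mtspr_lpid(uint32_t v) { spr_lpid = v; }
	static void mtspr_pid(uint32_t v)  { spr_pid = v; }

	/*
	 * hwsync ; isync comes after the last access performed in the context
	 * of the outgoing partition, before LPIDR/PIDR are moved: hwsync
	 * drains stores so the not-my-LPAR tlbie logic cannot overlook them,
	 * isync ensures no access is still in flight under the old context.
	 * The isync after each SPR move trivially satisfies the ISA
	 * requirement that the switch be context-synchronised before any
	 * dependent access.
	 */
	static void switch_away_from_partition(uint32_t new_lpid, uint32_t new_pid)
	{
		hwsync();
		isync();
		mtspr_pid(new_pid);
		isync();
		mtspr_lpid(new_lpid);
		isync();
	}

	int main(void)
	{
		switch_away_from_partition(0, 0);	/* e.g. back to the host partition */
		return (int)(spr_lpid + spr_pid);
	}

The diff below applies this rule to the assembly exit path, the quadrant
copy helper, and the C entry/exit MMU switch functions.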
arch/powerpc/kvm/book3s_64_entry.S
@@ -374,11 +374,16 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 BEGIN_FTR_SECTION
 	mtspr	SPRN_DAWRX1,r10
 END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
-	mtspr	SPRN_PID,r10
 
 	/*
-	 * Switch to host MMU mode
+	 * Switch to host MMU mode (don't have the real host PID but we aren't
+	 * going back to userspace).
 	 */
+	hwsync
+	isync
+
+	mtspr	SPRN_PID,r10
+
 	ld	r10, HSTATE_KVM_VCPU(r13)
 	ld	r10, VCPU_KVM(r10)
 	lwz	r10, KVM_HOST_LPID(r10)
@@ -389,6 +394,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
 	ld	r10, KVM_HOST_LPCR(r10)
 	mtspr	SPRN_LPCR,r10
 
+	isync
+
 	/*
 	 * Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
 	 * MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -57,6 +57,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 
 	preempt_disable();
 
+	asm volatile("hwsync" ::: "memory");
+	isync();
 	/* switch the lpid first to avoid running host with unallocated pid */
 	old_lpid = mfspr(SPRN_LPID);
 	if (old_lpid != lpid)
@@ -75,6 +77,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 		ret = __copy_to_user_inatomic((void __user *)to, from, n);
 	pagefault_enable();
 
+	asm volatile("hwsync" ::: "memory");
+	isync();
 	/* switch the pid first to avoid running host with unallocated pid */
 	if (quadrant == 1 && pid != old_pid)
 		mtspr(SPRN_PID, old_pid);
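The two hunks above bracket the temporary quadrant mapping switch. As a
rough, self-contained C model of that flow, using the same illustrative stubs
as the sketch near the top (copy_in_guest_context() is an invented name, and
memcpy() stands in for __copy_to/from_user_inatomic under the foreign
mapping): switch the lpid before the pid on the way in, so the host never
runs with an unallocated pid, do the access, then switch the pid back before
the lpid on the way out, with hwsync ; isync ahead of each transition.

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	#define hwsync()	asm volatile("" ::: "memory")	/* models: sync  */
	#define isync()		asm volatile("" ::: "memory")	/* models: isync */

	static uint32_t spr_lpid, spr_pid;	/* hypothetical SPR shadows */
	static void mtspr_lpid(uint32_t v) { spr_lpid = v; }
	static void mtspr_pid(uint32_t v)  { spr_pid = v; }

	/* Model of the quadrant-copy flow; not the kernel implementation. */
	static void copy_in_guest_context(uint32_t lpid, uint32_t pid,
					  void *to, const void *from, size_t n)
	{
		uint32_t old_lpid = spr_lpid, old_pid = spr_pid;

		/* Switching away from the host context: drain and synchronise,
		 * then move the lpid first to avoid running the host with an
		 * unallocated pid. */
		hwsync();
		isync();
		mtspr_lpid(lpid);
		isync();
		mtspr_pid(pid);
		isync();

		memcpy(to, from, n);	/* the access made under the guest context */

		/* Switching away from the guest context: same barriers, and
		 * the pid is moved back first for the same reason. */
		hwsync();
		isync();
		mtspr_pid(old_pid);
		isync();
		mtspr_lpid(old_lpid);
		isync();
	}

	int main(void)
	{
		char dst[8], src[8] = "guest";
		copy_in_guest_context(1, 2, dst, src, sizeof(src));
		return 0;
	}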
arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -531,17 +531,19 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
 	lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
 
 	/*
-	 * All the isync()s are overkill but trivially follow the ISA
-	 * requirements. Some can likely be replaced with justification
-	 * comment for why they are not needed.
+	 * Prior memory accesses to host PID Q3 must be completed before we
+	 * start switching, and stores must be drained to avoid not-my-LPAR
+	 * logic (see switch_mmu_to_host).
 	 */
+	asm volatile("hwsync" ::: "memory");
 	isync();
 	mtspr(SPRN_LPID, lpid);
 	isync();
 	mtspr(SPRN_LPCR, lpcr);
 	isync();
 	mtspr(SPRN_PID, vcpu->arch.pid);
-	isync();
+	/*
+	 * isync not required here because we are HRFID'ing to guest before
+	 * any guest context access, which is context synchronising.
+	 */
 }
 
 static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
@@ -551,25 +553,41 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
 
 	lpid = kvm->arch.lpid;
 
+	/*
+	 * See switch_mmu_to_guest_radix. ptesync should not be required here
+	 * even if the host is in HPT mode because speculative accesses would
+	 * not cause RC updates (we are in real mode).
+	 */
+	asm volatile("hwsync" ::: "memory");
+	isync();
 	mtspr(SPRN_LPID, lpid);
 	mtspr(SPRN_LPCR, lpcr);
 	mtspr(SPRN_PID, vcpu->arch.pid);
 
 	for (i = 0; i < vcpu->arch.slb_max; i++)
 		mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
 
-	isync();
+	/*
+	 * isync not required here, see switch_mmu_to_guest_radix.
+	 */
 }
 
 static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
 {
+	/*
+	 * The guest has exited, so guest MMU context is no longer being
+	 * non-speculatively accessed, but a hwsync is needed before the
+	 * mtLPIDR / mtPIDR switch, in order to ensure all stores are drained,
+	 * so the not-my-LPAR tlbie logic does not overlook them.
+	 */
+	asm volatile("hwsync" ::: "memory");
 	isync();
 	mtspr(SPRN_PID, pid);
 	isync();
 	mtspr(SPRN_LPID, kvm->arch.host_lpid);
 	isync();
 	mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
-	isync();
+	/*
+	 * isync is not required after the switch, because mtmsrd with L=0
+	 * is performed after this switch, which is context synchronising.
+	 */
 
 	if (!radix_enabled())
 		slb_restore_bolted_realmode();
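This last hunk is also where the patch stops treating every isync as
mandatory. A compilable sketch of switch_mmu_to_host's ordering, with the
same illustrative stubs as the earlier sketches (model_switch_mmu_to_host()
is an invented stand-in, and the LPCR move is omitted for brevity): the
trailing isync is dropped because, on the real exit path, a
context-synchronising mtmsrd with L=0 follows before any host MMU access.

	#include <stdint.h>

	#define hwsync()	asm volatile("" ::: "memory")	/* models: sync  */
	#define isync()		asm volatile("" ::: "memory")	/* models: isync */

	static uint32_t spr_lpid, spr_pid;	/* hypothetical SPR shadows */
	static void mtspr_lpid(uint32_t v) { spr_lpid = v; }
	static void mtspr_pid(uint32_t v)  { spr_pid = v; }

	/* Illustrative model of switch_mmu_to_host's ordering. */
	static void model_switch_mmu_to_host(uint32_t host_lpid, uint32_t host_pid)
	{
		/*
		 * Guest context is no longer accessed non-speculatively, but
		 * stores made under the guest LPAR must still be drained so
		 * the not-my-LPAR tlbie logic does not overlook them.
		 */
		hwsync();
		isync();
		mtspr_pid(host_pid);
		isync();
		mtspr_lpid(host_lpid);
		/*
		 * No trailing isync: the real path performs mtmsrd (L=0)
		 * next, which is itself context synchronising.
		 */
	}

	int main(void)
	{
		model_switch_mmu_to_host(0, 0);
		return (int)(spr_lpid + spr_pid);
	}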