KVM: x86: let reexecute_instruction work for tdp

Currently, reexecute_instruction refuses to retry any instruction if
tdp is enabled. However, when nested npt is in use the emulation failure
may be caused by a write to a shadowed page table, which can be fixed by
dropping that shadow page. The only case in which tdp cannot retry the
instruction is when the access faults on an error pfn.
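
To summarize the new policy, the sketch below condenses the logic introduced
by the diff further down. It is a paraphrase, not the literal final code: the
real direct-map path only calls kvm_mmu_unprotect_page() when
vcpu->kvm->arch.indirect_shadow_pages is non-zero (read under mmu_lock),
which the sketch leaves out.

        static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2)
        {
                gpa_t gpa = cr2;
                pfn_t pfn;

                if (!vcpu->arch.mmu.direct_map) {
                        /* shadow paging: translate the faulting write access */
                        gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
                        if (gpa == UNMAPPED_GVA)
                                return true;    /* invalid guest mapping, let cpu fault */
                }

                /* the only unfixable case: the access faults on an error pfn */
                pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
                if (is_error_noslot_pfn(pfn))
                        return false;
                kvm_release_pfn_clean(pfn);

                /* drop any shadow page mapping this gfn and let the guest retry */
                kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
                return true;
        }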

Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Xiao Guangrong 2013-01-13 23:46:52 +08:00 committed by Marcelo Tosatti
parent 22368028fe
commit 95b3cf69bd

--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4751,25 +4751,25 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
         return r;
 }
 
-static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
+static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2)
 {
-        gpa_t gpa;
+        gpa_t gpa = cr2;
         pfn_t pfn;
 
-        if (tdp_enabled)
-                return false;
-
-        gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
-        if (gpa == UNMAPPED_GVA)
-                return true; /* let cpu generate fault */
+        if (!vcpu->arch.mmu.direct_map) {
+                /*
+                 * Write permission should be allowed since only
+                 * write access need to be emulated.
+                 */
+                gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
 
-        /*
-         * if emulation was due to access to shadowed page table
-         * and it failed try to unshadow page and re-enter the
-         * guest to let CPU execute the instruction.
-         */
-        if (kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)))
-                return true;
+                /*
+                 * If the mapping is invalid in guest, let cpu retry
+                 * it to generate fault.
+                 */
+                if (gpa == UNMAPPED_GVA)
+                        return true;
+        }
 
         /*
          * Do not retry the unhandleable instruction if it faults on the
@@ -4778,12 +4778,37 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
          * instruction -> ...
          */
         pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
-        if (!is_error_noslot_pfn(pfn)) {
-                kvm_release_pfn_clean(pfn);
-                return true;
-        }
 
-        return false;
+        /*
+         * If the instruction failed on the error pfn, it can not be fixed,
+         * report the error to userspace.
+         */
+        if (is_error_noslot_pfn(pfn))
+                return false;
+
+        kvm_release_pfn_clean(pfn);
+
+        /* The instructions are well-emulated on direct mmu. */
+        if (vcpu->arch.mmu.direct_map) {
+                unsigned int indirect_shadow_pages;
+
+                spin_lock(&vcpu->kvm->mmu_lock);
+                indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
+                spin_unlock(&vcpu->kvm->mmu_lock);
+
+                if (indirect_shadow_pages)
+                        kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+
+                return true;
+        }
+
+        /*
+         * if emulation was due to access to shadowed page table
+         * and it failed try to unshadow page and re-enter the
+         * guest to let CPU execute the instruction.
+         */
+        kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+
+        return true;
 }
 
 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,