KVM: x86: Add EMULTYPE_PF when emulation is triggered by a page fault
Add a new emulation type flag to explicitly mark emulation related to a page fault. Move the propagation of the GPA into the emulator from the page fault handler into x86_emulate_instruction, using EMULTYPE_PF as an indicator that cr2 is valid. Similarly, don't propagate cr2 into the exception.address when it's *not* valid. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
999eabcc89
commit
92daa48b34
@ -1381,8 +1381,9 @@ extern u64 kvm_mce_cap_supported;
|
|||||||
* decode the instruction length. For use *only* by
|
* decode the instruction length. For use *only* by
|
||||||
* kvm_x86_ops->skip_emulated_instruction() implementations.
|
* kvm_x86_ops->skip_emulated_instruction() implementations.
|
||||||
*
|
*
|
||||||
* EMULTYPE_ALLOW_RETRY - Set when the emulator should resume the guest to
|
* EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
|
||||||
* retry native execution under certain conditions.
|
* retry native execution under certain conditions,
|
||||||
|
* Can only be set in conjunction with EMULTYPE_PF.
|
||||||
*
|
*
|
||||||
* EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
|
* EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
|
||||||
* triggered by KVM's magic "force emulation" prefix,
|
* triggered by KVM's magic "force emulation" prefix,
|
||||||
@ -1395,13 +1396,18 @@ extern u64 kvm_mce_cap_supported;
|
|||||||
* backdoor emulation, which is opt in via module param.
|
* backdoor emulation, which is opt in via module param.
|
||||||
* VMware backoor emulation handles select instructions
|
* VMware backoor emulation handles select instructions
|
||||||
* and reinjects the #GP for all other cases.
|
* and reinjects the #GP for all other cases.
|
||||||
|
*
|
||||||
|
* EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
|
||||||
|
* case the CR2/GPA value pass on the stack is valid.
|
||||||
*/
|
*/
|
||||||
#define EMULTYPE_NO_DECODE (1 << 0)
|
#define EMULTYPE_NO_DECODE (1 << 0)
|
||||||
#define EMULTYPE_TRAP_UD (1 << 1)
|
#define EMULTYPE_TRAP_UD (1 << 1)
|
||||||
#define EMULTYPE_SKIP (1 << 2)
|
#define EMULTYPE_SKIP (1 << 2)
|
||||||
#define EMULTYPE_ALLOW_RETRY (1 << 3)
|
#define EMULTYPE_ALLOW_RETRY_PF (1 << 3)
|
||||||
#define EMULTYPE_TRAP_UD_FORCED (1 << 4)
|
#define EMULTYPE_TRAP_UD_FORCED (1 << 4)
|
||||||
#define EMULTYPE_VMWARE_GP (1 << 5)
|
#define EMULTYPE_VMWARE_GP (1 << 5)
|
||||||
|
#define EMULTYPE_PF (1 << 6)
|
||||||
|
|
||||||
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
|
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
|
||||||
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
|
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
|
||||||
void *insn, int insn_len);
|
void *insn, int insn_len);
|
||||||
|
@ -5415,18 +5415,12 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
|
|||||||
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
|
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
|
||||||
void *insn, int insn_len)
|
void *insn, int insn_len)
|
||||||
{
|
{
|
||||||
int r, emulation_type = 0;
|
int r, emulation_type = EMULTYPE_PF;
|
||||||
bool direct = vcpu->arch.mmu->direct_map;
|
bool direct = vcpu->arch.mmu->direct_map;
|
||||||
|
|
||||||
if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
|
if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
|
||||||
return RET_PF_RETRY;
|
return RET_PF_RETRY;
|
||||||
|
|
||||||
/* With shadow page tables, fault_address contains a GVA or nGPA. */
|
|
||||||
if (vcpu->arch.mmu->direct_map) {
|
|
||||||
vcpu->arch.gpa_available = true;
|
|
||||||
vcpu->arch.gpa_val = cr2_or_gpa;
|
|
||||||
}
|
|
||||||
|
|
||||||
r = RET_PF_INVALID;
|
r = RET_PF_INVALID;
|
||||||
if (unlikely(error_code & PFERR_RSVD_MASK)) {
|
if (unlikely(error_code & PFERR_RSVD_MASK)) {
|
||||||
r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
|
r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
|
||||||
@ -5470,7 +5464,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
|
|||||||
* for L1 isn't going to magically fix whatever issue cause L2 to fail.
|
* for L1 isn't going to magically fix whatever issue cause L2 to fail.
|
||||||
*/
|
*/
|
||||||
if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
|
if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
|
||||||
emulation_type = EMULTYPE_ALLOW_RETRY;
|
emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
|
||||||
emulate:
|
emulate:
|
||||||
/*
|
/*
|
||||||
* On AMD platforms, under certain conditions insn_len may be zero on #NPF.
|
* On AMD platforms, under certain conditions insn_len may be zero on #NPF.
|
||||||
|
@ -6492,10 +6492,11 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
|
|||||||
gpa_t gpa = cr2_or_gpa;
|
gpa_t gpa = cr2_or_gpa;
|
||||||
kvm_pfn_t pfn;
|
kvm_pfn_t pfn;
|
||||||
|
|
||||||
if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
|
if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (WARN_ON_ONCE(is_guest_mode(vcpu)))
|
if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
|
||||||
|
WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (!vcpu->arch.mmu->direct_map) {
|
if (!vcpu->arch.mmu->direct_map) {
|
||||||
@ -6583,10 +6584,11 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
|
|||||||
*/
|
*/
|
||||||
vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
|
vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
|
||||||
|
|
||||||
if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
|
if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (WARN_ON_ONCE(is_guest_mode(vcpu)))
|
if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
|
||||||
|
WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (x86_page_table_writing_insn(ctxt))
|
if (x86_page_table_writing_insn(ctxt))
|
||||||
@ -6839,8 +6841,19 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
|
|||||||
}
|
}
|
||||||
|
|
||||||
restart:
|
restart:
|
||||||
/* Save the faulting GPA (cr2) in the address field */
|
if (emulation_type & EMULTYPE_PF) {
|
||||||
ctxt->exception.address = cr2_or_gpa;
|
/* Save the faulting GPA (cr2) in the address field */
|
||||||
|
ctxt->exception.address = cr2_or_gpa;
|
||||||
|
|
||||||
|
/* With shadow page tables, cr2 contains a GVA or nGPA. */
|
||||||
|
if (vcpu->arch.mmu->direct_map) {
|
||||||
|
vcpu->arch.gpa_available = true;
|
||||||
|
vcpu->arch.gpa_val = cr2_or_gpa;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
/* Sanitize the address out of an abundance of paranoia. */
|
||||||
|
ctxt->exception.address = 0;
|
||||||
|
}
|
||||||
|
|
||||||
r = x86_emulate_insn(ctxt);
|
r = x86_emulate_insn(ctxt);
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user