KVM: x86: MMU: Move handle_mmio_page_fault() call to kvm_mmu_page_fault()
Rather than placing a handle_mmio_page_fault() call in each
vcpu->arch.mmu.page_fault() handler, moving it up to kvm_mmu_page_fault()
makes the code better:

- avoids code duplication
- for kvm_arch_async_page_ready(), which is the other caller of
  vcpu->arch.mmu.page_fault(), removes an extra error_code check
- avoids returning both RET_MMIO_PF_* values and raw integer values
  from vcpu->arch.mmu.page_fault()

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit  e9ee956e31
parent  ded5874946
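The pattern of the change is to hoist a check that each vcpu->arch.mmu.page_fault()
handler used to repeat into the single common entry point that dispatches to those
handlers. Below is a minimal, self-contained sketch of that shape as a user-space toy,
not kernel code: the fault_handler_t type, toy_page_fault(), toy_direct_fault() and
toy_mmio_fast_path() are invented for illustration; only the idea of doing the
PFERR_RSVD_MASK check once in the dispatcher mirrors the patch.

#include <stdio.h>

/* Toy stand-in for the kernel's PFERR_RSVD_MASK (bit 3 of the error code). */
#define PFERR_RSVD_MASK (1u << 3)

typedef int (*fault_handler_t)(unsigned long addr, unsigned int error_code);

/* Before the patch, every handler like this one started with its own
 * reserved-bit/MMIO fast-path check; after it, only the dispatcher checks. */
static int toy_direct_fault(unsigned long addr, unsigned int error_code)
{
        printf("slow path: addr=%#lx err=%#x\n", addr, error_code);
        return 1;               /* handled, resume the guest */
}

/* Pretend MMIO fast path: a result >= 0 means it resolved the fault itself. */
static int toy_mmio_fast_path(unsigned long addr)
{
        return addr >= 0xfee00000 ? 1 : -1;
}

/* Common entry point: do the check once, then dispatch to the handler. */
static int toy_page_fault(fault_handler_t handler, unsigned long addr,
                          unsigned int error_code)
{
        if (error_code & PFERR_RSVD_MASK) {
                int r = toy_mmio_fast_path(addr);
                if (r >= 0)
                        return r;
        }
        return handler(addr, error_code);
}

int main(void)
{
        toy_page_fault(toy_direct_fault, 0x1000, 0);
        toy_page_fault(toy_direct_fault, 0xfee00300, PFERR_RSVD_MASK);
        return 0;
}

Keeping the check in one place is what buys the three points in the commit message:
no duplicated fast path in the handlers, no extra error_code check for the other
caller, and the handlers stop mixing RET_MMIO_PF_* values with their ordinary
integer return codes.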
arch/x86/kvm/mmu.c:

@@ -3370,13 +3370,6 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 
 	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
 
-	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, gva, true);
-
-		if (likely(r != RET_MMIO_PF_INVALID))
-			return r;
-	}
-
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		return r;
@@ -3460,13 +3453,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 
 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
-	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, gpa, true);
-
-		if (likely(r != RET_MMIO_PF_INVALID))
-			return r;
-	}
-
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		return r;
@@ -4361,18 +4347,27 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 	enum emulation_result er;
 	bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
 
+	if (unlikely(error_code & PFERR_RSVD_MASK)) {
+		r = handle_mmio_page_fault(vcpu, cr2, direct);
+		if (r == RET_MMIO_PF_EMULATE) {
+			emulation_type = 0;
+			goto emulate;
+		}
+		if (r == RET_MMIO_PF_RETRY)
+			return 1;
+		if (r < 0)
+			return r;
+	}
+
 	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
 	if (r < 0)
-		goto out;
-
-	if (!r) {
-		r = 1;
-		goto out;
-	}
+		return r;
+	if (!r)
+		return 1;
 
 	if (mmio_info_in_cache(vcpu, cr2, direct))
 		emulation_type = 0;
-
+emulate:
 	er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
 	switch (er) {
@@ -4386,8 +4381,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 	default:
 		BUG();
 	}
-out:
-	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
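The kvm_mmu_page_fault() hunks above also fix up the return-value translation: the
MMIO fast path reports RET_MMIO_PF_* codes, while kvm_mmu_page_fault() itself returns
1 to resume the guest and a negative value to propagate an error (the userspace-exit
case sits further down the function, outside these hunks). A small sketch of that
mapping, with the result codes modelled as an invented toy enum rather than the
kernel's actual values:

#include <stdio.h>

/* Invented stand-ins for RET_MMIO_PF_*; values are illustrative only. */
enum toy_mmio_result {
        TOY_MMIO_PF_EMULATE,    /* access must go through the emulator */
        TOY_MMIO_PF_RETRY,      /* MMIO info cached: just re-enter the guest */
        TOY_MMIO_PF_INVALID,    /* not an MMIO fault: take the normal path */
        TOY_MMIO_PF_BUG = -1,   /* internal error */
};

static int toy_emulate(void)    { return 1; }   /* "goto emulate" in the patch */
static int toy_slow_path(void)  { return 1; }   /* mmu.page_fault() in the patch */

/* Map a fast-path result onto the dispatcher's own return convention:
 * 1 = resume the guest, negative = propagate the error. */
static int toy_mmu_page_fault(enum toy_mmio_result r)
{
        switch (r) {
        case TOY_MMIO_PF_EMULATE:
                return toy_emulate();
        case TOY_MMIO_PF_RETRY:
                return 1;
        case TOY_MMIO_PF_INVALID:
                return toy_slow_path();
        default:
                return -1;
        }
}

int main(void)
{
        printf("retry   -> %d\n", toy_mmu_page_fault(TOY_MMIO_PF_RETRY));
        printf("invalid -> %d\n", toy_mmu_page_fault(TOY_MMIO_PF_INVALID));
        return 0;
}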
arch/x86/kvm/paging_tmpl.h:

@@ -702,23 +702,16 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
-	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, addr, mmu_is_nested(vcpu));
-		if (likely(r != RET_MMIO_PF_INVALID))
-			return r;
-
-		/*
-		 * page fault with PFEC.RSVD = 1 is caused by shadow
-		 * page fault, should not be used to walk guest page
-		 * table.
-		 */
-		error_code &= ~PFERR_RSVD_MASK;
-	};
-
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		return r;
 
+	/*
+	 * If PFEC.RSVD is set, this is a shadow page fault.
+	 * The bit needs to be cleared before walking guest page tables.
+	 */
+	error_code &= ~PFERR_RSVD_MASK;
+
 	/*
 	 * Look up the guest pte for the faulting address.
 	 */
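In the paging_tmpl.h hunk, FNAME(page_fault) no longer calls the MMIO fast path
itself; what remains is stripping PFERR_RSVD_MASK from error_code before the guest
page-table walk, since a set reserved bit describes the shadow fault rather than the
guest access (per the comments in the diff). A trivial, self-contained sketch of that
masking with invented TOY_* names:

#include <stdio.h>

/* Invented stand-ins for a few PFERR_* bits; only the reserved bit matters here. */
#define TOY_PFERR_PRESENT (1u << 0)
#define TOY_PFERR_WRITE   (1u << 1)
#define TOY_PFERR_RSVD    (1u << 3)

/* The guest page-table walk should only see the guest-visible access bits,
 * so the reserved-bit flag is cleared first (mirroring the patch). */
static unsigned int toy_error_code_for_walk(unsigned int error_code)
{
        return error_code & ~TOY_PFERR_RSVD;
}

int main(void)
{
        unsigned int ec = TOY_PFERR_PRESENT | TOY_PFERR_WRITE | TOY_PFERR_RSVD;
        printf("raw=%#x  for walk=%#x\n", ec, toy_error_code_for_walk(ec));
        return 0;
}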