KVM: x86/mmu: Remove redundant is_tdp_mmu_enabled check
This check is redundant because the root shadow page will only be a TDP
MMU page if is_tdp_mmu_enabled() returns true, and is_tdp_mmu_enabled()
never changes for the lifetime of a VM.

It's possible that this check was added for performance reasons, but it
is unlikely to be useful in practice since to_shadow_page() is cheap.
That being said, this patch also caches the return value of
is_tdp_mmu_root() in direct_page_fault(), since there is no reason to
duplicate the call so many times, so performance is not a concern.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210617231948.2591431-3-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent aa23c0ad14
commit 0b873fd7fb
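A note on the "to_shadow_page() is cheap" claim above: the helper resolves a
root HPA to its struct kvm_mmu_page through the page's private field, with no
locking and no iteration. A paraphrased sketch of that existing helper (from
mmu_internal.h; not part of this patch) for reference:

	/*
	 * Sketch of the pre-existing to_shadow_page() helper: convert the
	 * root HPA to its struct page, then read the kvm_mmu_page pointer
	 * stashed in page->private.  A shift, a pfn-to-page lookup, and one
	 * dereference -- hence "cheap".
	 */
	static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
	{
		struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

		return (struct kvm_mmu_page *)page_private(page);
	}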
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3608,7 +3608,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	int root, leaf, level;
 	bool reserved = false;
 
-	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+	if (is_tdp_mmu_root(vcpu->arch.mmu->root_hpa))
 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
 	else
 		leaf = get_walk(vcpu, addr, sptes, &root);
@@ -3780,6 +3780,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 			     bool prefault, int max_level, bool is_tdp)
 {
+	bool is_tdp_mmu_fault = is_tdp_mmu_root(vcpu->arch.mmu->root_hpa);
 	bool write = error_code & PFERR_WRITE_MASK;
 	bool map_writable;
 
@@ -3792,7 +3793,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
 		return RET_PF_EMULATE;
 
-	if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
+	if (!is_tdp_mmu_fault) {
 		r = fast_page_fault(vcpu, gpa, error_code);
 		if (r != RET_PF_INVALID)
 			return r;
@@ -3814,7 +3815,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 
 	r = RET_PF_RETRY;
 
-	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+	if (is_tdp_mmu_fault)
 		read_lock(&vcpu->kvm->mmu_lock);
 	else
 		write_lock(&vcpu->kvm->mmu_lock);
@@ -3825,7 +3826,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (r)
 		goto out_unlock;
 
-	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+	if (is_tdp_mmu_fault)
 		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
 				    pfn, prefault);
 	else
@@ -3833,7 +3834,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 			 prefault, is_tdp);
 
 out_unlock:
-	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+	if (is_tdp_mmu_fault)
 		read_unlock(&vcpu->kvm->mmu_lock);
 	else
 		write_unlock(&vcpu->kvm->mmu_lock);
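Taken together, the direct_page_fault() hunks above replace four separate root
walks with one bool computed up front. A condensed sketch of the resulting
flow (heavily elided; only the lines this patch touches are shown, the rest is
summarized in comments):

	static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
				     bool prefault, int max_level, bool is_tdp)
	{
		/*
		 * Evaluated once per fault: the vCPU's root cannot switch
		 * between the TDP MMU and the shadow MMU while the fault is
		 * being handled, so caching the result is safe.
		 */
		bool is_tdp_mmu_fault = is_tdp_mmu_root(vcpu->arch.mmu->root_hpa);
		int r = RET_PF_RETRY;

		/* ... page-track check, fast path (!is_tdp_mmu_fault only),
		   pfn resolution, and async page fault handling elided ... */

		if (is_tdp_mmu_fault)
			read_lock(&vcpu->kvm->mmu_lock);	/* TDP MMU faults run concurrently */
		else
			write_lock(&vcpu->kvm->mmu_lock);	/* shadow MMU needs exclusion */

		/* ... map via kvm_tdp_mmu_map() or __direct_map() ... */

	out_unlock:
		if (is_tdp_mmu_fault)
			read_unlock(&vcpu->kvm->mmu_lock);
		else
			write_unlock(&vcpu->kvm->mmu_lock);
		/* ... release the pfn ... */
		return r;
	}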
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -86,12 +86,10 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
 static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
 
-static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+static inline bool is_tdp_mmu_root(hpa_t hpa)
 {
 	struct kvm_mmu_page *sp;
 
-	if (!is_tdp_mmu_enabled(kvm))
-		return false;
 	if (WARN_ON(!VALID_PAGE(hpa)))
 		return false;
 
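The hunk above is truncated before the function's tail. After this patch the
whole helper reads roughly as follows (the tail is reconstructed from the
surrounding tree, so treat it as a sketch rather than the verbatim result):

	static inline bool is_tdp_mmu_root(hpa_t hpa)
	{
		struct kvm_mmu_page *sp;

		if (WARN_ON(!VALID_PAGE(hpa)))
			return false;

		/*
		 * The TDP MMU root is tracked as an ordinary kvm_mmu_page, so
		 * the to_shadow_page() lookup alone identifies it -- which is
		 * why the is_tdp_mmu_enabled() pre-check removed above was
		 * redundant rather than load-bearing.
		 */
		sp = to_shadow_page(hpa);

		return sp && is_tdp_mmu_page(sp) && sp->root_count;
	}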