KVM: x86/mmu: Use enable_mmio_caching to track if MMIO caching is enabled
Clear enable_mmio_caching if hardware can't support MMIO caching and use the dedicated flag to detect if MMIO caching is enabled instead of assuming shadow_mmio_value==0 means MMIO caching is disabled. TDX will use a zero value even when caching is enabled, and is_mmio_spte() isn't so hot that it needs to avoid an extra memory access, i.e. there's no reason to be super clever. And the clever approach may not even be more performant, e.g. gcc-11 lands the extra check on a non-zero value inline, but puts the enable_mmio_caching out-of-line, i.e. avoids the few extra uops for non-MMIO SPTEs. Cc: Isaku Yamahata <isaku.yamahata@intel.com> Cc: Kai Huang <kai.huang@intel.com> Signed-off-by: Sean Christopherson <seanjc@google.com> Message-Id: <20220420002747.3287931-1-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
65936229d3
commit
8b9e74bfbf
@@ -3036,7 +3036,7 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fa
|
|||||||
* and only if L1's MAXPHYADDR is inaccurate with respect to
|
* and only if L1's MAXPHYADDR is inaccurate with respect to
|
||||||
* the hardware's).
|
* the hardware's).
|
||||||
*/
|
*/
|
||||||
if (unlikely(!shadow_mmio_value) ||
|
if (unlikely(!enable_mmio_caching) ||
|
||||||
unlikely(fault->gfn > kvm_mmu_max_gfn())) {
|
unlikely(fault->gfn > kvm_mmu_max_gfn())) {
|
||||||
*ret_val = RET_PF_EMULATE;
|
*ret_val = RET_PF_EMULATE;
|
||||||
return true;
|
return true;
|
||||||
|
@@ -19,7 +19,7 @@
|
|||||||
#include <asm/memtype.h>
|
#include <asm/memtype.h>
|
||||||
#include <asm/vmx.h>
|
#include <asm/vmx.h>
|
||||||
|
|
||||||
static bool __read_mostly enable_mmio_caching = true;
|
bool __read_mostly enable_mmio_caching = true;
|
||||||
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
|
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
|
||||||
|
|
||||||
u64 __read_mostly shadow_host_writable_mask;
|
u64 __read_mostly shadow_host_writable_mask;
|
||||||
@@ -351,6 +351,9 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
|
|||||||
if (WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
|
if (WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
|
||||||
mmio_value = 0;
|
mmio_value = 0;
|
||||||
|
|
||||||
|
if (!mmio_value)
|
||||||
|
enable_mmio_caching = false;
|
||||||
|
|
||||||
shadow_mmio_value = mmio_value;
|
shadow_mmio_value = mmio_value;
|
||||||
shadow_mmio_mask = mmio_mask;
|
shadow_mmio_mask = mmio_mask;
|
||||||
shadow_mmio_access_mask = access_mask;
|
shadow_mmio_access_mask = access_mask;
|
||||||
|
@@ -5,6 +5,8 @@
|
|||||||
|
|
||||||
#include "mmu_internal.h"
|
#include "mmu_internal.h"
|
||||||
|
|
||||||
|
extern bool __read_mostly enable_mmio_caching;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* A MMU present SPTE is backed by actual memory and may or may not be present
|
* A MMU present SPTE is backed by actual memory and may or may not be present
|
||||||
* in hardware. E.g. MMIO SPTEs are not considered present. Use bit 11, as it
|
* in hardware. E.g. MMIO SPTEs are not considered present. Use bit 11, as it
|
||||||
@@ -204,7 +206,7 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
|
|||||||
static inline bool is_mmio_spte(u64 spte)
|
static inline bool is_mmio_spte(u64 spte)
|
||||||
{
|
{
|
||||||
return (spte & shadow_mmio_mask) == shadow_mmio_value &&
|
return (spte & shadow_mmio_mask) == shadow_mmio_value &&
|
||||||
likely(shadow_mmio_value);
|
likely(enable_mmio_caching);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool is_shadow_present_pte(u64 pte)
|
static inline bool is_shadow_present_pte(u64 pte)
|
||||||
|
Loading…
Reference in New Issue
Block a user