x86: kvm: mmu: make spte mmio mask more explicit

Specify both a mask (i.e., bits to consider) and a value (i.e., pattern of
bits that indicates a special PTE) for mmio SPTEs. On Intel, this lets us
pack even more information into the (SPTE_SPECIAL_MASK | VMX_EPT_RWX_MASK)
mask we use for access tracking, liberating all
(SPTE_SPECIAL_MASK | (non-misconfigured-RWX)) values.

Signed-off-by: Peter Feiner <pfeiner@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

parent ce00053b1c
commit dcdca5fed5
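
To make the new semantics concrete, here is a small standalone sketch (not kernel code) of the mask/value scheme this patch introduces: an MMIO SPTE is now recognized by (spte & mask) == value instead of by all mask bits being set. The bit positions and constants below are illustrative stand-ins for the real SPTE_SPECIAL_MASK, VMX_EPT_RWX_MASK and VMX_EPT_MISCONFIG_WX_VALUE definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SPECIAL_BIT      (1ULL << 62)  /* assumed position of the "special PTE" software bit */
#define EPT_RWX_MASK     0x7ULL        /* EPT read/write/execute bits 2:0 */
#define EPT_MISCONFIG_WX 0x6ULL        /* write+execute without read -> EPT misconfig */

static uint64_t mmio_mask, mmio_value;

/* Like kvm_mmu_set_mmio_spte_mask(): the value must be a subset of the mask. */
static void set_mmio_spte_mask(uint64_t mask, uint64_t value)
{
	assert((mask & value) == value);
	mmio_value = value | SPECIAL_BIT;
	mmio_mask  = mask  | SPECIAL_BIT;
}

/* Like the new is_mmio_spte(): compare the masked bits against the value, not the mask. */
static int is_mmio(uint64_t spte)
{
	return (spte & mmio_mask) == mmio_value;
}

int main(void)
{
	set_mmio_spte_mask(EPT_RWX_MASK, EPT_MISCONFIG_WX);

	/* The write/execute misconfig pattern is recognized as an MMIO SPTE ... */
	printf("WX misconfig: %d\n", is_mmio(SPECIAL_BIT | EPT_MISCONFIG_WX)); /* prints 1 */
	/* ... while other special+RWX combinations (e.g. RWX = 000, used for
	 * access tracking) no longer match and stay free for other uses. */
	printf("RWX = 000:    %d\n", is_mmio(SPECIAL_BIT | 0x0ULL));           /* prints 0 */
	return 0;
}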

arch/x86/kvm/mmu.c
@@ -183,6 +183,7 @@ static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
 static u64 __read_mostly shadow_mmio_mask;
+static u64 __read_mostly shadow_mmio_value;
 static u64 __read_mostly shadow_present_mask;
 
 /*
@@ -207,8 +208,10 @@ static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIF
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static void mmu_free_roots(struct kvm_vcpu *vcpu);
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
 {
+	BUG_ON((mmio_mask & mmio_value) != mmio_value);
+	shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
 	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
@@ -270,7 +273,7 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 	u64 mask = generation_mmio_spte_mask(gen);
 
 	access &= ACC_WRITE_MASK | ACC_USER_MASK;
-	mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT;
+	mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
 
 	trace_mark_mmio_spte(sptep, gfn, access, gen);
 	mmu_spte_set(sptep, mask);
@@ -278,7 +281,7 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 
 static bool is_mmio_spte(u64 spte)
 {
-	return (spte & shadow_mmio_mask) == shadow_mmio_mask;
+	return (spte & shadow_mmio_mask) == shadow_mmio_value;
 }
 
 static gfn_t get_mmio_spte_gfn(u64 spte)

arch/x86/kvm/mmu.h
@@ -51,7 +51,7 @@ static inline u64 rsvd_bits(int s, int e)
 	return ((1ULL << (e - s + 1)) - 1) << s;
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);
 
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

arch/x86/kvm/vmx.c
@@ -5163,7 +5163,8 @@ static void ept_set_mmio_spte_mask(void)
	 * EPT Misconfigurations can be generated if the value of bits 2:0
	 * of an EPT paging-structure entry is 110b (write/execute).
	 */
-	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE);
+	kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
+				   VMX_EPT_MISCONFIG_WX_VALUE);
 }
 
 #define VMX_XSS_EXIT_BITMAP 0

arch/x86/kvm/x86.c
@@ -6009,7 +6009,7 @@ static void kvm_set_mmio_spte_mask(void)
 		mask &= ~1ull;
 #endif
 
-	kvm_mmu_set_mmio_spte_mask(mask);
+	kvm_mmu_set_mmio_spte_mask(mask, mask);
 }
 
 #ifdef CONFIG_X86_64
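
Note that the legacy caller above passes the same value for both arguments: with mask == value, the new (spte & mask) == value test is exactly the old (spte & mask) == mask test, so only the EPT path actually gains anything from the split. A minimal illustration with an arbitrary, non-kernel mask:

#include <assert.h>
#include <stdint.h>

/* The new MMIO check: the masked bits must equal the configured value. */
static int matches(uint64_t spte, uint64_t mask, uint64_t value)
{
	return (spte & mask) == value;
}

int main(void)
{
	uint64_t mask = 0x3ULL << 52;   /* arbitrary illustrative mask, not the real KVM value */

	/* Passing mask as the value too behaves like the old "all mask bits set" check. */
	assert(matches(mask, mask, mask));          /* all mask bits set -> MMIO, as before */
	assert(!matches(1ULL << 52, mask, mask));   /* partial match -> not MMIO, as before */
	return 0;
}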