mirror of https://github.com/torvalds/linux.git (synced 2024-12-19 09:32:32 +00:00)
ce88decffd
The idea is from Avi:

| We could cache the result of a miss in an spte by using a reserved bit, and
| checking the page fault error code (or seeing if we get an ept violation or
| ept misconfiguration), so if we get repeated mmio on a page, we don't need to
| search the slot list/tree.
| (https://lkml.org/lkml/2011/2/22/221)

When a page fault is caused by mmio, we cache the info in the shadow page table and also set the reserved bits there, so if the mmio fault happens again we can quickly identify it and emulate it directly.

Searching the mmio gfn in memslots is heavy since we need to walk all memslots; this feature reduces that cost, and it also avoids walking the guest page table for soft mmu.

[jan: fix operator precedence issue]

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
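As a rough illustration of the mechanism described above, here is a minimal, standalone user-space sketch, not kernel code: the reserved-bit position, the gfn layout, and the helpers mark_mmio_spte()/is_mmio_spte() are illustrative assumptions chosen for this example. The real implementation lives in arch/x86/kvm/mmu.c and is configured through kvm_mmu_set_mmio_spte_mask(), which is declared in the header below.

/*
 * Minimal sketch of reserved-bit MMIO caching in an spte. The bit layout
 * and helper names are assumptions made for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MMIO_SPTE_MASK	(1ULL << 51)	/* assumed reserved bit in the spte */
#define MMIO_GFN_SHIFT	12		/* assumed position of the cached gfn */

/* Build an spte that records an MMIO gfn and sets the reserved bit. */
static uint64_t mark_mmio_spte(uint64_t gfn)
{
	return MMIO_SPTE_MASK | (gfn << MMIO_GFN_SHIFT);
}

/* A later fault on this spte is identified as MMIO via the reserved bit. */
static bool is_mmio_spte(uint64_t spte)
{
	return (spte & MMIO_SPTE_MASK) != 0;
}

int main(void)
{
	uint64_t spte = mark_mmio_spte(0xfee00ULL);	/* e.g. local APIC gfn */

	if (is_mmio_spte(spte)) {
		/* Repeated MMIO on this page can skip the memslot walk. */
		uint64_t gfn = (spte & ~MMIO_SPTE_MASK) >> MMIO_GFN_SHIFT;
		printf("cached MMIO gfn = 0x%llx\n", (unsigned long long)gfn);
	}
	return 0;
}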
105 lines
2.7 KiB
C
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_RSVD_MASK (1U << 3)
#define PFERR_FETCH_MASK (1U << 4)

int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	return kvm->arch.n_max_mmu_pages -
		kvm->arch.n_used_mmu_pages;
}

static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_mmu_available_pages(vcpu->kvm) < KVM_MIN_FREE_MMU_PAGES))
		__kvm_mmu_free_some_pages(vcpu);
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline int is_present_gpte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

static inline bool check_write_user_access(struct kvm_vcpu *vcpu,
					   bool write_fault, bool user_fault,
					   unsigned long pte)
{
	if (unlikely(write_fault && !is_writable_pte(pte)
	      && (user_fault || is_write_protection(vcpu))))
		return false;

	if (unlikely(user_fault && !(pte & PT_USER_MASK)))
		return false;

	return true;
}
#endif