KVM: x86/mmu: Add accessors to query mmu_role bits
Add accessors via a builder macro for all mmu_role bits that track a CR0, CR4, or EFER bit, abstracting whether the bits are in the base or the extended role. Future commits will switch to using mmu_role instead of vCPU state to configure the MMU, i.e. there are about to be a large number of users. Signed-off-by: Sean Christopherson <seanjc@google.com> Message-Id: <20210622175739.3610207-26-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
167f8a5cae
commit
6066772455
@ -206,6 +206,27 @@ BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
|
||||
/* Query EFER.NX / EFER.LMA from the raw register snapshot used to build the role. */
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
/*
 * The MMU itself (with a valid role) is the single source of truth for the
 * MMU. Do not use the regs used to build the MMU/role, nor the vCPU. The
 * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
 * and the vCPU may be incorrect/irrelevant.
 */
/*
 * Generate is_<reg>_<name>(mmu) helpers that read a cached CR0/CR4/EFER bit
 * out of mmu->mmu_role; @base_or_ext selects whether the bit is tracked in
 * the base or the extended role.
 */
#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
static inline bool is_##reg##_##name(struct kvm_mmu *mmu)	\
{								\
	return !!(mmu->mmu_role. base_or_ext . reg##_##name);	\
}
|
||||
/* Accessors for the mmu_role bits that mirror CR0, CR4, and EFER state. */
BUILD_MMU_ROLE_ACCESSOR(ext, cr0, pg);
BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pae);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
|
||||
|
||||
static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_mmu_role_regs regs = {
|
||||
|
@ -471,7 +471,7 @@ retry_walk:
|
||||
|
||||
error:
|
||||
errcode |= write_fault | user_fault;
|
||||
if (fetch_fault && (mmu->nx || mmu->mmu_role.ext.cr4_smep))
|
||||
if (fetch_fault && (mmu->nx || is_cr4_smep(mmu)))
|
||||
errcode |= PFERR_FETCH_MASK;
|
||||
|
||||
walker->fault.vector = PF_VECTOR;
|
||||
|
Loading…
Reference in New Issue
Block a user