KVM: nVMX: Rename EPTP validity helper and associated variables
Rename valid_ept_address() to nested_vmx_check_eptp() to follow the nVMX
nomenclature and to reflect that the function now checks a lot more than
just the address contained in the EPTP.  Rename address to new_eptp in
associated code.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit ac6389ab2c
parent ac69dfaace
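For context, the EPTP being validated packs several fields into a single 64-bit word, and the checks in the diff below walk those fields one by one. The following is a minimal, standalone sketch of that layout, with mask values as defined in arch/x86/include/asm/vmx.h; the eptp_reserved_bits_clear() helper is a hypothetical illustration of the reserved-bits test, not part of this commit.

#include <stdbool.h>
#include <stdint.h>

#define VMX_EPTP_MT_MASK	0x7ull		/* bits 2:0 - memory type */
#define VMX_EPTP_MT_UC		0x0ull		/*   0 = uncacheable */
#define VMX_EPTP_MT_WB		0x6ull		/*   6 = write-back */
#define VMX_EPTP_PWL_MASK	0x38ull		/* bits 5:3 - page-walk levels minus 1 */
#define VMX_EPTP_PWL_4		0x18ull		/*   4-level EPT page walk */
#define VMX_EPTP_PWL_5		0x20ull		/*   5-level EPT page walk */
#define VMX_EPTP_AD_ENABLE_BIT	(1ull << 6)	/* bit 6 - enable accessed/dirty flags */

/*
 * Hypothetical helper mirroring the reserved-bits check in the diff:
 * bits 11:7 are reserved, and no bit at or above the CPU's reported
 * MAXPHYADDR may be set (the PML4 address occupies bits MAXPHYADDR-1:12).
 */
static bool eptp_reserved_bits_clear(uint64_t eptp, int maxphyaddr)
{
	return !(eptp >> maxphyaddr) && !((eptp >> 7) & 0x1f);
}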
@@ -2563,13 +2563,13 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
 	return 0;
 }
 
-static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
+static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	/* Check for memory type validity */
-	switch (address & VMX_EPTP_MT_MASK) {
+	switch (new_eptp & VMX_EPTP_MT_MASK) {
 	case VMX_EPTP_MT_UC:
 		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
 			return false;
@@ -2583,7 +2583,7 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
 	}
 
 	/* Page-walk levels validity. */
-	switch (address & VMX_EPTP_PWL_MASK) {
+	switch (new_eptp & VMX_EPTP_PWL_MASK) {
 	case VMX_EPTP_PWL_5:
 		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
 			return false;
@@ -2597,11 +2597,11 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
 	}
 
 	/* Reserved bits should not be set */
-	if (CC(address >> maxphyaddr || ((address >> 7) & 0x1f)))
+	if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f)))
 		return false;
 
 	/* AD, if set, should be supported */
-	if (address & VMX_EPTP_AD_ENABLE_BIT) {
+	if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
 		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
 			return false;
 	}
@@ -2650,7 +2650,7 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 
 	if (nested_cpu_has_ept(vmcs12) &&
-	    CC(!valid_ept_address(vcpu, vmcs12->ept_pointer)))
+	    CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
 		return -EINVAL;
 
 	if (nested_cpu_has_vmfunc(vmcs12)) {
@@ -5234,7 +5234,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
 				     struct vmcs12 *vmcs12)
 {
 	u32 index = kvm_rcx_read(vcpu);
-	u64 address;
+	u64 new_eptp;
 	bool accessed_dirty;
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 
@@ -5247,23 +5247,23 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
 
 
 	if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
-				     &address, index * 8, 8))
+				     &new_eptp, index * 8, 8))
 		return 1;
 
-	accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
+	accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);
 
 	/*
 	 * If the (L2) guest does a vmfunc to the currently
 	 * active ept pointer, we don't have to do anything else
 	 */
-	if (vmcs12->ept_pointer != address) {
-		if (!valid_ept_address(vcpu, address))
+	if (vmcs12->ept_pointer != new_eptp) {
+		if (!nested_vmx_check_eptp(vcpu, new_eptp))
 			return 1;
 
 		kvm_mmu_unload(vcpu);
 		mmu->ept_ad = accessed_dirty;
 		mmu->mmu_role.base.ad_disabled = !accessed_dirty;
-		vmcs12->ept_pointer = address;
+		vmcs12->ept_pointer = new_eptp;
 		/*
 		 * TODO: Check what's the correct approach in case
 		 * mmu reload fails. Currently, we just let the next
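As a usage note, nested_vmx_eptp_switching() in the last two hunks emulates VMFUNC leaf 0 (EPTP switching) on behalf of an L2 guest: EAX selects the VM function and ECX the index into the EPTP list, which is why the code reads the index via kvm_rcx_read() and loads entry "index * 8" from the list page. Below is a minimal sketch, assuming GCC-style inline asm, of how a guest would issue the instruction; the wrapper name is illustrative only.

#include <stdint.h>

/* VMFUNC is encoded as 0f 01 d4; function 0 (EAX = 0) switches to
 * the EPTP at index ECX in the list at vmcs12->eptp_list_address. */
static inline void vmfunc_switch_eptp(uint32_t index)
{
	asm volatile(".byte 0x0f, 0x01, 0xd4"
		     : : "a" (0), "c" (index)
		     : "memory");
}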