KVM: x86/mmu: Support shadowing NPT when 5-level paging is enabled in host
When the 5-level page table CPU flag is set in the host, but the guest
has CR4.LA57=0 (including the case of a 32-bit guest), the top level of
the shadow NPT page tables will be fixed, consisting of one pointer to a
lower-level table and 511 non-present entries.  Extend the existing code
that creates the fixed PML4 or PDP table, to provide a fixed PML5 table
if needed.

This is not needed on EPT because the number of layers in the tables is
specified in the EPTP instead of depending on the host CR4.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Wei Huang <wei.huang2@amd.com>
Message-Id: <20210818165549.3771014-3-wei.huang2@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 746700d21f
commit cb0f722aff
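The fixed top-level construction the message describes can be pictured
with a short userspace sketch: one 512-entry table whose entry 0 points
at the next-lower table and whose other 511 entries stay non-present.
This is an illustrative sketch, not KVM code; build_fixed_top_level()
and PTES_PER_TABLE are made-up names, and pm_mask stands in for the
present/permission bits the real code ORs into the entry.

#include <stdint.h>
#include <string.h>

#define PTES_PER_TABLE 512	/* 4 KiB page / 8-byte entries */

/*
 * Illustrative sketch (not KVM code): a zeroed entry has the present
 * bit clear, so after memset() the table already holds 511 non-present
 * entries; entry 0 then funnels every translation into the
 * lower-level shadow hierarchy.
 */
static void build_fixed_top_level(uint64_t *table, uint64_t lower_pa,
				  uint64_t pm_mask)
{
	memset(table, 0, PTES_PER_TABLE * sizeof(*table));
	table[0] = lower_pa | pm_mask;
}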
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -441,6 +441,7 @@ struct kvm_mmu {
 	u64 *pae_root;
 	u64 *pml4_root;
+	u64 *pml5_root;
 
 	/*
 	 * check zero bits on shadow page table entries, these
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3536,15 +3536,22 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * the shadow page table may be a PAE or a long mode page table.
 	 */
 	pm_mask = PT_PRESENT_MASK | shadow_me_mask;
-	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
 		if (WARN_ON_ONCE(!mmu->pml4_root)) {
 			r = -EIO;
 			goto out_unlock;
 		}
-
 		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
+
+		if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
+			if (WARN_ON_ONCE(!mmu->pml5_root)) {
+				r = -EIO;
+				goto out_unlock;
+			}
+			mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
+		}
 	}
 
 	for (i = 0; i < 4; ++i) {
@@ -3563,7 +3570,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		mmu->pae_root[i] = root | pm_mask;
 	}
 
-	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
+	if (mmu->shadow_root_level == PT64_ROOT_5LEVEL)
+		mmu->root_hpa = __pa(mmu->pml5_root);
+	else if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
 		mmu->root_hpa = __pa(mmu->pml4_root);
 	else
 		mmu->root_hpa = __pa(mmu->pae_root);
@@ -3579,7 +3588,9 @@ out_unlock:
 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
-	u64 *pml4_root, *pae_root;
+	u64 *pml5_root = NULL;
+	u64 *pml4_root = NULL;
+	u64 *pae_root;
 
 	/*
 	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
@@ -3591,21 +3602,15 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
 		return 0;
 
-	/*
-	 * This mess only works with 4-level paging and needs to be updated to
-	 * work with 5-level paging.
-	 */
-	if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL))
-		return -EIO;
-
-	if (mmu->pae_root && mmu->pml4_root)
+	if (mmu->pae_root && mmu->pml4_root && mmu->pml5_root)
 		return 0;
 
 	/*
 	 * The special roots should always be allocated in concert.  Yell and
 	 * bail if KVM ends up in a state where only one of the roots is valid.
 	 */
-	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root))
+	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
+			 mmu->pml5_root))
 		return -EIO;
 
 	/*
@@ -3616,16 +3621,31 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 	if (!pae_root)
 		return -ENOMEM;
 
 #ifdef CONFIG_X86_64
 	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
-	if (!pml4_root) {
-		free_page((unsigned long)pae_root);
-		return -ENOMEM;
-	}
+	if (!pml4_root)
+		goto err_pml4;
+
+	if (mmu->shadow_root_level > PT64_ROOT_4LEVEL) {
+		pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+		if (!pml5_root)
+			goto err_pml5;
+	}
 #endif
 
 	mmu->pae_root = pae_root;
 	mmu->pml4_root = pml4_root;
+	mmu->pml5_root = pml5_root;
 
 	return 0;
+
+#ifdef CONFIG_X86_64
+err_pml5:
+	free_page((unsigned long)pml4_root);
+err_pml4:
+	free_page((unsigned long)pae_root);
+	return -ENOMEM;
+#endif
 }
 
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
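The allocation hunk above swaps inline cleanup for the kernel's usual
goto-unwind pattern: each allocation failure jumps to a label that
frees everything allocated before it, in reverse order, so there is a
single error path however many allocations succeed.  A generic sketch
of the pattern under that reading; the names (alloc_three, err_b,
err_c, want_c) are illustrative, not kernel APIs:

#include <stdlib.h>

/* Generic goto-unwind sketch: later failures free earlier allocations. */
static int alloc_three(void **a, void **b, void **c, int want_c)
{
	*a = *b = *c = NULL;

	*a = calloc(1, 4096);
	if (!*a)
		return -1;

	*b = calloc(1, 4096);
	if (!*b)
		goto err_b;

	if (want_c) {		/* optional, like the 5-level-only pml5 page */
		*c = calloc(1, 4096);
		if (!*c)
			goto err_c;
	}
	return 0;

err_c:
	free(*b);
err_b:
	free(*a);
	return -1;
}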
@@ -5461,6 +5481,7 @@ static void free_mmu_pages(struct kvm_mmu *mmu)
 		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
 	free_page((unsigned long)mmu->pae_root);
 	free_page((unsigned long)mmu->pml4_root);
+	free_page((unsigned long)mmu->pml5_root);
 }
 
 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
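One detail worth noting about the teardown hunk above: pml5_root stays
NULL when the host does not use 5-level paging, yet the added free is
unconditional.  That is safe because the kernel's free_page() treats a
zero address as a no-op, the same convention as free(NULL) in C.  A
minimal userspace analogue; struct and function names here are made up
for illustration:

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical mirror of the three special roots kept by struct kvm_mmu. */
struct shadow_roots {
	uint64_t *pae_root;
	uint64_t *pml4_root;
	uint64_t *pml5_root;	/* stays NULL unless 5-level paging is used */
};

/*
 * Teardown mirroring free_mmu_pages(): freeing a NULL pointer is a
 * no-op, so no level check is needed when releasing the roots.
 */
static void free_shadow_roots(struct shadow_roots *roots)
{
	free(roots->pae_root);
	free(roots->pml4_root);
	free(roots->pml5_root);
}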