KVM: x86/mmu: Merge TDP MMU put and free root
kvm_tdp_mmu_put_root and kvm_tdp_mmu_free_root are always called together, so merge the functions to simplify TDP MMU root refcounting and freeing.

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210401233736.638171-5-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4bba36d72b
commit 2bdb3d84ce
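Before the diff, here is a minimal stand-alone sketch of the pattern this patch adopts: the "put" operation drops the reference count and, once it reaches zero, performs the teardown itself, so callers never pair it with a separate "free" call. This is illustrative user-space C, not kernel code; the names root_put and root_free and the struct layout are invented for the example.

/* Illustrative sketch only; not the kernel implementation. */
#include <stdio.h>
#include <stdlib.h>

struct root {
	int root_count;			/* reference count held by users of the root */
};

static void root_free(struct root *root)
{
	/* teardown lives here, analogous to zapping the range and freeing the page */
	free(root);
}

static void root_put(struct root *root)
{
	/* drop one reference; the last putter frees, so no separate free call exists */
	if (--root->root_count)
		return;
	root_free(root);
}

int main(void)
{
	struct root *r = calloc(1, sizeof(*r));

	r->root_count = 2;	/* two holders of the root */
	root_put(r);		/* refcount 2 -> 1, nothing freed */
	root_put(r);		/* refcount 1 -> 0, teardown runs inside put */
	printf("root released by the last put\n");
	return 0;
}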
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3120,8 +3120,8 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 
 	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
 
-	if (is_tdp_mmu_page(sp) && kvm_tdp_mmu_put_root(kvm, sp))
-		kvm_tdp_mmu_free_root(kvm, sp);
+	if (is_tdp_mmu_page(sp))
+		kvm_tdp_mmu_put_root(kvm, sp);
 	else if (!--sp->root_count && sp->role.invalid)
 		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -41,10 +41,31 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 	rcu_barrier();
 }
 
-static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
+static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+			  gfn_t start, gfn_t end, bool can_yield, bool flush);
+
+static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
+{
+	free_page((unsigned long)sp->spt);
+	kmem_cache_free(mmu_page_header_cache, sp);
+}
+
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
-	if (kvm_tdp_mmu_put_root(kvm, root))
-		kvm_tdp_mmu_free_root(kvm, root);
+	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+
+	lockdep_assert_held_write(&kvm->mmu_lock);
+
+	if (--root->root_count)
+		return;
+
+	WARN_ON(!root->tdp_mmu_page);
+
+	list_del(&root->link);
+
+	zap_gfn_range(kvm, root, 0, max_gfn, false, false);
+
+	tdp_mmu_free_sp(root);
 }
 
 static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
@@ -66,7 +87,7 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 	struct kvm_mmu_page *next_root;
 
 	next_root = list_next_entry(root, link);
-	tdp_mmu_put_root(kvm, root);
+	kvm_tdp_mmu_put_root(kvm, root);
 	return next_root;
 }
 
@@ -89,31 +110,6 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
 		} else
 
-static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield, bool flush);
-
-static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
-{
-	free_page((unsigned long)sp->spt);
-	kmem_cache_free(mmu_page_header_cache, sp);
-}
-
-void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
-{
-	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
-
-	lockdep_assert_held_write(&kvm->mmu_lock);
-
-	WARN_ON(root->root_count);
-	WARN_ON(!root->tdp_mmu_page);
-
-	list_del(&root->link);
-
-	zap_gfn_range(kvm, root, 0, max_gfn, false, false);
-
-	tdp_mmu_free_sp(root);
-}
-
 static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
 						   int level)
 {
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -6,7 +6,6 @@
 #include <linux/kvm_host.h>
 
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
-void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
 static inline void kvm_tdp_mmu_get_root(struct kvm *kvm,
 					struct kvm_mmu_page *root)
@@ -17,14 +16,7 @@ static inline void kvm_tdp_mmu_get_root(struct kvm *kvm,
 	++root->root_count;
 }
 
-static inline bool kvm_tdp_mmu_put_root(struct kvm *kvm,
-					struct kvm_mmu_page *root)
-{
-	lockdep_assert_held(&kvm->mmu_lock);
-
-	--root->root_count;
-
-	return !root->root_count;
-}
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 				 gfn_t end, bool can_yield, bool flush);