KVM: x86/mmu: Do not filter address spaces in for_each_tdp_mmu_root_yield_safe()

All callers except the MMU notifier want to process all address spaces.
Remove the address space ID argument of for_each_tdp_mmu_root_yield_safe()
and switch the MMU notifier to use __for_each_tdp_mmu_root_yield_safe().

Extracted out of a patch by Sean Christopherson <seanjc@google.com>

Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Paolo Bonzini <pbonzini@redhat.com>
Date:   2023-09-21 05:44:56 -04:00
commit 441a5dfcd9
parent 50107e8b2a

3 changed files with 14 additions and 19 deletions
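
For readers unfamiliar with these iterator macros, below is a minimal, self-contained userspace sketch of the pattern (hypothetical struct root and macro names, a plain linked list instead of KVM's refcounted root list, and no yielding). The filtered form corresponds to __for_each_tdp_mmu_root_yield_safe(), which skips roots outside one address space; the unfiltered form corresponds to the new for_each_tdp_mmu_root_yield_safe(), which visits every root:

#include <stdio.h>

struct root {
	int as_id;              /* address space ID, as in kvm_mmu_page_as_id() */
	struct root *next;
};

/* Walk every root, regardless of address space. */
#define for_each_root(_head, _r)					\
	for ((_r) = (_head); (_r); (_r) = (_r)->next)

/* Walk only roots in one address space: the empty if-branch skips
 * non-matching roots, and the caller's loop body becomes the else. */
#define __for_each_root(_head, _r, _as_id)				\
	for ((_r) = (_head); (_r); (_r) = (_r)->next)			\
		if ((_r)->as_id != (_as_id)) {				\
		} else

int main(void)
{
	struct root c = { .as_id = 1, .next = NULL };
	struct root b = { .as_id = 0, .next = &c };
	struct root a = { .as_id = 1, .next = &b };
	struct root *r;

	for_each_root(&a, r)
		printf("all roots: as_id=%d\n", r->as_id);

	__for_each_root(&a, r, 1)
		printf("as_id==1 only: as_id=%d\n", r->as_id);

	return 0;
}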

arch/x86/kvm/mmu/mmu.c

@@ -6246,7 +6246,6 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
 	bool flush;
-	int i;
 
 	if (WARN_ON_ONCE(gfn_end <= gfn_start))
 		return;
@@ -6257,11 +6256,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 
 	flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
 
-	if (tdp_mmu_enabled) {
-		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-			flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
-						      gfn_end, flush);
-	}
+	if (tdp_mmu_enabled)
+		flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
 
 	if (flush)
 		kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);

arch/x86/kvm/mmu/tdp_mmu.c

@@ -211,8 +211,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
 
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)			\
-	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)				\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, false, false);		\
+	     _root;								\
+	     _root = tdp_mmu_next_root(_kvm, _root, false, false))		\
+		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, false)) {		\
+		} else
 
 /*
  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
@@ -877,12 +881,11 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
  * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
  * more SPTEs were zapped since the MMU lock was last acquired.
  */
-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
-			   bool flush)
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
 {
 	struct kvm_mmu_page *root;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
+	for_each_tdp_mmu_root_yield_safe(kvm, root)
 		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
 
 	return flush;
@@ -891,7 +894,6 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 {
 	struct kvm_mmu_page *root;
-	int i;
 
 	/*
 	 * Zap all roots, including invalid roots, as all SPTEs must be dropped
@@ -905,10 +907,8 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 	 * is being destroyed or the userspace VMM has exited.  In both cases,
 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
 	 */
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		for_each_tdp_mmu_root_yield_safe(kvm, root, i)
-			tdp_mmu_zap_root(kvm, root, false);
-	}
+	for_each_tdp_mmu_root_yield_safe(kvm, root)
+		tdp_mmu_zap_root(kvm, root, false);
 }
 
 /*
@@ -1148,7 +1148,7 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 {
 	struct kvm_mmu_page *root;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id)
+	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
 		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
 					  range->may_block, flush);
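
A note on the shape of the new macro above: the expansion ends with an empty "if (...) { }" arm followed by "else", so the caller's loop body becomes the else branch and is skipped when the lockdep assertion fails, without dangling-else hazards. A standalone sketch (hypothetical for_each_positive macros, not kernel code) of why a bare trailing if would be unsafe:

#include <stdio.h>

/* Risky shape: the expansion ends in a bare if, so an "else" written
 * by the caller silently binds to the macro's if and runs inside the
 * loop, once per skipped element. */
#define for_each_positive_bad(_a, _n, _i)			\
	for ((_i) = 0; (_i) < (_n); (_i)++)			\
		if ((_a)[(_i)] > 0)

/* Kernel-style shape, as in for_each_tdp_mmu_root_yield_safe():
 * empty if-branch, the caller's body becomes the else branch. */
#define for_each_positive(_a, _n, _i)				\
	for ((_i) = 0; (_i) < (_n); (_i)++)			\
		if ((_a)[(_i)] <= 0) {				\
		} else

int main(void)
{
	int v[] = { 3, -1, 2 };
	int i;

	for_each_positive_bad(v, 3, i)
		printf("bad: %d\n", v[i]);
	else
		printf("bad: this else bound to the macro's if\n");

	for_each_positive(v, 3, i)
		printf("good: %d\n", v[i]);

	return 0;
}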

arch/x86/kvm/mmu/tdp_mmu.h

@@ -20,8 +20,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
 			  bool shared);
 
-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
-			   bool flush);
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);