KVM: x86/mmu: Allow zapping collapsible SPTEs to use MMU read lock

To reduce the impact of disabling dirty logging, change the TDP MMU
function which zaps collapsible SPTEs to run under the MMU read lock.
This way, page faults on zapped SPTEs can proceed in parallel with
kvm_mmu_zap_collapsible_sptes.

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210401233736.638171-11-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author:    Ben Gardon <bgardon@google.com>
Date:      2021-04-01 16:37:33 -07:00
Committer: Paolo Bonzini
Parent:    6103bc0740
Commit:    2db6f772b5

2 changed files with 23 additions and 8 deletions
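As context for the locking change, here is a loose user-space sketch (not KVM code; the lock and function names are purely illustrative) of the pattern the commit moves to: the zap pass now takes the lock for read, just as the fault path does, so the two can overlap, whereas the old code held the lock exclusively for the whole pass.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for kvm->mmu_lock in this sketch. */
static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Old shape: the whole zap pass excludes everyone, including fault handlers. */
static void zap_pass_exclusive(void)
{
        pthread_rwlock_wrlock(&mmu_lock);
        /* ... walk and zap entries ... */
        pthread_rwlock_unlock(&mmu_lock);
}

/* New shape: the zap pass shares the lock, so fault handlers are not blocked. */
static void zap_pass_shared(void)
{
        pthread_rwlock_rdlock(&mmu_lock);
        /* ... walk entries, modifying them only with atomic operations ... */
        pthread_rwlock_unlock(&mmu_lock);
}

static void fault_handler(void)
{
        pthread_rwlock_rdlock(&mmu_lock);
        /* ... install a mapping; may overlap with zap_pass_shared() ... */
        pthread_rwlock_unlock(&mmu_lock);
}

int main(void)
{
        zap_pass_exclusive();
        zap_pass_shared();
        fault_handler();
        puts("read-lock holders can proceed in parallel");
        return 0;
}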

arch/x86/kvm/mmu/mmu.c

@@ -5601,13 +5601,19 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	write_lock(&kvm->mmu_lock);
 	flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
 
-	if (is_tdp_mmu_enabled(kvm))
-		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
-
 	if (flush)
 		kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
-
 	write_unlock(&kvm->mmu_lock);
+
+	if (is_tdp_mmu_enabled(kvm)) {
+		flush = false;
+
+		read_lock(&kvm->mmu_lock);
+		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
+		if (flush)
+			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+		read_unlock(&kvm->mmu_lock);
+	}
 }
 
 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
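Note how this hunk also splits the TLB-flush responsibility: the write-lock pass flushes for whatever the legacy MMU zapped, then flush is reset to false so the read-lock pass flushes only for its own zaps. Below is a minimal sketch of that two-phase accounting using made-up stub helpers (none of these are kernel APIs).

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real zap and flush work in this sketch. */
static bool legacy_zap(void)  { return true; }          /* pretend something was zapped */
static bool tdp_zap(bool flush) { return flush || true; } /* pretend this pass also zapped something */
static void flush_remote_tlbs(const char *phase) { printf("TLB flush after %s\n", phase); }

static void zap_collapsible(void)
{
        bool flush;

        /* Phase 1: exclusive lock; flush is consumed before the lock is dropped. */
        flush = legacy_zap();
        if (flush)
                flush_remote_tlbs("write-lock phase");

        /*
         * Phase 2: shared lock. Start from false so the already-flushed state
         * from phase 1 is not mistaken for pending work, then flush again only
         * if this phase actually zapped something.
         */
        flush = false;
        flush = tdp_zap(flush);
        if (flush)
                flush_remote_tlbs("read-lock phase");
}

int main(void)
{
        zap_collapsible();
        return 0;
}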

arch/x86/kvm/mmu/tdp_mmu.c

@@ -1257,7 +1257,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
 	rcu_read_lock();
 
 	tdp_root_for_each_pte(iter, root, start, end) {
-		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
+retry:
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
 			flush = false;
 			continue;
 		}
@@ -1272,8 +1273,14 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
 						    pfn, PG_LEVEL_NUM))
 			continue;
 
-		tdp_mmu_set_spte(kvm, &iter, 0);
-
+		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
+			/*
+			 * The iter must explicitly re-read the SPTE because
+			 * the atomic cmpxchg failed.
+			 */
+			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
+			goto retry;
+		}
 		flush = true;
 	}
 
@@ -1292,7 +1299,9 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 {
 	struct kvm_mmu_page *root;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, false)
+	lockdep_assert_held_read(&kvm->mmu_lock);
+
+	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
 		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
 
 	return flush;
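Because the pass now runs with the MMU lock held only for read, clearing an SPTE must be a compare-and-exchange against the last value observed; losing the race means re-reading the entry and retrying, which is what the retry label and the explicit re-read in the hunks above do (the real code jumps back to re-validate the entry before trying again). Below is a minimal user-space sketch of that retry pattern using C11 atomics rather than the kernel's cmpxchg helpers; the variable names are illustrative.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* A single "SPTE" that hypothetical concurrent writers could also update. */
static _Atomic uint64_t spte = 0x1234;

static void zap_entry(void)
{
        uint64_t old = atomic_load(&spte);

        /*
         * Keep trying until the entry is cleared. On failure,
         * atomic_compare_exchange_strong() writes the current value back into
         * 'old', playing the role of the explicit SPTE re-read in the patch.
         */
        while (!atomic_compare_exchange_strong(&spte, &old, 0))
                ;
}

int main(void)
{
        zap_entry();
        printf("entry after zap: %#llx\n", (unsigned long long)atomic_load(&spte));
        return 0;
}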