Revert "KVM: MMU: collapse TLB flushes when zap all pages"
Unwinding optimizations related to obsolete pages is a step towards
removing x86 KVM's fast invalidate mechanism, i.e. this is one part of
reverting all patches from the series that introduced the mechanism[1].
This reverts commit f34d251d66.
[1] https://lkml.kernel.org/r/1369960590-14138-1-git-send-email-xiaoguangrong@linux.vnet.ibm.com
Cc: Xiao Guangrong <guangrong.xiao@gmail.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 52d5dedc79
commit 210f494261
arch/x86/kvm/mmu.c
@@ -2211,14 +2211,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
-/*
- * NOTE: we should pay more attention on the zapped-obsolete page
- * (is_obsolete_sp(sp) && sp->role.invalid) when you do hash list walk
- * since it has been deleted from active_mmu_pages but still can be found
- * at hast list.
- *
- * for_each_valid_sp() has skipped that kind of pages.
- */
 #define for_each_valid_sp(_kvm, _sp, _gfn)				\
 	hlist_for_each_entry(_sp,					\
 	      &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
@@ -5881,13 +5873,11 @@ restart:
 		if (sp->role.invalid)
 			continue;
 
-		/*
-		 * Need not flush tlb since we only zap the sp with invalid
-		 * generation number.
-		 */
 		if (batch >= BATCH_ZAP_PAGES &&
-		      cond_resched_lock(&kvm->mmu_lock)) {
+		      (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
 			batch = 0;
+			kvm_mmu_commit_zap_page(kvm, &invalid_list);
+			cond_resched_lock(&kvm->mmu_lock);
 			goto restart;
 		}
 
@@ -5898,10 +5888,6 @@ restart:
 			goto restart;
 	}
 
-	/*
-	 * Should flush tlb before free page tables since lockless-walking
-	 * may use the pages.
-	 */
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 }
 
@@ -5920,17 +5906,6 @@ void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
 	trace_kvm_mmu_invalidate_zap_all_pages(kvm);
 	kvm->arch.mmu_valid_gen++;
 
-	/*
-	 * Notify all vcpus to reload its shadow page table
-	 * and flush TLB. Then all vcpus will switch to new
-	 * shadow page table with the new mmu_valid_gen.
-	 *
-	 * Note: we should do this under the protection of
-	 * mmu-lock, otherwise, vcpu would purge shadow page
-	 * but miss tlb flush.
-	 */
-	kvm_reload_remote_mmus(kvm);
-
 	kvm_zap_obsolete_pages(kvm);
 	spin_unlock(&kvm->mmu_lock);
 }
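For context, here is a small standalone C sketch of the lock-break pattern this revert restores in kvm_zap_obsolete_pages(): obsolete pages are zapped in batches, and the pending zaps are committed (pages freed and TLBs flushed) before every voluntary break of mmu_lock, so lockless shadow-page walkers never see freed page tables. Everything in it is illustrative, not KVM code: the pthread mutex stands in for kvm->mmu_lock, commit_zap_pages() and need_break() are stubs, and only the BATCH_ZAP_PAGES name is taken from the diff above.

/*
 * Userspace sketch of the restored "commit and flush before each lock break"
 * loop. Compile with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define BATCH_ZAP_PAGES 10
#define NR_PAGES        25

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending;	/* pages zapped but not yet committed/flushed */

static void commit_zap_pages(void)
{
	/* stand-in for kvm_mmu_commit_zap_page(): free pages + TLB flush */
	printf("commit %d zapped pages, flush TLBs\n", pending);
	pending = 0;
}

static bool need_break(void)
{
	/* stand-in for need_resched() || spin_needbreak(&kvm->mmu_lock) */
	return true;
}

int main(void)
{
	int batch = 0;

	pthread_mutex_lock(&mmu_lock);
	for (int page = 0; page < NR_PAGES; page++) {
		if (batch >= BATCH_ZAP_PAGES && need_break()) {
			batch = 0;
			/* restored ordering: commit (free + flush) before the break */
			commit_zap_pages();
			pthread_mutex_unlock(&mmu_lock);	/* cond_resched_lock() analogue: */
			pthread_mutex_lock(&mmu_lock);		/* drop, reschedule, reacquire   */
		}
		pending++;	/* "zap" one obsolete page */
		batch++;
	}
	commit_zap_pages();	/* final commit before releasing the lock */
	pthread_mutex_unlock(&mmu_lock);
	return 0;
}

The optimization being reverted had moved the flush out of this loop by calling kvm_reload_remote_mmus() up front (see the last hunk) and deferring the single kvm_mmu_commit_zap_page() to the end of the walk; the revert returns to committing and flushing at each lock break.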