KVM: X86: MMU: no mmu_notifier_seq++ in kvm_age_hva
The MMU notifier sequence number keeps GPA->HPA mappings in sync when GPA->HPA lookups are done outside of the MMU lock (e.g., in tdp_page_fault). Since kvm_age_hva doesn't change GPA->HPA, it's unnecessary to increment the sequence number.

Signed-off-by: Peter Feiner <pfeiner@google.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
commit 66d73e12f2
parent c63e45635b
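For context, the pattern the commit message refers to looks roughly like the sketch below, modeled on how fault handlers such as tdp_page_fault use mmu_notifier_seq together with KVM's mmu_notifier_retry() check. The function name fault_in_page_sketch and the helper lookup_pfn_outside_lock() are hypothetical placeholders, and the locking is simplified; this is a sketch of the idea, not the kernel's actual code.

#include <linux/kvm_host.h>

/*
 * Sketch: how a fault handler consumes kvm->mmu_notifier_seq.
 * lookup_pfn_outside_lock() is a hypothetical stand-in for the real
 * (possibly sleeping) GPA->HPA lookup done without mmu_lock held.
 */
static int fault_in_page_sketch(struct kvm *kvm, gfn_t gfn)
{
	unsigned long mmu_seq;
	kvm_pfn_t pfn;

	/* Sample the sequence number before the unlocked lookup. */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* GPA->HPA lookup without mmu_lock; may sleep, may race. */
	pfn = lookup_pfn_outside_lock(kvm, gfn);

	spin_lock(&kvm->mmu_lock);
	if (mmu_seq != kvm->mmu_notifier_seq) {
		/*
		 * An MMU notifier changed the host mapping while we were
		 * looking it up; pfn may be stale, so retry the fault.
		 */
		spin_unlock(&kvm->mmu_lock);
		return -EAGAIN;
	}
	/* ... install the gfn -> pfn translation in the page tables ... */
	spin_unlock(&kvm->mmu_lock);
	return 0;
}

Under this scheme, a sequence bump is only needed when a concurrently faulting GPA->HPA lookup could have become stale. Aging a page leaves the translation itself intact, even when !shadow_accessed_mask forces the SPTE to be zapped, which is why the increment removed in the diff below is unnecessary.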
arch/x86/kvm/mmu.c
@@ -1660,17 +1660,9 @@ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 	 * This has some overhead, but not as much as the cost of swapping
 	 * out actively used pages or breaking up actively used hugepages.
 	 */
-	if (!shadow_accessed_mask) {
-		/*
-		 * We are holding the kvm->mmu_lock, and we are blowing up
-		 * shadow PTEs. MMU notifier consumers need to be kept at bay.
-		 * This is correct as long as we don't decouple the mmu_lock
-		 * protected regions (like invalidate_range_start|end does).
-		 */
-		kvm->mmu_notifier_seq++;
+	if (!shadow_accessed_mask)
 		return kvm_handle_hva_range(kvm, start, end, 0,
 					    kvm_unmap_rmapp);
-	}
 
 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
 }