KVM: Introduce kvm_unmap_hva_range() for kvm_mmu_notifier_invalidate_range_start()
When we tested KVM under memory pressure, with THP enabled on the host, we noticed that the MMU notifier took a long time to invalidate huge pages. Since the invalidation was done with mmu_lock held, it not only wasted CPU time but also made the host less responsive. This patch mitigates this by using kvm_handle_hva_range(). Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp> Cc: Alexander Graf <agraf@suse.de> Cc: Paul Mackerras <paulus@samba.org> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:
committed by
Marcelo Tosatti
parent
84504ef386
commit
b3ae209697
@@ -52,6 +52,8 @@
|
|||||||
|
|
||||||
struct kvm;
|
struct kvm;
|
||||||
extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
|
extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
|
||||||
|
extern int kvm_unmap_hva_range(struct kvm *kvm,
|
||||||
|
unsigned long start, unsigned long end);
|
||||||
extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
|
extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
|
||||||
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
|
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
|
||||||
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
|
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
|
||||||
|
|||||||
@@ -870,6 +870,13 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
|
||||||
|
{
|
||||||
|
if (kvm->arch.using_mmu_notifiers)
|
||||||
|
kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
|
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
|
||||||
unsigned long gfn)
|
unsigned long gfn)
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -944,6 +944,7 @@ extern bool kvm_rebooting;
|
|||||||
|
|
||||||
#define KVM_ARCH_WANT_MMU_NOTIFIER
|
#define KVM_ARCH_WANT_MMU_NOTIFIER
|
||||||
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
|
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
|
||||||
|
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
|
||||||
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
|
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
|
||||||
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
|
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
|
||||||
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
|
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
|
||||||
|
|||||||
@@ -1324,6 +1324,11 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
|
|||||||
return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
|
return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
|
||||||
|
{
|
||||||
|
return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
|
||||||
|
}
|
||||||
|
|
||||||
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
|
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
|
||||||
{
|
{
|
||||||
kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
|
kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
|
||||||
|
|||||||
@@ -332,8 +332,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
|
|||||||
* count is also read inside the mmu_lock critical section.
|
* count is also read inside the mmu_lock critical section.
|
||||||
*/
|
*/
|
||||||
kvm->mmu_notifier_count++;
|
kvm->mmu_notifier_count++;
|
||||||
for (; start < end; start += PAGE_SIZE)
|
need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
|
||||||
need_tlb_flush |= kvm_unmap_hva(kvm, start);
|
|
||||||
need_tlb_flush |= kvm->tlbs_dirty;
|
need_tlb_flush |= kvm->tlbs_dirty;
|
||||||
/* we've to flush the tlb before the pages can be freed */
|
/* we've to flush the tlb before the pages can be freed */
|
||||||
if (need_tlb_flush)
|
if (need_tlb_flush)
|
||||||
|
|||||||
Reference in New Issue
Block a user