KVM: MMU: fix regression from rework mmu_shrink() code
The latest kvm mmu_shrink() code rework changes kvm->arch.n_used_mmu_pages/kvm->arch.n_max_mmu_pages in kvm_mmu_free_page()/kvm_mmu_alloc_page(), and kvm_mmu_free_page() is called from kvm_mmu_commit_zap_page(). As a result, kvm->arch.n_used_mmu_pages (and therefore kvm_mmu_available_pages(vcpu->kvm)) is unchanged after kvm_mmu_prepare_zap_page(), which makes kvm_mmu_change_mmu_pages()/__kvm_mmu_free_some_pages() loop forever. Moving kvm_mmu_commit_zap_page() inside the while loops makes them terminate as expected.

Reported-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Xiaotian Feng <dfeng@redhat.com>
Tested-by: Avi Kivity <avi@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Tim Pepper <lnxninja@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
commit 80b63faf02
parent e4abac67b7
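The two hunks below move kvm_mmu_commit_zap_page() inside the while loops. For context, here is a minimal sketch of the failure mode, assuming a simplified stand-in counter; n_used_pages, prepare_zap_page(), commit_zap_page() and shrink_to() are illustrative names, not the kernel API:

/*
 * Minimal sketch (not the kernel code): a reclaim loop whose exit
 * condition depends on a counter that is only updated at "commit"
 * time.  If the commit stays after the loop, the counter never
 * moves and the loop spins forever; committing inside the loop
 * (as the patch does) lets it terminate.
 */
#include <stdio.h>

static unsigned int n_used_pages = 8;	/* stand-in for kvm->arch.n_used_mmu_pages */
static unsigned int n_prepared;		/* pages queued on the invalid list */

static void prepare_zap_page(void)	/* queues a page; does NOT touch the counter */
{
	++n_prepared;
}

static void commit_zap_page(void)	/* frees queued pages; only here the counter drops */
{
	n_used_pages -= n_prepared;
	n_prepared = 0;
}

static void shrink_to(unsigned int goal)
{
	while (n_used_pages > goal) {
		prepare_zap_page();
		/*
		 * Commit inside the loop so the condition sees the
		 * updated counter.  With the commit placed after the
		 * loop instead, n_used_pages never changes and this
		 * while() never exits.
		 */
		commit_zap_page();
	}
}

int main(void)
{
	shrink_to(4);
	printf("pages in use: %u\n", n_used_pages);	/* prints 4 */
	return 0;
}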
@@ -1720,10 +1720,9 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
-			kvm_mmu_prepare_zap_page(kvm, page,
-						 &invalid_list);
+			kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
+			kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		}
-		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
 	}
 
@@ -2972,9 +2971,9 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 
 		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
+		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
-	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)