KVM: MMU: avoid double write protection in sync page path
The sync page is already write protected in mmu_sync_children(), don't
write-protect it again.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent cb83cad2e7
commit f918b44352
arch/x86/kvm/mmu.c

@@ -1216,6 +1216,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 		if ((sp)->gfn != (gfn) || (sp)->role.direct ||		\
 			(sp)->role.invalid) {} else
 
+/* @sp->gfn should be write-protected at the call site */
 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			   struct list_head *invalid_list, bool clear_unsync)
 {
@@ -1224,11 +1225,8 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		return 1;
 	}
 
-	if (clear_unsync) {
-		if (rmap_write_protect(vcpu->kvm, sp->gfn))
-			kvm_flush_remote_tlbs(vcpu->kvm);
+	if (clear_unsync)
 		kvm_unlink_unsync_page(vcpu->kvm, sp);
-	}
 
 	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
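For context, the commit message points at the caller, mmu_sync_children(), as the
place where the write protection already happens. Below is a simplified sketch of
that function as it looked in arch/x86/kvm/mmu.c around this time; it is an
approximation to illustrate the ordering, not a verbatim excerpt of the tree at
this commit, and the walk/bookkeeping helpers (mmu_unsync_walk(), for_each_sp(),
kvm_mmu_pages_init(), mmu_pages_clear_parents()) are only referenced, not shown.

static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
	LIST_HEAD(invalid_list);

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		int protected = 0;

		/* Write-protect every gfn in the batch up front ... */
		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu->kvm, sp->gfn);

		/* ... and flush remote TLBs once for the whole batch. */
		if (protected)
			kvm_flush_remote_tlbs(vcpu->kvm);

		/*
		 * By the time kvm_sync_page() reaches __kvm_sync_page(),
		 * sp->gfn is therefore already write-protected, which is
		 * why the per-page rmap_write_protect() removed by this
		 * patch was redundant.
		 */
		for_each_sp(pages, sp, parents, i) {
			kvm_sync_page(vcpu, sp, &invalid_list);
			mmu_pages_clear_parents(&parents);
		}
		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
		kvm_mmu_pages_init(parent, &parents, &pages);
	}
}

The added comment in the diff ("@sp->gfn should be write-protected at the call
site") documents this as a contract on __kvm_sync_page()'s callers rather than
something the function enforces itself.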