kvm: x86/mmu: Flush TLB before zap_gfn_range releases RCU
Since "KVM: x86/mmu: Zap only TDP MMU leafs in kvm_zap_gfn_range()" is going to be reverted, it's not going to be true anymore that the zap-page flow does not free any 'struct kvm_mmu_page'. Introduce an early flush before tdp_mmu_zap_leafs() returns, to preserve bisectability. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 714797c98e
commit fcb93eb6d0
@@ -941,13 +941,17 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
 		flush = true;
 	}
 
-	rcu_read_unlock();
-
 	/*
-	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
-	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
+	 * Need to flush before releasing RCU.  TODO: do it only if intermediate
+	 * page tables were zapped; there is no need to flush under RCU protection
+	 * if no 'struct kvm_mmu_page' is freed.
 	 */
-	return flush;
+	if (flush)
+		kvm_flush_remote_tlbs_with_address(kvm, start, end - start);
+
+	rcu_read_unlock();
+
+	return false;
 }
 
 /*