tlb: mmu_gather: Remove unused start/end arguments from tlb_finish_mmu()
Since commit 7a30df49f6 ("mm: mmu_gather: remove __tlb_reset_range()
for force flush"), the 'start' and 'end' arguments to tlb_finish_mmu()
are no longer used, because we flush the whole mm in the case of a
nested invalidation.
Remove the unused arguments and update all callers.
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Yu Zhao <yuzhao@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lkml.kernel.org/r/20210127235347.1402-3-will@kernel.org
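
The resulting calling convention, as a minimal sketch distilled from the
hunks below ('mm', 'start' and 'end' stand for whatever address space and
range the caller is tearing down; note that tlb_gather_mmu() still takes
the range at this point in the series):

	struct mmu_gather tlb;

	/* The range is still supplied when the gather is set up... */
	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);

	/* ...but no longer when it is torn down. */
	tlb_finish_mmu(&tlb);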
			
			
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -36,7 +36,7 @@
  *	    tlb_end_vma(tlb, vma);
  *	  }
  *	}
- *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
+ *	tlb_finish_mmu(tlb);				// finish unmap for address space MM
  */
 #include <linux/mm.h>
 #include <linux/pagemap.h>

--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -400,7 +400,7 @@ static void free_ldt_pgtables(struct mm_struct *mm)
 
 	tlb_gather_mmu(&tlb, mm, start, end);
 	free_pgd_range(&tlb, start, end, start, end);
-	tlb_finish_mmu(&tlb, start, end);
+	tlb_finish_mmu(&tlb);
 #endif
 }
 
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -725,7 +725,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		free_pgd_range(&tlb, old_start, old_end, new_end,
 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 	}
-	tlb_finish_mmu(&tlb, old_start, old_end);
+	tlb_finish_mmu(&tlb);
 
 	/*
 	 * Shrink the vma to just the new range.  Always succeeds.

--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -590,8 +590,7 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 struct mmu_gather;
 extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 				unsigned long start, unsigned long end);
-extern void tlb_finish_mmu(struct mmu_gather *tlb,
-				unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb);
 
 static inline void init_tlb_flush_pending(struct mm_struct *mm)
 {

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3985,7 +3985,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
 	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
-	tlb_finish_mmu(&tlb, tlb_start, tlb_end);
+	tlb_finish_mmu(&tlb);
 }
 
 /*

--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -508,7 +508,7 @@ static long madvise_cold(struct vm_area_struct *vma,
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
 	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
-	tlb_finish_mmu(&tlb, start_addr, end_addr);
+	tlb_finish_mmu(&tlb);
 
 	return 0;
 }
@@ -560,7 +560,7 @@ static long madvise_pageout(struct vm_area_struct *vma,
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
 	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
-	tlb_finish_mmu(&tlb, start_addr, end_addr);
+	tlb_finish_mmu(&tlb);
 
 	return 0;
 }
@@ -732,7 +732,7 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 			&madvise_free_walk_ops, &tlb);
 	tlb_end_vma(&tlb, vma);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_finish_mmu(&tlb, range.start, range.end);
+	tlb_finish_mmu(&tlb);
 
 	return 0;
 }

--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1540,7 +1540,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
 		unmap_single_vma(&tlb, vma, start, range.end, NULL);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_finish_mmu(&tlb, start, range.end);
+	tlb_finish_mmu(&tlb);
 }
 
 /**
@@ -1566,7 +1566,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	mmu_notifier_invalidate_range_start(&range);
 	unmap_single_vma(&tlb, vma, address, range.end, details);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_finish_mmu(&tlb, address, range.end);
+	tlb_finish_mmu(&tlb);
 }
 
 /**

--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2676,7 +2676,7 @@ static void unmap_region(struct mm_struct *mm,
 	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 				 next ? next->vm_start : USER_PGTABLES_CEILING);
-	tlb_finish_mmu(&tlb, start, end);
+	tlb_finish_mmu(&tlb);
 }
 
 /*
@@ -3219,7 +3219,7 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
-	tlb_finish_mmu(&tlb, 0, -1);
+	tlb_finish_mmu(&tlb);
 
 	/*
 	 * Walk the list again, actually closing and freeing it,

--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -290,14 +290,11 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 /**
  * tlb_finish_mmu - finish an mmu_gather structure
  * @tlb: the mmu_gather structure to finish
- * @start: start of the region that will be removed from the page-table
- * @end: end of the region that will be removed from the page-table
  *
  * Called at the end of the shootdown operation to free up any resources that
  * were required.
  */
-void tlb_finish_mmu(struct mmu_gather *tlb,
-		unsigned long start, unsigned long end)
+void tlb_finish_mmu(struct mmu_gather *tlb)
 {
 	/*
 	 * If there are parallel threads are doing PTE changes on same range

--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -548,13 +548,13 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
 						vma->vm_end);
 			tlb_gather_mmu(&tlb, mm, range.start, range.end);
 			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
-				tlb_finish_mmu(&tlb, range.start, range.end);
+				tlb_finish_mmu(&tlb);
 				ret = false;
 				continue;
 			}
 			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
 			mmu_notifier_invalidate_range_end(&range);
-			tlb_finish_mmu(&tlb, range.start, range.end);
+			tlb_finish_mmu(&tlb);
 		}
 	}
 
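
Background, for readers of this patch: the range arguments became dead in
7a30df49f6 because a nested (concurrent) invalidation now forces a
full-mm flush instead of re-widening the tracked range. A paraphrased
sketch of the post-series logic, not a verbatim copy of mm/mmu_gather.c:

	void tlb_finish_mmu(struct mmu_gather *tlb)
	{
		if (mm_tlb_flush_nested(tlb->mm)) {
			/*
			 * Another thread changed PTEs in this range under a
			 * non-exclusive lock: discard the gathered range and
			 * flush the whole mm, so any caller-supplied start/end
			 * would be ignored here anyway.
			 */
			tlb->fullmm = 1;
			__tlb_reset_range(tlb);
			tlb->freed_tables = 1;
		}

		tlb_flush_mmu(tlb);
		/* ... then release the gathered pages and per-CPU state ... */
	}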