[PATCH] mm: free_pages_and_swap_cache opt
Minor optimization (though it doesn't help in the PREEMPT case, severely
constrained by small ZAP_BLOCK_SIZE).  free_pages_and_swap_cache works in
chunks of 16, calling release_pages which works in chunks of PAGEVEC_SIZE.
But PAGEVEC_SIZE was dropped from 16 to 14 in 2.6.10, so we're now doing
more spin_lock_irq'ing than necessary: use PAGEVEC_SIZE throughout.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit c484d41042
parent 161599ff39
@@ -14,6 +14,7 @@
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/backing-dev.h>
+#include <linux/pagevec.h>
 
 #include <asm/pgtable.h>
 
@@ -272,12 +273,11 @@ void free_page_and_swap_cache(struct page *page)
  */
 void free_pages_and_swap_cache(struct page **pages, int nr)
 {
-	int chunk = 16;
 	struct page **pagep = pages;
 
 	lru_add_drain();
 	while (nr) {
-		int todo = min(chunk, nr);
+		int todo = min(nr, PAGEVEC_SIZE);
 		int i;
 
 		for (i = 0; i < todo; i++)
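For a feel of the cost the commit message describes, here is a minimal user-space sketch (not kernel code) of the batching arithmetic. It assumes release_pages() takes the IRQ-disabling LRU lock roughly once per PAGEVEC_SIZE-sized drain; the helper name lock_acquisitions() and the 512-page workload are invented for illustration. With a caller chunk of 16 and PAGEVEC_SIZE of 14, every chunk splits into a 14-page drain plus a 2-page drain, roughly doubling the lock traffic compared with handing over PAGEVEC_SIZE pages at a time.

/*
 * Illustrative sketch only -- not kernel code.  Models how many times
 * release_pages() would take the zone lru lock (spin_lock_irq) when fed
 * `nr` pages in caller chunks of `chunk`, assuming one lock round trip
 * per PAGEVEC_SIZE-sized drain inside release_pages().
 */
#include <stdio.h>

#define PAGEVEC_SIZE 14			/* 14 since 2.6.10; was 16 before */

static int lock_acquisitions(int nr, int chunk)
{
	int locks = 0;

	while (nr) {
		int todo = nr < chunk ? nr : chunk;

		/* one drain (and one lock/unlock cycle) per PAGEVEC_SIZE pages */
		locks += (todo + PAGEVEC_SIZE - 1) / PAGEVEC_SIZE;
		nr -= todo;
	}
	return locks;
}

int main(void)
{
	int nr = 512;	/* an arbitrary batch of pages being freed */

	/* chunk of 16: each chunk drains as 14 + 2 -> 64 lock cycles */
	printf("chunk = 16:           %d lock cycles\n",
	       lock_acquisitions(nr, 16));
	/* chunk of PAGEVEC_SIZE: one drain per chunk -> 37 lock cycles */
	printf("chunk = PAGEVEC_SIZE: %d lock cycles\n",
	       lock_acquisitions(nr, PAGEVEC_SIZE));
	return 0;
}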