writeback: remove the internal 5% low bound on dirty_ratio
The dirty_ratio was silently limited in global_dirty_limits() to >= 5%. This is not behavior a user would expect, and it is inconsistent with calc_period_shift(), which uses the plain vm_dirty_ratio value. Let's remove the internal bound. At the same time, fix balance_dirty_pages() to work with the dirty_thresh=0 case. This allows applications to proceed once the dirty and writeback pages have all been cleaned. And ">" fits the name "exceeded" better than ">=" does. Neil thinks it is an aesthetic improvement as well as a functional one :) Signed-off-by: Wu Fengguang <fengguang.wu@intel.com> Cc: Jan Kara <jack@suse.cz> Proposed-by: Con Kolivas <kernel@kolivas.org> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Reviewed-by: Rik van Riel <riel@redhat.com> Reviewed-by: Neil Brown <neilb@suse.de> Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Michael Rubin <mrubin@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									0e093d9976
								
							
						
					
					
						commit
						4cbec4c8b9
					
				| @ -582,7 +582,7 @@ static inline bool over_bground_thresh(void) | |||||||
| 	global_dirty_limits(&background_thresh, &dirty_thresh); | 	global_dirty_limits(&background_thresh, &dirty_thresh); | ||||||
| 
 | 
 | ||||||
| 	return (global_page_state(NR_FILE_DIRTY) + | 	return (global_page_state(NR_FILE_DIRTY) + | ||||||
| 		global_page_state(NR_UNSTABLE_NFS) >= background_thresh); | 		global_page_state(NR_UNSTABLE_NFS) > background_thresh); | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /*
 | /*
 | ||||||
|  | |||||||
| @ -415,14 +415,8 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) | |||||||
| 
 | 
 | ||||||
| 	if (vm_dirty_bytes) | 	if (vm_dirty_bytes) | ||||||
| 		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE); | 		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE); | ||||||
| 	else { | 	else | ||||||
| 		int dirty_ratio; | 		dirty = (vm_dirty_ratio * available_memory) / 100; | ||||||
| 
 |  | ||||||
| 		dirty_ratio = vm_dirty_ratio; |  | ||||||
| 		if (dirty_ratio < 5) |  | ||||||
| 			dirty_ratio = 5; |  | ||||||
| 		dirty = (dirty_ratio * available_memory) / 100; |  | ||||||
| 	} |  | ||||||
| 
 | 
 | ||||||
| 	if (dirty_background_bytes) | 	if (dirty_background_bytes) | ||||||
| 		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE); | 		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE); | ||||||
| @ -510,7 +504,7 @@ static void balance_dirty_pages(struct address_space *mapping, | |||||||
| 		 * catch-up. This avoids (excessively) small writeouts | 		 * catch-up. This avoids (excessively) small writeouts | ||||||
| 		 * when the bdi limits are ramping up. | 		 * when the bdi limits are ramping up. | ||||||
| 		 */ | 		 */ | ||||||
| 		if (nr_reclaimable + nr_writeback < | 		if (nr_reclaimable + nr_writeback <= | ||||||
| 				(background_thresh + dirty_thresh) / 2) | 				(background_thresh + dirty_thresh) / 2) | ||||||
| 			break; | 			break; | ||||||
| 
 | 
 | ||||||
| @ -542,8 +536,8 @@ static void balance_dirty_pages(struct address_space *mapping, | |||||||
| 		 * the last resort safeguard. | 		 * the last resort safeguard. | ||||||
| 		 */ | 		 */ | ||||||
| 		dirty_exceeded = | 		dirty_exceeded = | ||||||
| 			(bdi_nr_reclaimable + bdi_nr_writeback >= bdi_thresh) | 			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh) | ||||||
| 			|| (nr_reclaimable + nr_writeback >= dirty_thresh); | 			|| (nr_reclaimable + nr_writeback > dirty_thresh); | ||||||
| 
 | 
 | ||||||
| 		if (!dirty_exceeded) | 		if (!dirty_exceeded) | ||||||
| 			break; | 			break; | ||||||
|  | |||||||
		Loading…
	
		Reference in New Issue
	
	Block a user