[PATCH] throttle_vm_writeout(): don't loop on GFP_NOFS and GFP_NOIO allocations

throttle_vm_writeout() is designed to wait for the dirty levels to subside.
But if the caller holds IO or FS locks, we might be holding up that writeout.

So change it to take a single nap to give other devices a chance to clean some
memory, then return.
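
For illustration only, here is a minimal userspace sketch of the mask test the patch adds: an allocation mask that lacks __GFP_FS or __GFP_IO (i.e. GFP_NOFS or GFP_NOIO) means the caller may hold FS or IO locks, so it should nap once rather than loop. The DEMO_* flag values and the must_take_single_nap() helper are invented for this sketch and are not the kernel's definitions.

    #include <stdio.h>

    /* Illustrative flag bits; the real __GFP_IO/__GFP_FS values live in <linux/gfp.h>. */
    #define DEMO_GFP_IO     0x40u
    #define DEMO_GFP_FS     0x80u
    #define DEMO_GFP_KERNEL (DEMO_GFP_IO | DEMO_GFP_FS)
    #define DEMO_GFP_NOFS   DEMO_GFP_IO   /* IO allowed, FS re-entry not allowed */
    #define DEMO_GFP_NOIO   0x00u         /* neither IO nor FS re-entry allowed */

    /* Same shape of test as the patch: loop only when the caller allows both IO and FS. */
    static int must_take_single_nap(unsigned int gfp_mask)
    {
            return (gfp_mask & (DEMO_GFP_FS | DEMO_GFP_IO)) !=
                   (DEMO_GFP_FS | DEMO_GFP_IO);
    }

    int main(void)
    {
            printf("GFP_KERNEL: nap-and-return? %d\n", must_take_single_nap(DEMO_GFP_KERNEL)); /* 0 */
            printf("GFP_NOFS:   nap-and-return? %d\n", must_take_single_nap(DEMO_GFP_NOFS));   /* 1 */
            printf("GFP_NOIO:   nap-and-return? %d\n", must_take_single_nap(DEMO_GFP_NOIO));   /* 1 */
            return 0;
    }

With GFP_KERNEL the function still loops until the dirty threshold is met; with GFP_NOFS or GFP_NOIO it takes the new congestion_wait(WRITE, HZ/10) nap and returns.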

Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Cc: Kumar Gala <galak@kernel.crashing.org>
Cc: Pete Zaitcev <zaitcev@redhat.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 232ea4d69d
parent b1a316f6f9
Author: Andrew Morton <akpm@linux-foundation.org>
Date:   2007-02-28 20:13:21 -08:00
Committer: Linus Torvalds <torvalds@linux-foundation.org>

3 files changed, 13 insertions(+), 4 deletions(-)

--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h

@@ -84,7 +84,7 @@ static inline void wait_on_inode(struct inode *inode)
 int wakeup_pdflush(long nr_pages);
 void laptop_io_completion(void);
 void laptop_sync_completion(void);
-void throttle_vm_writeout(void);
+void throttle_vm_writeout(gfp_t gfp_mask);
 
 /* These are exported to sysctl. */
 extern int dirty_background_ratio;

--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c

@@ -296,11 +296,21 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 }
 EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
-void throttle_vm_writeout(void)
+void throttle_vm_writeout(gfp_t gfp_mask)
 {
 	long background_thresh;
 	long dirty_thresh;
 
+	if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) {
+		/*
+		 * The caller might hold locks which can prevent IO completion
+		 * or progress in the filesystem.  So we cannot just sit here
+		 * waiting for IO to complete.
+		 */
+		congestion_wait(WRITE, HZ/10);
+		return;
+	}
+
 	for ( ; ; ) {
 		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
@@ -317,7 +327,6 @@ void throttle_vm_writeout(void)
 	}
 }
 
-
 /*
  * writeback at least _min_pages, and keep writing until the amount of dirty
  * memory is less than the background threshold, or until we're all clean.

--- a/mm/vmscan.c
+++ b/mm/vmscan.c

@@ -952,7 +952,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 		}
 	}
 
-	throttle_vm_writeout();
+	throttle_vm_writeout(sc->gfp_mask);
 
 	atomic_dec(&zone->reclaim_in_progress);
 	return nr_reclaimed;