mirror of
https://github.com/torvalds/linux.git
synced 2024-11-28 07:01:32 +00:00
89a09141df
The current NFS client congestion logic is severely broken: it marks the backing device congested during each nfs_writepages() call but doesn't mirror this in nfs_writepage(), which makes for deadlocks. Also it implements its own waitqueue. Replace this by a more regular congestion implementation that puts a cap on the number of active writeback pages and uses the bdi congestion waitqueue. Also always use an interruptible wait since it makes sense to be able to SIGKILL the process even for mounts without 'intr'. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Acked-by: Trond Myklebust <trond.myklebust@fys.uio.no> Cc: Christoph Lameter <clameter@engr.sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
86 lines
2.0 KiB
C
86 lines
2.0 KiB
C
|
|
#include <linux/wait.h>
|
|
#include <linux/backing-dev.h>
|
|
#include <linux/fs.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/module.h>
|
|
|
|
/*
 * Global (not per-bdi) wait queues for tasks sleeping on congestion,
 * one per transfer direction.  Indexed by the 'rw' argument of the
 * functions below: congestion_wqh[READ] and congestion_wqh[WRITE].
 */
static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
|
|
|
|
|
|
/*
 * clear_bdi_congested - mark a backing_dev as no longer congested
 * @bdi: the backing_dev_info to update
 * @rw: READ or WRITE, selecting which congestion bit to clear
 *
 * Clears the per-direction congestion bit in bdi->state and wakes any
 * tasks sleeping in congestion_wait()/congestion_wait_interruptible()
 * on the corresponding global wait queue.
 */
void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[rw];

	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
	clear_bit(bit, &bdi->state);
	/*
	 * Order the clear_bit() against the waitqueue_active() check below:
	 * without the barrier a sleeper's prepare_to_wait() could be missed
	 * and the wakeup lost, leaving the task stuck until timeout.
	 */
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);
|
|
|
|
void set_bdi_congested(struct backing_dev_info *bdi, int rw)
|
|
{
|
|
enum bdi_state bit;
|
|
|
|
bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
|
|
set_bit(bit, &bdi->state);
|
|
}
|
|
EXPORT_SYMBOL(set_bdi_congested);
|
|
|
|
/**
|
|
* congestion_wait - wait for a backing_dev to become uncongested
|
|
* @rw: READ or WRITE
|
|
* @timeout: timeout in jiffies
|
|
*
|
|
* Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
|
|
* write congestion. If no backing_devs are congested then just wait for the
|
|
* next write to be completed.
|
|
*/
|
|
long congestion_wait(int rw, long timeout)
|
|
{
|
|
long ret;
|
|
DEFINE_WAIT(wait);
|
|
wait_queue_head_t *wqh = &congestion_wqh[rw];
|
|
|
|
prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
|
|
ret = io_schedule_timeout(timeout);
|
|
finish_wait(wqh, &wait);
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(congestion_wait);
|
|
|
|
long congestion_wait_interruptible(int rw, long timeout)
|
|
{
|
|
long ret;
|
|
DEFINE_WAIT(wait);
|
|
wait_queue_head_t *wqh = &congestion_wqh[rw];
|
|
|
|
prepare_to_wait(wqh, &wait, TASK_INTERRUPTIBLE);
|
|
if (signal_pending(current))
|
|
ret = -ERESTARTSYS;
|
|
else
|
|
ret = io_schedule_timeout(timeout);
|
|
finish_wait(wqh, &wait);
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(congestion_wait_interruptible);
|
|
|
|
/**
|
|
* congestion_end - wake up sleepers on a congested backing_dev_info
|
|
* @rw: READ or WRITE
|
|
*/
|
|
void congestion_end(int rw)
|
|
{
|
|
wait_queue_head_t *wqh = &congestion_wqh[rw];
|
|
|
|
if (waitqueue_active(wqh))
|
|
wake_up(wqh);
|
|
}
|
|
EXPORT_SYMBOL(congestion_end);
|