writeback: Separate inode requeueing after writeback

Move inode requeueing after the inode has been written out into a separate
function.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
Author: Jan Kara <jack@suse.cz>
Date: 2012-05-03 14:47:58 +02:00
Committer: Fengguang Wu <fengguang.wu@intel.com>
parent 6290be1c1d
commit ccb26b5a65

@@ -344,6 +344,60 @@ static void inode_wait_for_writeback(struct inode *inode,
 	}
 }
 
+/*
+ * Find proper writeback list for the inode depending on its current state and
+ * possibly also change of its state while we were doing writeback. Here we
+ * handle things such as livelock prevention or fairness of writeback among
+ * inodes. This function can be called only by the flusher thread - no one
+ * else processes all inodes in writeback lists and requeueing inodes behind
+ * the flusher thread's back can have unexpected consequences.
+ */
+static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+			  struct writeback_control *wbc)
+{
+	if (inode->i_state & I_FREEING)
+		return;
+
+	/*
+	 * Sync livelock prevention. Each inode is tagged and synced in one
+	 * shot. If still dirty, it will be redirty_tail()'ed below. Update
+	 * the dirty time to prevent enqueue and sync it again.
+	 */
+	if ((inode->i_state & I_DIRTY) &&
+	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
+		inode->dirtied_when = jiffies;
+
+	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
+		/*
+		 * We didn't write back all the pages. nfs_writepages()
+		 * sometimes bails out without doing anything.
+		 */
+		if (wbc->nr_to_write <= 0) {
+			/* Slice used up. Queue for next turn. */
+			requeue_io(inode, wb);
+		} else {
+			/*
+			 * Writeback blocked by something other than
+			 * congestion. Delay the inode for some time to
+			 * avoid spinning on the CPU (100% iowait)
+			 * retrying writeback of the dirty page/inode
+			 * that cannot be performed immediately.
+			 */
+			redirty_tail(inode, wb);
+		}
+	} else if (inode->i_state & I_DIRTY) {
+		/*
+		 * Filesystems can dirty the inode during writeback operations,
+		 * such as delayed allocation during submission or metadata
+		 * updates after data IO completion.
+		 */
+		redirty_tail(inode, wb);
+	} else {
+		/* The inode is clean. Remove from writeback lists. */
+		list_del_init(&inode->i_wb_list);
+	}
+}
+
 /*
  * Write out an inode's dirty pages.  Called under wb->list_lock and
  * inode->i_lock.  Either the caller has an active reference on the inode or
@@ -422,53 +476,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
 
 	spin_lock(&wb->list_lock);
 	spin_lock(&inode->i_lock);
-	if (!(inode->i_state & I_FREEING)) {
-		/*
-		 * Sync livelock prevention. Each inode is tagged and synced in
-		 * one shot. If still dirty, it will be redirty_tail()'ed below.
-		 * Update the dirty time to prevent enqueue and sync it again.
-		 */
-		if ((inode->i_state & I_DIRTY) &&
-		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
-			inode->dirtied_when = jiffies;
-
-		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
-			/*
-			 * We didn't write back all the pages.  nfs_writepages()
-			 * sometimes bales out without doing anything.
-			 */
-			if (wbc->nr_to_write <= 0) {
-				/*
-				 * slice used up: queue for next turn
-				 */
-				requeue_io(inode, wb);
-			} else {
-				/*
-				 * Writeback blocked by something other than
-				 * congestion. Delay the inode for some time to
-				 * avoid spinning on the CPU (100% iowait)
-				 * retrying writeback of the dirty page/inode
-				 * that cannot be performed immediately.
-				 */
-				redirty_tail(inode, wb);
-			}
-		} else if (inode->i_state & I_DIRTY) {
-			/*
-			 * Filesystems can dirty the inode during writeback
-			 * operations, such as delayed allocation during
-			 * submission or metadata updates after data IO
-			 * completion.
-			 */
-			redirty_tail(inode, wb);
-		} else {
-			/*
-			 * The inode is clean.  At this point we either have
-			 * a reference to the inode or it's on it's way out.
-			 * No need to add it back to the LRU.
-			 */
-			list_del_init(&inode->i_wb_list);
-		}
-	}
+	requeue_inode(inode, wb, wbc);
 	inode_sync_complete(inode);
 	trace_writeback_single_inode(inode, wbc, nr_to_write);
 	return ret;
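
For readers who want to experiment with the requeueing policy outside the
kernel, below is a small userspace C sketch of requeue_inode()'s branch
structure. The sketch_inode struct, the flag values, and the classify()
helper with its action enum are all stand-ins invented for illustration,
not the real <linux/fs.h> definitions; the sketch only mirrors the order
of the checks in the function added above.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins for the kernel's inode state flags. */
#define I_DIRTY		0x1
#define I_FREEING	0x2

enum requeue_action { DO_NOTHING, REQUEUE_IO, REDIRTY_TAIL, REMOVE_FROM_LIST };

struct sketch_inode {
	unsigned int i_state;
	bool pages_tagged_dirty;  /* stands in for mapping_tagged(..., PAGECACHE_TAG_DIRTY) */
};

/* Mirrors the branch order of requeue_inode() in the hunk above. */
static enum requeue_action classify(const struct sketch_inode *inode,
				    long nr_to_write)
{
	if (inode->i_state & I_FREEING)
		return DO_NOTHING;		/* inode is going away */
	if (inode->pages_tagged_dirty)
		/* Slice used up -> fair requeue; otherwise back off. */
		return nr_to_write <= 0 ? REQUEUE_IO : REDIRTY_TAIL;
	if (inode->i_state & I_DIRTY)
		return REDIRTY_TAIL;		/* pages clean, inode itself dirty */
	return REMOVE_FROM_LIST;		/* fully clean */
}

int main(void)
{
	const struct sketch_inode pages_left = { 0, true };
	const struct sketch_inode meta_dirty = { I_DIRTY, false };
	const struct sketch_inode clean = { 0, false };

	printf("pages left, slice used up -> %d (expect REQUEUE_IO)\n",
	       classify(&pages_left, 0));
	printf("pages left, slice remains -> %d (expect REDIRTY_TAIL)\n",
	       classify(&pages_left, 16));
	printf("only inode dirty          -> %d (expect REDIRTY_TAIL)\n",
	       classify(&meta_dirty, 16));
	printf("clean                     -> %d (expect REMOVE_FROM_LIST)\n",
	       classify(&clean, 16));
	return 0;
}

The point of the refactor is visible in the sketch: pulling the decision
into one function keeps the fairness case (requeue_io) and the backoff
cases (redirty_tail) in a single place that only the flusher thread calls.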