iomap: fall back to buffered writes for invalidation failures
Failing to invalidate the page cache means the data is incoherent, which is a very bad state for the system. Always fall back to buffered I/O through the page cache if we can't invalidate mappings.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Acked-by: Bob Peterson <rpeterso@redhat.com>
Acked-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Theodore Ts'o <tytso@mit.edu> # for ext4
Reviewed-by: Andreas Gruenbacher <agruenba@redhat.com> # for gfs2
Reviewed-by: Ritesh Harjani <riteshh@linux.ibm.com>
commit 60263d5889
parent 80e543ae24
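For orientation before the diff: the change makes iomap_dio_rw() return -ENOTBLK instead of writing over a page-cache range it could not invalidate, and each caller decides how to fall back. Below is a minimal sketch of that caller-side pattern, modeled on the zonefs hunk further down; my_fs_file_write_iter(), my_fs_dio_write() and my_fs_buffered_write() are hypothetical names used only for illustration, not part of this patch.

#include <linux/fs.h>
#include <linux/uio.h>

/* Hedged sketch of a filesystem ->write_iter with a buffered fallback. */
static ssize_t my_fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        if (iocb->ki_flags & IOCB_DIRECT) {
                /*
                 * With this change, the direct I/O path reports -ENOTBLK
                 * when page-cache invalidation fails instead of warning
                 * and proceeding with an incoherent write.
                 */
                ssize_t ret = my_fs_dio_write(iocb, from);

                if (ret != -ENOTBLK)
                        return ret;
                /* Invalidation failed: fall through to the buffered path. */
        }

        return my_fs_buffered_write(iocb, from);
}

zonefs takes exactly this shape below; ext4 and gfs2 instead map -ENOTBLK to 0 so that their existing handling of short direct writes continues with buffered I/O.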
fs/ext4/file.c
@@ -544,6 +544,8 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
                 iomap_ops = &ext4_iomap_overwrite_ops;
         ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
                            is_sync_kiocb(iocb) || unaligned_io || extend);
+        if (ret == -ENOTBLK)
+                ret = 0;
 
         if (extend)
                 ret = ext4_handle_inode_extension(inode, offset, ret, count);
fs/gfs2/file.c
@@ -814,7 +814,8 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
 
         ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
                            is_sync_kiocb(iocb));
-
+        if (ret == -ENOTBLK)
+                ret = 0;
 out:
         gfs2_glock_dq(&gh);
 out_uninit:
fs/iomap/direct-io.c
@@ -10,6 +10,7 @@
 #include <linux/backing-dev.h>
 #include <linux/uio.h>
 #include <linux/task_io_accounting_ops.h>
+#include "trace.h"
 
 #include "../internal.h"
 
@@ -401,6 +402,9 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
  * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
  * may be pure data writes. In that case, we still need to do a full data sync
  * completion.
+ *
+ * Returns -ENOTBLK in case of a page invalidation failure for writes.
+ * The caller needs to fall back to buffered I/O in this case.
  */
 ssize_t
 iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
@@ -478,13 +482,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
         if (iov_iter_rw(iter) == WRITE) {
                 /*
                  * Try to invalidate cache pages for the range we are writing.
-                 * If this invalidation fails, tough, the write will still work,
-                 * but racing two incompatible write paths is a pretty crazy
-                 * thing to do, so we don't support it 100%.
+                 * If this invalidation fails, let the caller fall back to
+                 * buffered I/O.
                  */
-                if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
-                                end >> PAGE_SHIFT))
-                        dio_warn_stale_pagecache(iocb->ki_filp);
+                if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
+                                end >> PAGE_SHIFT)) {
+                        trace_iomap_dio_invalidate_fail(inode, pos, count);
+                        ret = -ENOTBLK;
+                        goto out_free_dio;
+                }
 
         if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
                 ret = sb_init_dio_done_wq(inode->i_sb);
fs/iomap/trace.h
@@ -74,6 +74,7 @@ DEFINE_EVENT(iomap_range_class, name, \
 DEFINE_RANGE_EVENT(iomap_writepage);
 DEFINE_RANGE_EVENT(iomap_releasepage);
 DEFINE_RANGE_EVENT(iomap_invalidatepage);
+DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
 
 #define IOMAP_TYPE_STRINGS \
         { IOMAP_HOLE,            "HOLE" }, \
fs/xfs/xfs_file.c
@@ -553,8 +553,8 @@ out:
         xfs_iunlock(ip, iolock);
 
         /*
-         * No fallback to buffered IO on errors for XFS, direct IO will either
-         * complete fully or fail.
+         * No fallback to buffered IO after short writes for XFS, direct I/O
+         * will either complete fully or return an error.
          */
         ASSERT(ret < 0 || ret == count);
         return ret;
fs/zonefs/super.c
@@ -786,8 +786,11 @@ static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
         if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
                 return -EFBIG;
 
-        if (iocb->ki_flags & IOCB_DIRECT)
-                return zonefs_file_dio_write(iocb, from);
+        if (iocb->ki_flags & IOCB_DIRECT) {
+                ssize_t ret = zonefs_file_dio_write(iocb, from);
+                if (ret != -ENOTBLK)
+                        return ret;
+        }
 
         return zonefs_file_buffered_write(iocb, from);
 }