sort out blockdev_direct_IO variants

Move the call to vmtruncate to get rid of excessive blocks to the callers
in preparation for the new truncate calling sequence.  This was only done
for DIO_LOCKING filesystems, so the __blockdev_direct_IO_newtrunc variant
was not needed anyway.  Get rid of blockdev_direct_IO_no_locking and
its _newtrunc variant while at it, as just open coding the two additional
parameters is shorter than the name suffix.
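
For illustration, a DIO_LOCKING filesystem's ->direct_IO method now follows
roughly the pattern below (a minimal sketch only; example_direct_IO and
example_get_block are placeholder names, and the trim logic mirrors the
per-filesystem hunks in this commit):

	static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
	{
		struct inode *inode = iocb->ki_filp->f_mapping->host;
		ssize_t ret;

		ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
					 iov, offset, nr_segs,
					 example_get_block, NULL);
		/*
		 * An error in an extending write may have instantiated blocks
		 * outside i_size; the caller is now responsible for trimming
		 * them off, instead of __blockdev_direct_IO doing it.
		 */
		if (unlikely((rw & WRITE) && ret < 0)) {
			loff_t isize = i_size_read(inode);
			loff_t end = offset + iov_length(iov, nr_segs);

			if (end > isize)
				vmtruncate(inode, isize);
		}
		return ret;
	}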

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author:    Christoph Hellwig <hch@lst.de>
Date:      2010-06-04 11:29:53 +02:00
Committer: Al Viro
Parent:    256249584b
Commit:    eafdc7d190
15 changed files with 146 additions and 119 deletions


@@ -172,9 +172,8 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 
-	return blockdev_direct_IO_no_locking_newtrunc(rw, iocb, inode,
-				I_BDEV(inode), iov, offset, nr_segs,
-				blkdev_get_blocks, NULL);
+	return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
+				    nr_segs, blkdev_get_blocks, NULL, NULL, 0);
 }
 
 int __sync_blockdev(struct block_device *bdev, int wait)


@@ -1136,8 +1136,27 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 	return ret;
 }
 
+/*
+ * This is a library function for use by filesystem drivers.
+ *
+ * The locking rules are governed by the flags parameter:
+ *  - if the flags value contains DIO_LOCKING we use a fancy locking
+ *    scheme for dumb filesystems.
+ *    For writes this function is called under i_mutex and returns with
+ *    i_mutex held, for reads, i_mutex is not held on entry, but it is
+ *    taken and dropped again before returning.
+ *    For reads and writes i_alloc_sem is taken in shared mode and released
+ *    on I/O completion (which may happen asynchronously after returning to
+ *    the caller).
+ *
+ *  - if the flags value does NOT contain DIO_LOCKING we don't use any
+ *    internal locking but rather rely on the filesystem to synchronize
+ *    direct I/O reads/writes versus each other and truncate.
+ *    For reads and writes both i_mutex and i_alloc_sem are not held on
+ *    entry and are never taken.
+ */
 ssize_t
-__blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode,
+__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	struct block_device *bdev, const struct iovec *iov, loff_t offset,
 	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
 	dio_submit_t submit_io, int flags)
@@ -1233,57 +1252,4 @@ __blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode,
 out:
 	return retval;
 }
-EXPORT_SYMBOL(__blockdev_direct_IO_newtrunc);
-
-/*
- * This is a library function for use by filesystem drivers.
- *
- * The locking rules are governed by the flags parameter:
- *  - if the flags value contains DIO_LOCKING we use a fancy locking
- *    scheme for dumb filesystems.
- *    For writes this function is called under i_mutex and returns with
- *    i_mutex held, for reads, i_mutex is not held on entry, but it is
- *    taken and dropped again before returning.
- *    For reads and writes i_alloc_sem is taken in shared mode and released
- *    on I/O completion (which may happen asynchronously after returning to
- *    the caller).
- *
- *  - if the flags value does NOT contain DIO_LOCKING we don't use any
- *    internal locking but rather rely on the filesystem to synchronize
- *    direct I/O reads/writes versus each other and truncate.
- *    For reads and writes both i_mutex and i_alloc_sem are not held on
- *    entry and are never taken.
- */
-ssize_t
-__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, const struct iovec *iov, loff_t offset,
-	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-	dio_submit_t submit_io, int flags)
-{
-	ssize_t retval;
-
-	retval = __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov,
-			offset, nr_segs, get_block, end_io, submit_io, flags);
-	/*
-	 * In case of error extending write may have instantiated a few
-	 * blocks outside i_size. Trim these off again for DIO_LOCKING.
-	 * NOTE: DIO_NO_LOCK/DIO_OWN_LOCK callers have to handle this in
-	 * their own manner. This is a further example of where the old
-	 * truncate sequence is inadequate.
-	 *
-	 * NOTE: filesystems with their own locking have to handle this
-	 * on their own.
-	 */
-	if (flags & DIO_LOCKING) {
-		if (unlikely((rw & WRITE) && retval < 0)) {
-			loff_t isize = i_size_read(inode);
-			loff_t end = offset + iov_length(iov, nr_segs);
-
-			if (end > isize)
-				vmtruncate(inode, isize);
-		}
-	}
-
-	return retval;
-}
 EXPORT_SYMBOL(__blockdev_direct_IO);
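
To make the two calling conventions described in the comment above concrete,
a call site now looks like one of the following (sketch only; example_get_block
is a placeholder get_block callback):

	/* "dumb" filesystems keep using the DIO_LOCKING wrapper: */
	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs, example_get_block, NULL);

	/* filesystems doing their own locking open-code the extra arguments: */
	ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				   offset, nr_segs, example_get_block, NULL,
				   NULL, 0);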


@@ -838,7 +838,7 @@ ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	struct inode *inode = mapping->host;
 	ssize_t ret;
 
-	ret = blockdev_direct_IO_newtrunc(rw, iocb, inode, inode->i_sb->s_bdev,
+	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
 				iov, offset, nr_segs, ext2_get_block, NULL);
 	if (ret < 0 && (rw & WRITE))
 		ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));


@@ -1785,6 +1785,17 @@ retry:
 	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext3_get_block, NULL);
+	/*
+	 * In case of error extending write may have instantiated a few
+	 * blocks outside i_size. Trim these off again.
+	 */
+	if (unlikely((rw & WRITE) && ret < 0)) {
+		loff_t isize = i_size_read(inode);
+		loff_t end = offset + iov_length(iov, nr_segs);
+
+		if (end > isize)
+			vmtruncate(inode, isize);
+	}
 
 	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;


@@ -3545,15 +3545,24 @@ static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
 
 retry:
 	if (rw == READ && ext4_should_dioread_nolock(inode))
-		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
+		ret = __blockdev_direct_IO(rw, iocb, inode,
				 inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
-				 ext4_get_block, NULL);
-	else
+				 ext4_get_block, NULL, NULL, 0);
+	else {
 		ret = blockdev_direct_IO(rw, iocb, inode,
				 inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext4_get_block, NULL);
+
+		if (unlikely((rw & WRITE) && ret < 0)) {
+			loff_t isize = i_size_read(inode);
+			loff_t end = offset + iov_length(iov, nr_segs);
+
+			if (end > isize)
+				vmtruncate(inode, isize);
+		}
+	}
 
 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;


@@ -212,8 +212,8 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
	 * FAT need to use the DIO_LOCKING for avoiding the race
	 * condition of fat_get_block() and ->truncate().
	 */
-	ret = blockdev_direct_IO_newtrunc(rw, iocb, inode, inode->i_sb->s_bdev,
-				iov, offset, nr_segs, fat_get_block, NULL);
+	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
+				iov, offset, nr_segs, fat_get_block, NULL);
 	if (ret < 0 && (rw & WRITE))
 		fat_write_failed(mapping, offset + iov_length(iov, nr_segs));


@@ -1047,9 +1047,9 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 	if (rv != 1)
 		goto out; /* dio not valid, fall back to buffered i/o */
 
-	rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
-					   iov, offset, nr_segs,
-					   gfs2_get_block_direct, NULL);
+	rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+				  offset, nr_segs, gfs2_get_block_direct,
+				  NULL, NULL, 0);
 out:
 	gfs2_glock_dq_m(1, &gh);
 	gfs2_holder_uninit(&gh);


@@ -112,9 +112,24 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
+	ssize_t ret;
 
-	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, hfs_get_block, NULL);
+
+	/*
+	 * In case of error extending write may have instantiated a few
+	 * blocks outside i_size. Trim these off again.
+	 */
+	if (unlikely((rw & WRITE) && ret < 0)) {
+		loff_t isize = i_size_read(inode);
+		loff_t end = offset + iov_length(iov, nr_segs);
+
+		if (end > isize)
+			vmtruncate(inode, isize);
+	}
+
+	return ret;
 }
 
 static int hfs_writepages(struct address_space *mapping,


@@ -105,9 +105,24 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
+	ssize_t ret;
 
-	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, hfsplus_get_block, NULL);
+
+	/*
+	 * In case of error extending write may have instantiated a few
+	 * blocks outside i_size. Trim these off again.
+	 */
+	if (unlikely((rw & WRITE) && ret < 0)) {
+		loff_t isize = i_size_read(inode);
+		loff_t end = offset + iov_length(iov, nr_segs);
+
+		if (end > isize)
+			vmtruncate(inode, isize);
+	}
+
+	return ret;
 }
 
 static int hfsplus_writepages(struct address_space *mapping,


@@ -317,9 +317,24 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
+	ssize_t ret;
 
-	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				offset, nr_segs, jfs_get_block, NULL);
+
+	/*
+	 * In case of error extending write may have instantiated a few
+	 * blocks outside i_size. Trim these off again.
+	 */
+	if (unlikely((rw & WRITE) && ret < 0)) {
+		loff_t isize = i_size_read(inode);
+		loff_t end = offset + iov_length(iov, nr_segs);
+
+		if (end > isize)
+			vmtruncate(inode, isize);
+	}
+
+	return ret;
 }
 
 const struct address_space_operations jfs_aops = {


@@ -237,6 +237,19 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	/* Needs synchronization with the cleaner */
 	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, nilfs_get_block, NULL);
+
+	/*
+	 * In case of error extending write may have instantiated a few
+	 * blocks outside i_size. Trim these off again.
+	 */
+	if (unlikely((rw & WRITE) && size < 0)) {
+		loff_t isize = i_size_read(inode);
+		loff_t end = offset + iov_length(iov, nr_segs);
+
+		if (end > isize)
+			vmtruncate(inode, isize);
+	}
+
 	return size;
 }
 


@@ -643,11 +643,10 @@ static ssize_t ocfs2_direct_IO(int rw,
 	if (i_size_read(inode) <= offset)
 		return 0;
 
-	ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
-					    inode->i_sb->s_bdev, iov, offset,
-					    nr_segs,
-					    ocfs2_direct_IO_get_blocks,
-					    ocfs2_dio_end_io);
+	ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
+				   iov, offset, nr_segs,
+				   ocfs2_direct_IO_get_blocks,
+				   ocfs2_dio_end_io, NULL, 0);
 
 	mlog_exit(ret);
 	return ret;


@@ -3057,10 +3057,25 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
+	ssize_t ret;
 
-	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-				  offset, nr_segs,
-				  reiserfs_get_blocks_direct_io, NULL);
+	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs,
				  reiserfs_get_blocks_direct_io, NULL);
+
+	/*
+	 * In case of error extending write may have instantiated a few
+	 * blocks outside i_size. Trim these off again.
+	 */
+	if (unlikely((rw & WRITE) && ret < 0)) {
+		loff_t isize = i_size_read(inode);
+		loff_t end = offset + iov_length(iov, nr_segs);
+
+		if (end > isize)
+			vmtruncate(inode, isize);
+	}
+
+	return ret;
 }
 
 int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)


@@ -1478,17 +1478,17 @@ xfs_vm_direct_IO(
 
 	if (rw & WRITE) {
 		iocb->private = xfs_alloc_ioend(inode, IO_NEW);
-		ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
-						    offset, nr_segs,
-						    xfs_get_blocks_direct,
-						    xfs_end_io_direct_write);
+		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
+					   xfs_end_io_direct_write, NULL, 0);
 		if (ret != -EIOCBQUEUED && iocb->private)
 			xfs_destroy_ioend(iocb->private);
 	} else {
-		ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
-						    offset, nr_segs,
-						    xfs_get_blocks_direct,
-						    NULL);
+		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
+					   NULL, NULL, 0);
 	}
 
 	return ret;


@@ -2269,16 +2269,6 @@ static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
 struct bio;
 typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
			    loff_t file_offset);
-void dio_end_io(struct bio *bio, int error);
-
-ssize_t __blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, const struct iovec *iov, loff_t offset,
-	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-	dio_submit_t submit_io, int lock_type);
-ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, const struct iovec *iov, loff_t offset,
-	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-	dio_submit_t submit_io, int lock_type);
 
 enum {
 	/* need locking between buffered and direct access */
@@ -2288,24 +2278,13 @@ enum {
 	DIO_SKIP_HOLES	= 0x02,
 };
 
-static inline ssize_t blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb,
-	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs, get_block_t get_block,
-	dio_iodone_t end_io)
-{
-	return __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov, offset,
-				nr_segs, get_block, end_io, NULL,
-				DIO_LOCKING | DIO_SKIP_HOLES);
-}
-
-static inline ssize_t blockdev_direct_IO_no_locking_newtrunc(int rw, struct kiocb *iocb,
-	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs, get_block_t get_block,
-	dio_iodone_t end_io)
-{
-	return __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov, offset,
-				nr_segs, get_block, end_io, NULL, 0);
-}
+void dio_end_io(struct bio *bio, int error);
+
+ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
+	struct block_device *bdev, const struct iovec *iov, loff_t offset,
+	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
+	dio_submit_t submit_io, int flags);
+
 static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
 	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
 	loff_t offset, unsigned long nr_segs, get_block_t get_block,
@@ -2315,15 +2294,6 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
				nr_segs, get_block, end_io, NULL,
				DIO_LOCKING | DIO_SKIP_HOLES);
 }
-
-static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
-	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs, get_block_t get_block,
-	dio_iodone_t end_io)
-{
-	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-				nr_segs, get_block, end_io, NULL, 0);
-}
 #endif
 
 extern const struct file_operations generic_ro_fops;