forked from Minki/linux
fsdax: decouple zeroing from the iomap buffered I/O code
Unshare the DAX and iomap buffered I/O page zeroing code. This code previously did an IS_DAX check deep inside the iomap code, which in fact was the only DAX check in the code. Instead move these checks into the callers. Most callers already have DAX special casing anyway and XFS will need it for reflink support as well. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Dan Williams <dan.j.williams@intel.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Link: https://lore.kernel.org/r/20211129102203.2243509-19-hch@lst.de Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
parent
e5c71954ca
commit
c6f4046865
77
fs/dax.c
77
fs/dax.c
@@ -1135,25 +1135,74 @@ static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
|
||||
return ret;
|
||||
}
|
||||
|
||||
s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
|
||||
static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
|
||||
{
|
||||
pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
|
||||
long rc, id;
|
||||
unsigned offset = offset_in_page(pos);
|
||||
unsigned size = min_t(u64, PAGE_SIZE - offset, length);
|
||||
const struct iomap *iomap = &iter->iomap;
|
||||
const struct iomap *srcmap = iomap_iter_srcmap(iter);
|
||||
loff_t pos = iter->pos;
|
||||
u64 length = iomap_length(iter);
|
||||
s64 written = 0;
|
||||
|
||||
id = dax_read_lock();
|
||||
if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
|
||||
rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
|
||||
else
|
||||
rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
|
||||
dax_read_unlock(id);
|
||||
/* already zeroed? we're done. */
|
||||
if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
|
||||
return length;
|
||||
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
return size;
|
||||
do {
|
||||
unsigned offset = offset_in_page(pos);
|
||||
unsigned size = min_t(u64, PAGE_SIZE - offset, length);
|
||||
pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
|
||||
long rc;
|
||||
int id;
|
||||
|
||||
id = dax_read_lock();
|
||||
if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
|
||||
rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
|
||||
else
|
||||
rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
|
||||
dax_read_unlock(id);
|
||||
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
pos += size;
|
||||
length -= size;
|
||||
written += size;
|
||||
if (did_zero)
|
||||
*did_zero = true;
|
||||
} while (length > 0);
|
||||
|
||||
return written;
|
||||
}
|
||||
|
||||
int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
|
||||
const struct iomap_ops *ops)
|
||||
{
|
||||
struct iomap_iter iter = {
|
||||
.inode = inode,
|
||||
.pos = pos,
|
||||
.len = len,
|
||||
.flags = IOMAP_ZERO,
|
||||
};
|
||||
int ret;
|
||||
|
||||
while ((ret = iomap_iter(&iter, ops)) > 0)
|
||||
iter.processed = dax_zero_iter(&iter, did_zero);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dax_zero_range);
|
||||
|
||||
int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
|
||||
const struct iomap_ops *ops)
|
||||
{
|
||||
unsigned int blocksize = i_blocksize(inode);
|
||||
unsigned int off = pos & (blocksize - 1);
|
||||
|
||||
/* Block boundary? Nothing to do */
|
||||
if (!off)
|
||||
return 0;
|
||||
return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dax_truncate_page);
|
||||
|
||||
static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
|
||||
struct iov_iter *iter)
|
||||
{
|
||||
|
@@ -36,6 +36,7 @@
|
||||
#include <linux/iomap.h>
|
||||
#include <linux/namei.h>
|
||||
#include <linux/uio.h>
|
||||
#include <linux/dax.h>
|
||||
#include "ext2.h"
|
||||
#include "acl.h"
|
||||
#include "xattr.h"
|
||||
@@ -1297,9 +1298,9 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
|
||||
inode_dio_wait(inode);
|
||||
|
||||
if (IS_DAX(inode)) {
|
||||
error = iomap_zero_range(inode, newsize,
|
||||
PAGE_ALIGN(newsize) - newsize, NULL,
|
||||
&ext2_iomap_ops);
|
||||
error = dax_zero_range(inode, newsize,
|
||||
PAGE_ALIGN(newsize) - newsize, NULL,
|
||||
&ext2_iomap_ops);
|
||||
} else if (test_opt(inode->i_sb, NOBH))
|
||||
error = nobh_truncate_page(inode->i_mapping,
|
||||
newsize, ext2_get_block);
|
||||
|
@@ -41,6 +41,7 @@
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/iomap.h>
|
||||
#include <linux/iversion.h>
|
||||
#include <linux/dax.h>
|
||||
|
||||
#include "ext4_jbd2.h"
|
||||
#include "xattr.h"
|
||||
@@ -3780,8 +3781,8 @@ static int ext4_block_zero_page_range(handle_t *handle,
|
||||
length = max;
|
||||
|
||||
if (IS_DAX(inode)) {
|
||||
return iomap_zero_range(inode, from, length, NULL,
|
||||
&ext4_iomap_ops);
|
||||
return dax_zero_range(inode, from, length, NULL,
|
||||
&ext4_iomap_ops);
|
||||
}
|
||||
return __ext4_block_zero_page_range(handle, mapping, from, length);
|
||||
}
|
||||
|
@@ -870,26 +870,8 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_file_unshare);
|
||||
|
||||
static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
|
||||
{
|
||||
struct page *page;
|
||||
int status;
|
||||
unsigned offset = offset_in_page(pos);
|
||||
unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
|
||||
|
||||
status = iomap_write_begin(iter, pos, bytes, &page);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
zero_user(page, offset, bytes);
|
||||
mark_page_accessed(page);
|
||||
|
||||
return iomap_write_end(iter, pos, bytes, bytes, page);
|
||||
}
|
||||
|
||||
static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
|
||||
{
|
||||
struct iomap *iomap = &iter->iomap;
|
||||
const struct iomap *srcmap = iomap_iter_srcmap(iter);
|
||||
loff_t pos = iter->pos;
|
||||
loff_t length = iomap_length(iter);
|
||||
@@ -900,12 +882,19 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
|
||||
return length;
|
||||
|
||||
do {
|
||||
s64 bytes;
|
||||
unsigned offset = offset_in_page(pos);
|
||||
size_t bytes = min_t(u64, PAGE_SIZE - offset, length);
|
||||
struct page *page;
|
||||
int status;
|
||||
|
||||
if (IS_DAX(iter->inode))
|
||||
bytes = dax_iomap_zero(pos, length, iomap);
|
||||
else
|
||||
bytes = __iomap_zero_iter(iter, pos, length);
|
||||
status = iomap_write_begin(iter, pos, bytes, &page);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
zero_user(page, offset, bytes);
|
||||
mark_page_accessed(page);
|
||||
|
||||
bytes = iomap_write_end(iter, pos, bytes, bytes, page);
|
||||
if (bytes < 0)
|
||||
return bytes;
|
||||
|
||||
|
@@ -28,7 +28,6 @@
|
||||
#include "xfs_dquot.h"
|
||||
#include "xfs_reflink.h"
|
||||
|
||||
|
||||
#define XFS_ALLOC_ALIGN(mp, off) \
|
||||
(((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
|
||||
|
||||
@@ -1321,6 +1320,9 @@ xfs_zero_range(
|
||||
{
|
||||
struct inode *inode = VFS_I(ip);
|
||||
|
||||
if (IS_DAX(inode))
|
||||
return dax_zero_range(inode, pos, len, did_zero,
|
||||
&xfs_buffered_write_iomap_ops);
|
||||
return iomap_zero_range(inode, pos, len, did_zero,
|
||||
&xfs_buffered_write_iomap_ops);
|
||||
}
|
||||
@@ -1333,6 +1335,9 @@ xfs_truncate_page(
|
||||
{
|
||||
struct inode *inode = VFS_I(ip);
|
||||
|
||||
if (IS_DAX(inode))
|
||||
return dax_truncate_page(inode, pos, did_zero,
|
||||
&xfs_buffered_write_iomap_ops);
|
||||
return iomap_truncate_page(inode, pos, did_zero,
|
||||
&xfs_buffered_write_iomap_ops);
|
||||
}
|
||||
|
@@ -14,6 +14,7 @@ typedef unsigned long dax_entry_t;
|
||||
struct dax_device;
|
||||
struct gendisk;
|
||||
struct iomap_ops;
|
||||
struct iomap_iter;
|
||||
struct iomap;
|
||||
|
||||
struct dax_operations {
|
||||
@@ -170,6 +171,11 @@ static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
|
||||
}
|
||||
#endif
|
||||
|
||||
int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
|
||||
const struct iomap_ops *ops);
|
||||
int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
|
||||
const struct iomap_ops *ops);
|
||||
|
||||
#if IS_ENABLED(CONFIG_DAX)
|
||||
int dax_read_lock(void);
|
||||
void dax_read_unlock(int id);
|
||||
@@ -204,7 +210,6 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
|
||||
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
|
||||
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
|
||||
pgoff_t index);
|
||||
s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap);
|
||||
static inline bool dax_mapping(struct address_space *mapping)
|
||||
{
|
||||
return mapping->host && IS_DAX(mapping->host);
|
||||
|
Loading…
Reference in New Issue
Block a user