iomap: generic inline data handling
Add generic inline data handling by adding a pointer to the inline data
region to struct iomap.  When handling a buffered IOMAP_INLINE write,
iomap_write_begin will copy the current inline data from the inline data
region into the page cache, and iomap_write_end will copy the changes in
the page cache back to the inline data region.

This doesn't cover inline data reads and direct I/O yet because so far,
we have no users.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
[hch: small cleanups to better fit in with other iomap work]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent ebf00be37d
commit 19e0c58f65

fs/iomap.c | 62
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -103,6 +103,26 @@ iomap_sector(struct iomap *iomap, loff_t pos)
 	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
 }
 
+static void
+iomap_read_inline_data(struct inode *inode, struct page *page,
+		struct iomap *iomap)
+{
+	size_t size = i_size_read(inode);
+	void *addr;
+
+	if (PageUptodate(page))
+		return;
+
+	BUG_ON(page->index);
+	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
+
+	addr = kmap_atomic(page);
+	memcpy(addr, iomap->inline_data, size);
+	memset(addr + size, 0, PAGE_SIZE - size);
+	kunmap_atomic(addr);
+	SetPageUptodate(page);
+}
+
 static void
 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
 {
@@ -133,7 +153,11 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 	if (!page)
 		return -ENOMEM;
 
-	status = __block_write_begin_int(page, pos, len, NULL, iomap);
+	if (iomap->type == IOMAP_INLINE)
+		iomap_read_inline_data(inode, page, iomap);
+	else
+		status = __block_write_begin_int(page, pos, len, NULL, iomap);
+
 	if (unlikely(status)) {
 		unlock_page(page);
 		put_page(page);
@@ -146,14 +170,37 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 	return status;
 }
 
+static int
+iomap_write_end_inline(struct inode *inode, struct page *page,
+		struct iomap *iomap, loff_t pos, unsigned copied)
+{
+	void *addr;
+
+	WARN_ON_ONCE(!PageUptodate(page));
+	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
+
+	addr = kmap_atomic(page);
+	memcpy(iomap->inline_data + pos, addr + pos, copied);
+	kunmap_atomic(addr);
+
+	mark_inode_dirty(inode);
+	__generic_write_end(inode, pos, copied, page);
+	return copied;
+}
+
 static int
 iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
-		unsigned copied, struct page *page)
+		unsigned copied, struct page *page, struct iomap *iomap)
 {
 	int ret;
 
-	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
-			copied, page, NULL);
+	if (iomap->type == IOMAP_INLINE) {
+		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
+	} else {
+		ret = generic_write_end(NULL, inode->i_mapping, pos, len,
+				copied, page, NULL);
+	}
+
 	if (ret < len)
 		iomap_write_failed(inode, pos, len);
 	return ret;
@@ -208,7 +255,8 @@ again:
 
 		flush_dcache_page(page);
 
-		status = iomap_write_end(inode, pos, bytes, copied, page);
+		status = iomap_write_end(inode, pos, bytes, copied, page,
+				iomap);
 		if (unlikely(status < 0))
 			break;
 		copied = status;
@@ -302,7 +350,7 @@ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 
 		WARN_ON_ONCE(!PageUptodate(page));
 
-		status = iomap_write_end(inode, pos, bytes, bytes, page);
+		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
 		if (unlikely(status <= 0)) {
 			if (WARN_ON_ONCE(status == 0))
 				return -EIO;
@@ -354,7 +402,7 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
 	zero_user(page, offset, bytes);
 	mark_page_accessed(page);
 
-	return iomap_write_end(inode, pos, bytes, bytes, page);
+	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
 }
 
 static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -55,6 +55,7 @@ struct iomap {
 	u16			flags;	/* flags for mapping */
 	struct block_device	*bdev;	/* block device for I/O */
 	struct dax_device	*dax_dev; /* dax_dev for dax operations */
+	void			*inline_data;
 };
 
 /*
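For orientation (not part of the patch): a filesystem opts into this path from its ->iomap_begin operation by returning an IOMAP_INLINE mapping whose inline_data points at its in-core copy of the inline region, for example for gfs2-style "stuffed" inodes. A minimal sketch of what that might look like follows, written against the ->iomap_begin prototype of this era; the foo_* helpers, the i_inline buffer, and FOO_INLINE_DATA_SIZE are made-up placeholders rather than real kernel APIs.

/*
 * Hypothetical sketch only: foo_inode_info, foo_inode(), foo_is_inline(),
 * FOO_INLINE_DATA_SIZE, ->i_inline and foo_block_map() are illustrative
 * placeholders.  Note that the inline region must fit within one page
 * from its starting offset, matching the BUG_ON()s in
 * iomap_read_inline_data() and iomap_write_end_inline() above.
 */
static int foo_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	struct foo_inode_info *fi = foo_inode(inode);

	iomap->bdev = inode->i_sb->s_bdev;

	if (foo_is_inline(fi)) {
		iomap->type = IOMAP_INLINE;
		iomap->addr = IOMAP_NULL_ADDR;	/* no block address */
		iomap->offset = 0;
		iomap->length = FOO_INLINE_DATA_SIZE;
		/*
		 * iomap_write_begin() copies from here into the page cache;
		 * iomap_write_end() copies the result back and marks the
		 * inode dirty.
		 */
		iomap->inline_data = fi->i_inline;
		return 0;
	}

	/* Normal block-mapped case, elided in this sketch. */
	return foo_block_map(inode, pos, length, flags, iomap);
}

With such a mapping in place, a buffered write through iomap_file_buffered_write() hits the new IOMAP_INLINE branches above: iomap_write_begin() seeds the page from inline_data, and iomap_write_end() copies the modified bytes back and dirties the inode.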