mirror of https://github.com/torvalds/linux.git
xfs: use shmem_get_folio in xfile_load
Switch to using shmem_get_folio in xfile_load instead of using shmem_read_mapping_page_gfp. This gets us support for large folios and also optimized reading from unallocated space, as shmem_get_folio with SGP_READ won't allocate a page for them just to zero the content.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
parent fd2634e2dd
commit e97d70a573
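The benefit for sparse xfiles comes from the SGP_READ semantics: shmem_get_folio() can succeed yet hand back a NULL folio when nothing is stored at the requested index, so a read of a hole never instantiates a page the way shmem_read_mapping_page_gfp() did, which is also why the NOTE about sparse xfiles inflating memory consumption can go away (first hunk below). A minimal sketch of that probe, not taken from this patch; the helper name is made up and the four-argument shmem_get_folio() signature used by this patch is assumed:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

/*
 * Sketch only: report whether anything is stored at @index of a shmem file.
 * With SGP_READ a hole stays a hole; the old read path would have had to
 * instantiate a zeroed page just to answer the question.
 */
static int sketch_shmem_index_is_hole(struct inode *inode, pgoff_t index)
{
        struct folio *folio;
        int error;

        error = shmem_get_folio(inode, index, &folio, SGP_READ);
        if (error)
                return error;
        if (!folio)
                return 1;       /* nothing allocated at this index */

        /* A non-NULL folio comes back locked and with a reference held. */
        folio_unlock(folio);
        folio_put(folio);
        return 0;
}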
@@ -34,13 +34,6 @@
  * xfiles assume that the caller will handle all required concurrency
  * management; standard vfs locks (freezer and inode) are not taken. Reads
  * and writes are satisfied directly from the page cache.
- *
- * NOTE: The current shmemfs implementation has a quirk that in-kernel reads
- * of a hole cause a page to be mapped into the file. If you are going to
- * create a sparse xfile, please be careful about reading from uninitialized
- * parts of the file. These pages are !Uptodate and will eventually be
- * reclaimed if not written, but in the short term this boosts memory
- * consumption.
  */
 
 /*
@@ -118,10 +111,7 @@ xfile_load(
         loff_t                  pos)
 {
         struct inode            *inode = file_inode(xf->file);
-        struct address_space    *mapping = inode->i_mapping;
-        struct page             *page = NULL;
         unsigned int            pflags;
-        int                     error = 0;
 
         if (count > MAX_RW_COUNT)
                 return -ENOMEM;
@@ -132,43 +122,44 @@ xfile_load(
 
         pflags = memalloc_nofs_save();
         while (count > 0) {
+                struct folio    *folio;
                 unsigned int    len;
+                unsigned int    offset;
 
-                len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));
-
-                /*
-                 * In-kernel reads of a shmem file cause it to allocate a page
-                 * if the mapping shows a hole.  Therefore, if we hit ENOMEM
-                 * we can continue by zeroing the caller's buffer.
-                 */
-                page = shmem_read_mapping_page_gfp(mapping, pos >> PAGE_SHIFT,
-                                __GFP_NOWARN);
-                if (IS_ERR(page)) {
-                        error = PTR_ERR(page);
-                        if (error != -ENOMEM) {
-                                error = -ENOMEM;
+                if (shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio,
+                                SGP_READ) < 0)
+                        break;
+                if (!folio) {
+                        /*
+                         * No data stored at this offset, just zero the output
+                         * buffer until the next page boundary.
+                         */
+                        len = min_t(ssize_t, count,
+                                        PAGE_SIZE - offset_in_page(pos));
+                        memset(buf, 0, len);
+                } else {
+                        if (filemap_check_wb_err(inode->i_mapping, 0)) {
+                                folio_unlock(folio);
+                                folio_put(folio);
                                 break;
                         }
 
-                        memset(buf, 0, len);
-                        goto advance;
+                        offset = offset_in_folio(folio, pos);
+                        len = min_t(ssize_t, count, folio_size(folio) - offset);
+                        memcpy(buf, folio_address(folio) + offset, len);
+
+                        folio_unlock(folio);
+                        folio_put(folio);
                 }
-
-                /*
-                 * xfile pages must never be mapped into userspace, so
-                 * we skip the dcache flush.
-                 */
-                memcpy(buf, page_address(page) + offset_in_page(pos), len);
-                put_page(page);
-
-advance:
                 count -= len;
                 pos += len;
                 buf += len;
         }
         memalloc_nofs_restore(pflags);
 
-        return error;
+        if (count)
+                return -ENOMEM;
+        return 0;
 }
 
 /*
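Because shmem can now return large folios, one pass through the loop may satisfy more than PAGE_SIZE of the read; the copy length is bounded by the end of the current folio via offset_in_folio() and folio_size() rather than by the end of a page. A standalone sketch of just that copy step, not part of the patch (the helper name is made up, and folio_address() is only safe because xfile mappings never contain highmem folios):

#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Sketch only: copy out of a folio returned by shmem_get_folio(..., SGP_READ).
 * A large folio can back more than PAGE_SIZE of the file, so the copy length
 * is limited by the end of the folio, not the end of a page.
 */
static size_t sketch_copy_from_folio(struct folio *folio, loff_t pos,
                void *buf, size_t count)
{
        size_t offset = offset_in_folio(folio, pos);
        size_t len = min_t(size_t, count, folio_size(folio) - offset);

        /* Assumes the mapping cannot contain highmem folios, as with xfiles. */
        memcpy(buf, folio_address(folio) + offset, len);
        return len;
}

The caller then advances buf, pos, and count by the returned length, exactly as the loop in the hunk above does.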