netfs: Remove unnecessary references to pages
These places should all use folios instead of pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://lore.kernel.org/r/20241005182307.3190401-4-willy@infradead.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit e995e8b600
parent c6a90fe7f0
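Every hunk below applies the same mechanical substitution: code that reached into a folio for its head page, as zero_user_segment(&folio->page, ...), now calls the folio-native helper, folio_zero_segment(folio, ...). A minimal sketch of the two forms, assuming a kernel build context; the wrapper functions and the tail-zeroing use case are illustrative, not taken from the patch:

#include <linux/highmem.h>
#include <linux/mm.h>

/* Old form: reach through the folio to its head page. */
static void example_zero_tail_old(struct folio *folio, size_t data_end)
{
	zero_user_segment(&folio->page, data_end, folio_size(folio));
}

/* New form: express the same zeroing against the folio itself.
 * folio_zero_segment() funnels into the same underlying zeroing code,
 * so behaviour is unchanged; only the type usage is cleaner.
 */
static void example_zero_tail_new(struct folio *folio, size_t data_end)
{
	folio_zero_segment(folio, data_end, folio_size(folio));
}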
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -646,7 +646,7 @@ static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
 	if (unlikely(always_fill)) {
 		if (pos - offset + len <= i_size)
 			return false; /* Page entirely before EOF */
-		zero_user_segment(&folio->page, 0, plen);
+		folio_zero_segment(folio, 0, plen);
 		folio_mark_uptodate(folio);
 		return true;
 	}
@@ -665,7 +665,7 @@ static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
 
 	return false;
 zero_out:
-	zero_user_segments(&folio->page, 0, offset, offset + len, plen);
+	folio_zero_segments(folio, 0, offset, offset + len, plen);
 	return true;
 }
 
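The zero_out path above uses the two-range variant: one call clears both the bytes ahead of and the bytes behind the span that is about to be written, leaving only [offset, offset + len) for the copy. A sketch under the same assumptions (the names offset, len and plen mirror the hunk; the wrapper and the use of folio_size() for plen are illustrative):

#include <linux/highmem.h>
#include <linux/mm.h>

/* Zero [0, offset) and [offset + len, plen) around a pending write. */
static void example_zero_around(struct folio *folio, size_t offset, size_t len)
{
	size_t plen = folio_size(folio);

	folio_zero_segments(folio, 0, offset, offset + len, plen);
}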
@@ -732,7 +732,7 @@ retry:
 	if (folio_test_uptodate(folio))
 		goto have_folio;
 
-	/* If the page is beyond the EOF, we want to clear it - unless it's
+	/* If the folio is beyond the EOF, we want to clear it - unless it's
 	 * within the cache granule containing the EOF, in which case we need
 	 * to preload the granule.
 	 */
@@ -792,7 +792,7 @@ error:
 EXPORT_SYMBOL(netfs_write_begin);
 
 /*
- * Preload the data into a page we're proposing to write into.
+ * Preload the data into a folio we're proposing to write into.
  */
 int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 			     size_t offset, size_t len)
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -83,13 +83,13 @@ static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
  * netfs_perform_write - Copy data into the pagecache.
  * @iocb: The operation parameters
  * @iter: The source buffer
- * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
+ * @netfs_group: Grouping for dirty folios (eg. ceph snaps).
  *
- * Copy data into pagecache pages attached to the inode specified by @iocb.
+ * Copy data into pagecache folios attached to the inode specified by @iocb.
  * The caller must hold appropriate inode locks.
  *
- * Dirty pages are tagged with a netfs_folio struct if they're not up to date
- * to indicate the range modified. Dirty pages may also be tagged with a
+ * Dirty folios are tagged with a netfs_folio struct if they're not up to date
+ * to indicate the range modified. Dirty folios may also be tagged with a
  * netfs-specific grouping such that data from an old group gets flushed before
  * a new one is started.
  */
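The grouping this comment describes means ordering state travels with each dirty folio, so writeback can flush an old group before dirtying for a new one begins. As a conceptual sketch only, the struct and helper below are simplified stand-ins rather than the netfs definitions, which keep richer state in folio private data:

#include <linux/pagemap.h>
#include <linux/refcount.h>

struct example_group {
	refcount_t ref;		/* held while any folio points at the group */
};

/* Tag a dirty folio with its group so older groups get flushed first. */
static void example_tag_folio(struct folio *folio, struct example_group *group)
{
	refcount_inc(&group->ref);
	folio_attach_private(folio, group);
}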
@@ -223,11 +223,11 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		 * we try to read it.
 		 */
 		if (fpos >= ctx->zero_point) {
-			zero_user_segment(&folio->page, 0, offset);
+			folio_zero_segment(folio, 0, offset);
 			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
 			if (unlikely(copied == 0))
 				goto copy_failed;
-			zero_user_segment(&folio->page, offset + copied, flen);
+			folio_zero_segment(folio, offset + copied, flen);
 			__netfs_set_group(folio, netfs_group);
 			folio_mark_uptodate(folio);
 			trace_netfs_folio(folio, netfs_modify_and_clear);
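This hunk is the write-path instance of the conversion: when the write begins at or beyond zero_point, nothing on the server below that point is worth reading back, so the folio is zeroed either side of the copied bytes and marked uptodate without issuing a read. A condensed sketch under the same assumptions (names follow the hunk; the wrapper, and the omission of the group and trace calls, are illustrative):

#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

static size_t example_modify_and_clear(struct folio *folio, size_t offset,
				       size_t part, size_t flen,
				       struct iov_iter *iter)
{
	size_t copied;

	folio_zero_segment(folio, 0, offset);		/* zero the head */
	copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
	if (unlikely(copied == 0))
		return 0;				/* let the caller retry */
	folio_zero_segment(folio, offset + copied, flen); /* zero the tail */
	folio_mark_uptodate(folio);
	return copied;
}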
@@ -407,7 +407,7 @@ EXPORT_SYMBOL(netfs_perform_write);
  * netfs_buffered_write_iter_locked - write data to a file
  * @iocb: IO state structure (file, offset, etc.)
  * @from: iov_iter with data to write
- * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
+ * @netfs_group: Grouping for dirty folios (eg. ceph snaps).
  *
  * This function does all the work needed for actually writing data to a
  * file. It does all basic checks, removes SUID from the file, updates