Page cache changes for 5.19

Merge tag 'folio-5.19' of git://git.infradead.org/users/willy/pagecache

Pull page cache updates from Matthew Wilcox:

 - Appoint myself page cache maintainer

 - Fix how scsicam uses the page cache

 - Use the memalloc_nofs_save() API to replace AOP_FLAG_NOFS

 - Remove the AOP flags entirely

 - Remove pagecache_write_begin() and pagecache_write_end()

 - Documentation updates

 - Convert several address_space operations to use folios (a
   before/after sketch follows this list):
     - is_dirty_writeback
     - readpage becomes read_folio
     - releasepage becomes release_folio
     - freepage becomes free_folio

 - Change filler_t to require a struct file pointer be the first
   argument like ->read_folio
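
A before/after sketch of the central conversion, modelled on the adfs, bfs
and blkdev hunks in this merge (the example_* names are illustrative, not
from the series):

	/* Before 5.19: page-based read */
	static int example_readpage(struct file *file, struct page *page)
	{
		return block_read_full_page(page, example_get_block);
	}

	/* After 5.19: folio-based read, registered as .read_folio */
	static int example_read_folio(struct file *file, struct folio *folio)
	{
		return block_read_full_folio(folio, example_get_block);
	}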

* tag 'folio-5.19' of git://git.infradead.org/users/willy/pagecache: (107 commits)
  nilfs2: Fix some kernel-doc comments
  Appoint myself page cache maintainer
  fs: Remove aops->freepage
  secretmem: Convert to free_folio
  nfs: Convert to free_folio
  orangefs: Convert to free_folio
  fs: Add free_folio address space operation
  fs: Convert drop_buffers() to use a folio
  fs: Change try_to_free_buffers() to take a folio
  jbd2: Convert release_buffer_page() to use a folio
  jbd2: Convert jbd2_journal_try_to_free_buffers to take a folio
  reiserfs: Convert release_buffer_page() to use a folio
  fs: Remove last vestiges of releasepage
  ubifs: Convert to release_folio
  reiserfs: Convert to release_folio
  orangefs: Convert to release_folio
  ocfs2: Convert to release_folio
  nilfs2: Remove comment about releasepage
  nfs: Convert to release_folio
  jfs: Convert to release_folio
  ...
commit fdaf9a5840 by Linus Torvalds, 2022-05-24 19:55:07 -07:00
161 changed files with 1233 additions and 1221 deletions

@@ -433,11 +433,11 @@ has done a write and then the page it wrote from has been released by the VM,
 after which it *has* to look in the cache.
 To inform fscache that a page might now be in the cache, the following function
-should be called from the ``releasepage`` address space op::
+should be called from the ``release_folio`` address space op::
 	void fscache_note_page_release(struct fscache_cookie *cookie);
-if the page has been released (ie. releasepage returned true).
+if the page has been released (ie. release_folio returned true).
 Page release and page invalidation should also wait for any mark left on the
 page to say that a DIO write is underway from that page::
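
A minimal ->release_folio() following this rule might look like the 9p
implementation later in this merge (a sketch; example_inode_cookie() stands
in for the filesystem's own cookie accessor and is not a real API):

	static bool example_release_folio(struct folio *folio, gfp_t gfp)
	{
		if (folio_test_private(folio))
			return false;	/* private data still attached */
		if (folio_test_fscache(folio)) {
			if (current_is_kswapd() || !(gfp & __GFP_FS))
				return false;	/* cannot wait in this context */
			folio_wait_fscache(folio);
		}
		fscache_note_page_release(example_inode_cookie(folio_inode(folio)));
		return true;
	}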

@@ -1256,7 +1256,7 @@ inline encryption hardware will encrypt/decrypt the file contents.
 When inline encryption isn't used, filesystems must encrypt/decrypt
 the file contents themselves, as described below:
-For the read path (->readpage()) of regular files, filesystems can
+For the read path (->read_folio()) of regular files, filesystems can
 read the ciphertext into the page cache and decrypt it in-place. The
 page lock must be held until decryption has finished, to prevent the
 page from becoming visible to userspace prematurely.
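
As a sketch of that ordering (not code from this series): the decryption
step sits between I/O completion and marking the folio uptodate, while the
lock is still held. fscrypt_decrypt_pagecache_blocks() is the real fscrypt
helper; the surrounding function is illustrative.

	static void example_read_done(struct folio *folio)
	{
		int err = fscrypt_decrypt_pagecache_blocks(&folio->page,
							   folio_size(folio), 0);
		if (!err)
			folio_mark_uptodate(folio);
		folio_unlock(folio);	/* only now may userspace see it */
	}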

@@ -559,7 +559,7 @@ already verified). Below, we describe how filesystems implement this.
 Pagecache
 ~~~~~~~~~
-For filesystems using Linux's pagecache, the ``->readpage()`` and
+For filesystems using Linux's pagecache, the ``->read_folio()`` and
 ``->readahead()`` methods must be modified to verify pages before they
 are marked Uptodate. Merely hooking ``->read_iter()`` would be
 insufficient, since ``->read_iter()`` is not used for memory maps.
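
A sketch of the ordering this requires (illustrative, not from this series;
fsverity_active() and fsverity_verify_page() are the real fs/verity hooks):

	static void example_finish_read(struct folio *folio)
	{
		struct inode *inode = folio->mapping->host;

		if (fsverity_active(inode) &&
		    !fsverity_verify_page(&folio->page))
			folio_set_error(folio);	/* do not mark uptodate */
		else
			folio_mark_uptodate(folio);
		folio_unlock(folio);
	}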

@@ -237,20 +237,20 @@ address_space_operations
 prototypes::
 	int (*writepage)(struct page *page, struct writeback_control *wbc);
-	int (*readpage)(struct file *, struct page *);
+	int (*read_folio)(struct file *, struct folio *);
 	int (*writepages)(struct address_space *, struct writeback_control *);
 	bool (*dirty_folio)(struct address_space *, struct folio *folio);
 	void (*readahead)(struct readahead_control *);
 	int (*write_begin)(struct file *, struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned flags,
+				loff_t pos, unsigned len,
 				struct page **pagep, void **fsdata);
 	int (*write_end)(struct file *, struct address_space *mapping,
 				loff_t pos, unsigned len, unsigned copied,
 				struct page *page, void *fsdata);
 	sector_t (*bmap)(struct address_space *, sector_t);
 	void (*invalidate_folio) (struct folio *, size_t start, size_t len);
-	int (*releasepage) (struct page *, int);
-	void (*freepage)(struct page *);
+	bool (*release_folio)(struct folio *, gfp_t);
+	void (*free_folio)(struct folio *);
 	int (*direct_IO)(struct kiocb *, struct iov_iter *iter);
 	bool (*isolate_page) (struct page *, isolate_mode_t);
 	int (*migratepage)(struct address_space *, struct page *, struct page *);
@@ -262,22 +262,22 @@ prototypes::
 	int (*swap_deactivate)(struct file *);
 locking rules:
-	All except dirty_folio and freepage may block
+	All except dirty_folio and free_folio may block
 ====================== ======================== ========= ===============
-ops			PageLocked(page)	 i_rwsem   invalidate_lock
+ops			folio locked		 i_rwsem   invalidate_lock
 ====================== ======================== ========= ===============
 writepage:		yes, unlocks (see below)
-readpage:		yes, unlocks			   shared
+read_folio:		yes, unlocks			   shared
 writepages:
-dirty_folio		maybe
+dirty_folio:		maybe
 readahead:		yes, unlocks			   shared
 write_begin:		locks the page		 exclusive
 write_end:		yes, unlocks		 exclusive
 bmap:
 invalidate_folio:	yes				   exclusive
-releasepage:		yes
-freepage:		yes
+release_folio:		yes
+free_folio:		yes
 direct_IO:
 isolate_page:		yes
 migratepage:		yes (both)
@@ -289,13 +289,13 @@ swap_activate: no
 swap_deactivate:	no
 ====================== ======================== ========= ===============
-->write_begin(), ->write_end() and ->readpage() may be called from
+->write_begin(), ->write_end() and ->read_folio() may be called from
 the request handler (/dev/loop).
-->readpage() unlocks the page, either synchronously or via I/O
+->read_folio() unlocks the folio, either synchronously or via I/O
 completion.
-->readahead() unlocks the pages that I/O is attempted on like ->readpage().
+->readahead() unlocks the folios that I/O is attempted on like ->read_folio().
 ->writepage() is used for two purposes: for "memory cleansing" and for
 "sync". These are quite different operations and the behaviour may differ
@@ -372,12 +372,12 @@ invalidate_lock before invalidating page cache in truncate / hole punch
 path (and thus calling into ->invalidate_folio) to block races between page
 cache invalidation and page cache filling functions (fault, read, ...).
-->releasepage() is called when the kernel is about to try to drop the
-buffers from the page in preparation for freeing it. It returns zero to
-indicate that the buffers are (or may be) freeable. If ->releasepage is zero,
-the kernel assumes that the fs has no private interest in the buffers.
+->release_folio() is called when the kernel is about to try to drop the
+buffers from the folio in preparation for freeing it. It returns false to
+indicate that the buffers are (or may be) freeable. If ->release_folio is
+NULL, the kernel assumes that the fs has no private interest in the buffers.
-->freepage() is called when the kernel is done dropping the page
+->free_folio() is called when the kernel has dropped the folio
 from the page cache.
 ->launder_folio() may be called prior to releasing a folio if
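
With the AOP flags gone, a block-based filesystem's ->write_begin() reduces
to a thin wrapper that returns with the page locked, per the table above
(sketch; example_get_block is illustrative, block_write_begin() is the real
helper with its post-5.19 signature):

	static int example_write_begin(struct file *file,
				       struct address_space *mapping,
				       loff_t pos, unsigned len,
				       struct page **pagep, void **fsdata)
	{
		return block_write_begin(mapping, pos, len, pagep,
					 example_get_block);
	}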

@@ -96,7 +96,7 @@ attached to an inode (or NULL if fscache is disabled)::
 Buffered Read Helpers
 =====================
-The library provides a set of read helpers that handle the ->readpage(),
+The library provides a set of read helpers that handle the ->read_folio(),
 ->readahead() and much of the ->write_begin() VM operations and translate them
 into a common call framework.
@@ -136,20 +136,19 @@ Read Helper Functions
 Three read helpers are provided::
 	void netfs_readahead(struct readahead_control *ractl);
-	int netfs_readpage(struct file *file,
-			   struct page *page);
+	int netfs_read_folio(struct file *file,
+			     struct folio *folio);
 	int netfs_write_begin(struct file *file,
 			      struct address_space *mapping,
 			      loff_t pos,
 			      unsigned int len,
-			      unsigned int flags,
 			      struct folio **_folio,
 			      void **_fsdata);
 Each corresponds to a VM address space operation. These operations use the
 state in the per-inode context.
-For ->readahead() and ->readpage(), the network filesystem just point directly
+For ->readahead() and ->read_folio(), the network filesystem just point directly
 at the corresponding read helper; whereas for ->write_begin(), it may be a
 little more complicated as the network filesystem might want to flush
 conflicting writes or track dirty data and needs to put the acquired folio if
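
Wiring this up looks like the 9p and AFS hunks later in this merge (sketch;
the example_* names are illustrative, the netfs_* calls are the real helpers
with the signatures shown above):

	static int example_write_begin(struct file *file,
				       struct address_space *mapping,
				       loff_t pos, unsigned int len,
				       struct page **pagep, void **fsdata)
	{
		struct folio *folio;
		int ret = netfs_write_begin(file, mapping, pos, len,
					    &folio, fsdata);

		if (ret < 0)
			return ret;
		*pagep = &folio->page;	/* hand the locked folio back */
		return 0;
	}

	const struct address_space_operations example_netfs_aops = {
		.read_folio	= netfs_read_folio,
		.readahead	= netfs_readahead,
		.write_begin	= example_write_begin,
	};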

@@ -624,7 +624,7 @@ any symlink that might use page_follow_link_light/page_put_link() must
 have inode_nohighmem(inode) called before anything might start playing with
 its pagecache. No highmem pages should end up in the pagecache of such
 symlinks. That includes any preseeding that might be done during symlink
-creation. __page_symlink() will honour the mapping gfp flags, so once
+creation. page_symlink() will honour the mapping gfp flags, so once
 you've done inode_nohighmem() it's safe to use, but if you allocate and
 insert the page manually, make sure to use the right gfp flags.
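
An illustrative creation path that follows this advice (example_aops and the
function itself are stand-ins; page_symlink(), inode_nohighmem() and
page_symlink_inode_operations are the real kernel interfaces):

	static int example_make_symlink(struct inode *inode, const char *symname)
	{
		inode_nohighmem(inode);	/* before any pagecache preseeding */
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &example_aops;
		return page_symlink(inode, symname, strlen(symname) + 1);
	}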

@@ -620,9 +620,9 @@ Writeback.
 The first can be used independently to the others. The VM can try to
 either write dirty pages in order to clean them, or release clean pages
 in order to reuse them. To do this it can call the ->writepage method
-on dirty pages, and ->releasepage on clean pages with PagePrivate set.
-Clean pages without PagePrivate and with no external references will be
-released without notice being given to the address_space.
+on dirty pages, and ->release_folio on clean folios with the private
+flag set. Clean pages without PagePrivate and with no external references
+will be released without notice being given to the address_space.
 To achieve this functionality, pages need to be placed on an LRU with
 lru_cache_add and mark_page_active needs to be called whenever the page
@@ -656,7 +656,7 @@ by memory-mapping the page. Data is written into the address space by
 the application, and then written-back to storage typically in whole
 pages, however the address_space has finer control of write sizes.
-The read process essentially only requires 'readpage'. The write
+The read process essentially only requires 'read_folio'. The write
 process is more complicated and uses write_begin/write_end or
 dirty_folio to write data into the address_space, and writepage and
 writepages to writeback data to storage.
@@ -722,20 +722,20 @@ cache in your filesystem. The following members are defined:
 	struct address_space_operations {
 		int (*writepage)(struct page *page, struct writeback_control *wbc);
-		int (*readpage)(struct file *, struct page *);
+		int (*read_folio)(struct file *, struct folio *);
 		int (*writepages)(struct address_space *, struct writeback_control *);
 		bool (*dirty_folio)(struct address_space *, struct folio *);
 		void (*readahead)(struct readahead_control *);
 		int (*write_begin)(struct file *, struct address_space *mapping,
-				   loff_t pos, unsigned len, unsigned flags,
+				   loff_t pos, unsigned len,
 				   struct page **pagep, void **fsdata);
 		int (*write_end)(struct file *, struct address_space *mapping,
 				 loff_t pos, unsigned len, unsigned copied,
 				 struct page *page, void *fsdata);
 		sector_t (*bmap)(struct address_space *, sector_t);
 		void (*invalidate_folio) (struct folio *, size_t start, size_t len);
-		int (*releasepage) (struct page *, int);
-		void (*freepage)(struct page *);
+		bool (*release_folio)(struct folio *, gfp_t);
+		void (*free_folio)(struct folio *);
 		ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
 		/* isolate a page for migration */
 		bool (*isolate_page) (struct page *, isolate_mode_t);
@@ -747,7 +747,7 @@ cache in your filesystem. The following members are defined:
 		bool (*is_partially_uptodate) (struct folio *, size_t from,
 					       size_t count);
-		void (*is_dirty_writeback) (struct page *, bool *, bool *);
+		void (*is_dirty_writeback)(struct folio *, bool *, bool *);
 		int (*error_remove_page) (struct mapping *mapping, struct page *page);
 		int (*swap_activate)(struct file *);
 		int (*swap_deactivate)(struct file *);
@@ -772,14 +772,14 @@ cache in your filesystem. The following members are defined:
 	See the file "Locking" for more details.
-``readpage``
-	called by the VM to read a page from backing store. The page
-	will be Locked when readpage is called, and should be unlocked
-	and marked uptodate once the read completes. If ->readpage
-	discovers that it needs to unlock the page for some reason, it
-	can do so, and then return AOP_TRUNCATED_PAGE. In this case,
-	the page will be relocated, relocked and if that all succeeds,
-	->readpage will be called again.
+``read_folio``
+	called by the VM to read a folio from backing store. The folio
+	will be locked when read_folio is called, and should be unlocked
+	and marked uptodate once the read completes. If ->read_folio
+	discovers that it cannot perform the I/O at this time, it can
+	unlock the folio and return AOP_TRUNCATED_PAGE. In this case,
+	the folio will be looked up again, relocked and if that all succeeds,
+	->read_folio will be called again.
 ``writepages``
 	called by the VM to write out pages associated with the
@@ -832,9 +832,6 @@ cache in your filesystem. The following members are defined:
 	passed to write_begin is greater than the number of bytes copied
 	into the page).
-	flags is a field for AOP_FLAG_xxx flags, described in
-	include/linux/fs.h.
 	A void * may be returned in fsdata, which then gets passed into
 	write_end.
@@ -867,36 +864,35 @@ cache in your filesystem. The following members are defined:
 	address space. This generally corresponds to either a
 	truncation, punch hole or a complete invalidation of the address
 	space (in the latter case 'offset' will always be 0 and 'length'
-	will be folio_size()). Any private data associated with the page
+	will be folio_size()). Any private data associated with the folio
 	should be updated to reflect this truncation. If offset is 0
 	and length is folio_size(), then the private data should be
-	released, because the page must be able to be completely
-	discarded. This may be done by calling the ->releasepage
+	released, because the folio must be able to be completely
+	discarded. This may be done by calling the ->release_folio
 	function, but in this case the release MUST succeed.
-``releasepage``
-	releasepage is called on PagePrivate pages to indicate that the
-	page should be freed if possible. ->releasepage should remove
-	any private data from the page and clear the PagePrivate flag.
-	If releasepage() fails for some reason, it must indicate failure
-	with a 0 return value. releasepage() is used in two distinct
-	though related cases. The first is when the VM finds a clean
-	page with no active users and wants to make it a free page. If
-	->releasepage succeeds, the page will be removed from the
-	address_space and become free.
+``release_folio``
+	release_folio is called on folios with private data to tell the
+	filesystem that the folio is about to be freed. ->release_folio
+	should remove any private data from the folio and clear the
+	private flag. If release_folio() fails, it should return false.
+	release_folio() is used in two distinct though related cases.
+	The first is when the VM wants to free a clean folio with no
+	active users. If ->release_folio succeeds, the folio will be
+	removed from the address_space and be freed.
 	The second case is when a request has been made to invalidate
-	some or all pages in an address_space. This can happen through
-	the fadvise(POSIX_FADV_DONTNEED) system call or by the
-	filesystem explicitly requesting it as nfs and 9fs do (when they
+	some or all folios in an address_space. This can happen
+	through the fadvise(POSIX_FADV_DONTNEED) system call or by the
+	filesystem explicitly requesting it as nfs and 9p do (when they
 	believe the cache may be out of date with storage) by calling
 	invalidate_inode_pages2(). If the filesystem makes such a call,
-	and needs to be certain that all pages are invalidated, then its
-	releasepage will need to ensure this. Possibly it can clear the
-	PageUptodate bit if it cannot free private data yet.
+	and needs to be certain that all folios are invalidated, then
+	its release_folio will need to ensure this. Possibly it can
+	clear the uptodate flag if it cannot free private data yet.
-``freepage``
-	freepage is called once the page is no longer visible in the
+``free_folio``
+	free_folio is called once the folio is no longer visible in the
 	page cache in order to allow the cleanup of any private data.
 	Since it may be called by the memory reclaimer, it should not
 	assume that the original address_space mapping still exists, and
@@ -935,14 +931,14 @@ cache in your filesystem. The following members are defined:
 	without needing I/O to bring the whole page up to date.
 ``is_dirty_writeback``
-	Called by the VM when attempting to reclaim a page. The VM uses
+	Called by the VM when attempting to reclaim a folio. The VM uses
 	dirty and writeback information to determine if it needs to
 	stall to allow flushers a chance to complete some IO.
-	Ordinarily it can use PageDirty and PageWriteback but some
-	filesystems have more complex state (unstable pages in NFS
+	Ordinarily it can use folio_test_dirty and folio_test_writeback but
+	some filesystems have more complex state (unstable folios in NFS
 	prevent reclaim) or do not set those flags due to locking
 	problems. This callback allows a filesystem to indicate to the
-	VM if a page should be treated as dirty or writeback for the
+	VM if a folio should be treated as dirty or writeback for the
 	purposes of stalling.
 ``error_remove_page``
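
Pulling the renamed members together, a post-5.19 operations table has this
shape (a representative subset drawn from the prototypes above; the
example_* names are illustrative):

	static const struct address_space_operations example_aops = {
		.writepage	= example_writepage,
		.read_folio	= example_read_folio,	/* was .readpage */
		.writepages	= example_writepages,
		.dirty_folio	= filemap_dirty_folio,
		.readahead	= example_readahead,
		.write_begin	= example_write_begin,	/* no flags argument */
		.write_end	= example_write_end,
		.invalidate_folio = example_invalidate_folio,
		.release_folio	= example_release_folio, /* bool, was .releasepage */
		.free_folio	= example_free_folio,	/* was .freepage */
	};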

@@ -14878,6 +14878,19 @@ F: Documentation/core-api/padata.rst
 F: include/linux/padata.h
 F: kernel/padata.c
+PAGE CACHE
+M: Matthew Wilcox (Oracle) <willy@infradead.org>
+L: linux-fsdevel@vger.kernel.org
+S: Supported
+T: git git://git.infradead.org/users/willy/pagecache.git
+F: Documentation/filesystems/locking.rst
+F: Documentation/filesystems/vfs.rst
+F: include/linux/pagemap.h
+F: mm/filemap.c
+F: mm/page-writeback.c
+F: mm/readahead.c
+F: mm/truncate.c
+
 PAGE POOL
 M: Jesper Dangaard Brouer <hawk@kernel.org>
 M: Ilias Apalodimas <ilias.apalodimas@linaro.org>

@@ -372,9 +372,9 @@ static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
 	return block_write_full_page(page, blkdev_get_block, wbc);
 }
-static int blkdev_readpage(struct file * file, struct page * page)
+static int blkdev_read_folio(struct file *file, struct folio *folio)
 {
-	return block_read_full_page(page, blkdev_get_block);
+	return block_read_full_folio(folio, blkdev_get_block);
 }
 static void blkdev_readahead(struct readahead_control *rac)
@@ -383,11 +383,9 @@ static void blkdev_readahead(struct readahead_control *rac)
 }
 static int blkdev_write_begin(struct file *file, struct address_space *mapping,
-		loff_t pos, unsigned len, unsigned flags, struct page **pagep,
-		void **fsdata)
+		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
 {
-	return block_write_begin(mapping, pos, len, flags, pagep,
-			blkdev_get_block);
+	return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
 }
 static int blkdev_write_end(struct file *file, struct address_space *mapping,
@@ -412,7 +410,7 @@ static int blkdev_writepages(struct address_space *mapping,
 const struct address_space_operations def_blk_aops = {
 	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
-	.readpage	= blkdev_readpage,
+	.read_folio	= blkdev_read_folio,
 	.readahead	= blkdev_readahead,
 	.writepage	= blkdev_writepage,
 	.write_begin	= blkdev_write_begin,

@@ -408,6 +408,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 	     const struct drm_i915_gem_pwrite *arg)
 {
 	struct address_space *mapping = obj->base.filp->f_mapping;
+	const struct address_space_operations *aops = mapping->a_ops;
 	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
 	u64 remain, offset;
 	unsigned int pg;
@@ -465,9 +466,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 		if (err)
 			return err;
-		err = pagecache_write_begin(obj->base.filp, mapping,
-					    offset, len, 0,
-					    &page, &data);
+		err = aops->write_begin(obj->base.filp, mapping, offset, len,
+					&page, &data);
 		if (err < 0)
 			return err;
@@ -477,9 +477,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 				       len);
 		kunmap_atomic(vaddr);
-		err = pagecache_write_end(obj->base.filp, mapping,
-					  offset, len, len - unwritten,
-					  page, data);
+		err = aops->write_end(obj->base.filp, mapping, offset, len,
+				      len - unwritten, page, data);
 		if (err < 0)
 			return err;
@@ -622,6 +621,7 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
 {
 	struct drm_i915_gem_object *obj;
 	struct file *file;
+	const struct address_space_operations *aops;
 	resource_size_t offset;
 	int err;
@@ -633,15 +633,15 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
 	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
 	file = obj->base.filp;
+	aops = file->f_mapping->a_ops;
 	offset = 0;
 	do {
 		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
 		struct page *page;
 		void *pgdata, *vaddr;
-		err = pagecache_write_begin(file, file->f_mapping,
-					    offset, len, 0,
-					    &page, &pgdata);
+		err = aops->write_begin(file, file->f_mapping, offset, len,
+					&page, &pgdata);
 		if (err < 0)
 			goto fail;
@@ -649,9 +649,8 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
 		memcpy(vaddr, data, len);
 		kunmap(page);
-		err = pagecache_write_end(file, file->f_mapping,
-					  offset, len, len,
-					  page, pgdata);
+		err = aops->write_end(file, file->f_mapping, offset, len, len,
+				      page, pgdata);
 		if (err < 0)
 			goto fail;

@@ -34,15 +34,14 @@ unsigned char *scsi_bios_ptable(struct block_device *dev)
 {
 	struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping;
 	unsigned char *res = NULL;
-	struct page *page;
+	struct folio *folio;
-	page = read_mapping_page(mapping, 0, NULL);
-	if (IS_ERR(page))
+	folio = read_mapping_folio(mapping, 0, NULL);
+	if (IS_ERR(folio))
 		return NULL;
-	if (!PageError(page))
-		res = kmemdup(page_address(page) + 0x1be, 66, GFP_KERNEL);
-	put_page(page);
+	res = kmemdup(folio_address(folio) + 0x1be, 66, GFP_KERNEL);
+	folio_put(folio);
 	return res;
 }
 EXPORT_SYMBOL(scsi_bios_ptable);

@@ -100,29 +100,28 @@ const struct netfs_request_ops v9fs_req_ops = {
 };
 /**
- * v9fs_release_page - release the private state associated with a page
- * @page: The page to be released
+ * v9fs_release_folio - release the private state associated with a folio
+ * @folio: The folio to be released
  * @gfp: The caller's allocation restrictions
  *
- * Returns 1 if the page can be released, false otherwise.
+ * Returns true if the page can be released, false otherwise.
  */
-static int v9fs_release_page(struct page *page, gfp_t gfp)
+static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
 {
-	struct folio *folio = page_folio(page);
 	struct inode *inode = folio_inode(folio);
 	if (folio_test_private(folio))
-		return 0;
+		return false;
 #ifdef CONFIG_9P_FSCACHE
 	if (folio_test_fscache(folio)) {
 		if (current_is_kswapd() || !(gfp & __GFP_FS))
-			return 0;
+			return false;
 		folio_wait_fscache(folio);
 	}
 #endif
 	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
-	return 1;
+	return true;
 }
 static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
@@ -260,7 +259,7 @@ v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 }
 static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
-			    loff_t pos, unsigned int len, unsigned int flags,
+			    loff_t pos, unsigned int len,
 			    struct page **subpagep, void **fsdata)
 {
 	int retval;
@@ -275,7 +274,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
 	 * file. We need to do this before we get a lock on the page in case
 	 * there's more than one writer competing for the same cache block.
 	 */
-	retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata);
+	retval = netfs_write_begin(filp, mapping, pos, len, &folio, fsdata);
 	if (retval < 0)
 		return retval;
@@ -336,13 +335,13 @@ static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
 #endif
 const struct address_space_operations v9fs_addr_operations = {
-	.readpage = netfs_readpage,
+	.read_folio = netfs_read_folio,
 	.readahead = netfs_readahead,
 	.dirty_folio = v9fs_dirty_folio,
 	.writepage = v9fs_vfs_writepage,
 	.write_begin = v9fs_write_begin,
 	.write_end = v9fs_write_end,
-	.releasepage = v9fs_release_page,
+	.release_folio = v9fs_release_folio,
 	.invalidate_folio = v9fs_invalidate_folio,
 	.launder_folio = v9fs_launder_folio,
 	.direct_IO = v9fs_direct_IO,

@@ -38,9 +38,9 @@ static int adfs_writepage(struct page *page, struct writeback_control *wbc)
 	return block_write_full_page(page, adfs_get_block, wbc);
 }
-static int adfs_readpage(struct file *file, struct page *page)
+static int adfs_read_folio(struct file *file, struct folio *folio)
 {
-	return block_read_full_page(page, adfs_get_block);
+	return block_read_full_folio(folio, adfs_get_block);
 }
 static void adfs_write_failed(struct address_space *mapping, loff_t to)
@@ -52,13 +52,13 @@ static void adfs_write_failed(struct address_space *mapping, loff_t to)
 }
 static int adfs_write_begin(struct file *file, struct address_space *mapping,
-			loff_t pos, unsigned len, unsigned flags,
+			loff_t pos, unsigned len,
 			struct page **pagep, void **fsdata)
 {
 	int ret;
 	*pagep = NULL;
-	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+	ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
 				adfs_get_block,
 				&ADFS_I(mapping->host)->mmu_private);
 	if (unlikely(ret))
@@ -75,7 +75,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
 static const struct address_space_operations adfs_aops = {
 	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
-	.readpage	= adfs_readpage,
+	.read_folio	= adfs_read_folio,
 	.writepage	= adfs_writepage,
 	.write_begin	= adfs_write_begin,
 	.write_end	= generic_write_end,

@@ -375,9 +375,9 @@ static int affs_writepage(struct page *page, struct writeback_control *wbc)
 	return block_write_full_page(page, affs_get_block, wbc);
 }
-static int affs_readpage(struct file *file, struct page *page)
+static int affs_read_folio(struct file *file, struct folio *folio)
 {
-	return block_read_full_page(page, affs_get_block);
+	return block_read_full_folio(folio, affs_get_block);
 }
 static void affs_write_failed(struct address_space *mapping, loff_t to)
@@ -414,13 +414,13 @@ affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 }
 static int affs_write_begin(struct file *file, struct address_space *mapping,
-			loff_t pos, unsigned len, unsigned flags,
+			loff_t pos, unsigned len,
 			struct page **pagep, void **fsdata)
 {
 	int ret;
 	*pagep = NULL;
-	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+	ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
 				affs_get_block,
 				&AFFS_I(mapping->host)->mmu_private);
 	if (unlikely(ret))
@@ -455,7 +455,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations affs_aops = {
 	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
-	.readpage	= affs_readpage,
+	.read_folio	= affs_read_folio,
 	.writepage	= affs_writepage,
 	.write_begin	= affs_write_begin,
 	.write_end	= affs_write_end,
@@ -629,8 +629,9 @@ out:
 }
 static int
-affs_readpage_ofs(struct file *file, struct page *page)
+affs_read_folio_ofs(struct file *file, struct folio *folio)
 {
+	struct page *page = &folio->page;
 	struct inode *inode = page->mapping->host;
 	u32 to;
 	int err;
@@ -650,7 +651,7 @@ affs_readpage_ofs(struct file *file, struct page *page)
 }
 static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned flags,
+				loff_t pos, unsigned len,
 				struct page **pagep, void **fsdata)
 {
 	struct inode *inode = mapping->host;
@@ -670,7 +671,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
 	}
 	index = pos >> PAGE_SHIFT;
-	page = grab_cache_page_write_begin(mapping, index, flags);
+	page = grab_cache_page_write_begin(mapping, index);
 	if (!page)
 		return -ENOMEM;
 	*pagep = page;
@@ -837,7 +838,7 @@ err_bh:
 const struct address_space_operations affs_aops_ofs = {
 	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
-	.readpage	= affs_readpage_ofs,
+	.read_folio	= affs_read_folio_ofs,
 	//.writepage	= affs_writepage_ofs,
 	.write_begin	= affs_write_begin_ofs,
 	.write_end	= affs_write_end_ofs
@@ -887,7 +888,7 @@ affs_truncate(struct inode *inode)
 	loff_t isize = inode->i_size;
 	int res;
-	res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, 0, &page, &fsdata);
+	res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &page, &fsdata);
 	if (!res)
 		res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
 	else

@@ -11,8 +11,9 @@
 #include "affs.h"
-static int affs_symlink_readpage(struct file *file, struct page *page)
+static int affs_symlink_read_folio(struct file *file, struct folio *folio)
 {
+	struct page *page = &folio->page;
 	struct buffer_head *bh;
 	struct inode *inode = page->mapping->host;
 	char *link = page_address(page);
@@ -67,7 +68,7 @@ fail:
 }
 const struct address_space_operations affs_symlink_aops = {
-	.readpage	= affs_symlink_readpage,
+	.read_folio	= affs_symlink_read_folio,
 };
 const struct inode_operations affs_symlink_inode_operations = {

@@ -41,7 +41,7 @@ static int afs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
 static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
 		      struct dentry *old_dentry, struct inode *new_dir,
 		      struct dentry *new_dentry, unsigned int flags);
-static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags);
+static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags);
 static void afs_dir_invalidate_folio(struct folio *folio, size_t offset,
 				   size_t length);
@@ -75,7 +75,7 @@ const struct inode_operations afs_dir_inode_operations = {
 const struct address_space_operations afs_dir_aops = {
 	.dirty_folio	= afs_dir_dirty_folio,
-	.releasepage	= afs_dir_releasepage,
+	.release_folio	= afs_dir_release_folio,
 	.invalidate_folio = afs_dir_invalidate_folio,
 };
@@ -2002,9 +2002,8 @@ error:
  * Release a directory folio and clean up its private state if it's not busy
  * - return true if the folio can now be released, false if not
  */
-static int afs_dir_releasepage(struct page *subpage, gfp_t gfp_flags)
+static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
-	struct folio *folio = page_folio(subpage);
 	struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
 	_enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio_index(folio));

@@ -19,10 +19,10 @@
 #include "internal.h"
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
-static int afs_symlink_readpage(struct file *file, struct page *page);
+static int afs_symlink_read_folio(struct file *file, struct folio *folio);
 static void afs_invalidate_folio(struct folio *folio, size_t offset,
 			       size_t length);
-static int afs_releasepage(struct page *page, gfp_t gfp_flags);
+static bool afs_release_folio(struct folio *folio, gfp_t gfp_flags);
 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
 static void afs_vm_open(struct vm_area_struct *area);
@@ -50,11 +50,11 @@ const struct inode_operations afs_file_inode_operations = {
 };
 const struct address_space_operations afs_file_aops = {
-	.readpage	= netfs_readpage,
+	.read_folio	= netfs_read_folio,
 	.readahead	= netfs_readahead,
 	.dirty_folio	= afs_dirty_folio,
 	.launder_folio	= afs_launder_folio,
-	.releasepage	= afs_releasepage,
+	.release_folio	= afs_release_folio,
 	.invalidate_folio = afs_invalidate_folio,
 	.write_begin	= afs_write_begin,
 	.write_end	= afs_write_end,
@@ -63,8 +63,8 @@ const struct address_space_operations afs_file_aops = {
 };
 const struct address_space_operations afs_symlink_aops = {
-	.readpage	= afs_symlink_readpage,
-	.releasepage	= afs_releasepage,
+	.read_folio	= afs_symlink_read_folio,
+	.release_folio	= afs_release_folio,
 	.invalidate_folio = afs_invalidate_folio,
 };
@@ -332,11 +332,10 @@ static void afs_issue_read(struct netfs_io_subrequest *subreq)
 	afs_put_read(fsreq);
 }
-static int afs_symlink_readpage(struct file *file, struct page *page)
+static int afs_symlink_read_folio(struct file *file, struct folio *folio)
 {
-	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+	struct afs_vnode *vnode = AFS_FS_I(folio->mapping->host);
 	struct afs_read *fsreq;
-	struct folio *folio = page_folio(page);
 	int ret;
 	fsreq = afs_alloc_read(GFP_NOFS);
@@ -347,13 +346,13 @@ static int afs_symlink_readpage(struct file *file, struct page *page)
 	fsreq->len = folio_size(folio);
 	fsreq->vnode = vnode;
 	fsreq->iter = &fsreq->def_iter;
-	iov_iter_xarray(&fsreq->def_iter, READ, &page->mapping->i_pages,
+	iov_iter_xarray(&fsreq->def_iter, READ, &folio->mapping->i_pages,
 			fsreq->pos, fsreq->len);
 	ret = afs_fetch_data(fsreq->vnode, fsreq);
 	if (ret == 0)
-		SetPageUptodate(page);
-	unlock_page(page);
+		folio_mark_uptodate(folio);
+	folio_unlock(folio);
 	return ret;
 }
@@ -482,16 +481,15 @@ static void afs_invalidate_folio(struct folio *folio, size_t offset,
  * release a page and clean up its private state if it's not busy
  * - return true if the page can now be released, false if not
  */
-static int afs_releasepage(struct page *page, gfp_t gfp)
+static bool afs_release_folio(struct folio *folio, gfp_t gfp)
 {
-	struct folio *folio = page_folio(page);
 	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
 	_enter("{{%llx:%llu}[%lu],%lx},%x",
 	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
 	       gfp);
-	/* deny if page is being written to the cache and the caller hasn't
+	/* deny if folio is being written to the cache and the caller hasn't
 	 * elected to wait */
 #ifdef CONFIG_AFS_FSCACHE
 	if (folio_test_fscache(folio)) {

@@ -311,7 +311,7 @@ struct afs_net {
 	atomic_t n_lookup;	/* Number of lookups done */
 	atomic_t n_reval;	/* Number of dentries needing revalidation */
 	atomic_t n_inval;	/* Number of invalidations by the server */
-	atomic_t n_relpg;	/* Number of invalidations by releasepage */
+	atomic_t n_relpg;	/* Number of invalidations by release_folio */
 	atomic_t n_read_dir;	/* Number of directory pages read */
 	atomic_t n_dir_cr;	/* Number of directory entry creation edits */
 	atomic_t n_dir_rm;	/* Number of directory entry removal edits */
@@ -1535,7 +1535,7 @@ bool afs_dirty_folio(struct address_space *, struct folio *);
 #define afs_dirty_folio filemap_dirty_folio
 #endif
 extern int afs_write_begin(struct file *file, struct address_space *mapping,
-			loff_t pos, unsigned len, unsigned flags,
+			loff_t pos, unsigned len,
 			struct page **pagep, void **fsdata);
 extern int afs_write_end(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned copied,

@@ -42,7 +42,7 @@ static void afs_folio_start_fscache(bool caching, struct folio *folio)
  * prepare to perform part of a write to a page
  */
 int afs_write_begin(struct file *file, struct address_space *mapping,
-		    loff_t pos, unsigned len, unsigned flags,
+		    loff_t pos, unsigned len,
 		    struct page **_page, void **fsdata)
 {
 	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
@@ -60,7 +60,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
 	 * file. We need to do this before we get a lock on the page in case
 	 * there's more than one writer competing for the same cache block.
 	 */
-	ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata);
+	ret = netfs_write_begin(file, mapping, pos, len, &folio, fsdata);
 	if (ret < 0)
 		return ret;

@@ -40,7 +40,7 @@ MODULE_LICENSE("GPL");
 static int befs_readdir(struct file *, struct dir_context *);
 static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
-static int befs_readpage(struct file *file, struct page *page);
+static int befs_read_folio(struct file *file, struct folio *folio);
 static sector_t befs_bmap(struct address_space *mapping, sector_t block);
 static struct dentry *befs_lookup(struct inode *, struct dentry *,
 			unsigned int);
@@ -48,7 +48,7 @@ static struct inode *befs_iget(struct super_block *, unsigned long);
 static struct inode *befs_alloc_inode(struct super_block *sb);
 static void befs_free_inode(struct inode *inode);
 static void befs_destroy_inodecache(void);
-static int befs_symlink_readpage(struct file *, struct page *);
+static int befs_symlink_read_folio(struct file *, struct folio *);
 static int befs_utf2nls(struct super_block *sb, const char *in, int in_len,
 			char **out, int *out_len);
 static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
@@ -87,12 +87,12 @@ static const struct inode_operations befs_dir_inode_operations = {
 };
 static const struct address_space_operations befs_aops = {
-	.readpage	= befs_readpage,
+	.read_folio	= befs_read_folio,
 	.bmap		= befs_bmap,
 };
 static const struct address_space_operations befs_symlink_aops = {
-	.readpage	= befs_symlink_readpage,
+	.read_folio	= befs_symlink_read_folio,
 };
 static const struct export_operations befs_export_operations = {
@@ -102,16 +102,16 @@ static const struct export_operations befs_export_operations = {
 };
 /*
- * Called by generic_file_read() to read a page of data
+ * Called by generic_file_read() to read a folio of data
  *
  * In turn, simply calls a generic block read function and
  * passes it the address of befs_get_block, for mapping file
  * positions to disk blocks.
 */
 static int
-befs_readpage(struct file *file, struct page *page)
+befs_read_folio(struct file *file, struct folio *folio)
 {
-	return block_read_full_page(page, befs_get_block);
+	return block_read_full_folio(folio, befs_get_block);
 }
 static sector_t
@@ -468,8 +468,9 @@ befs_destroy_inodecache(void)
  * The data stream become link name. Unless the LONG_SYMLINK
  * flag is set.
 */
-static int befs_symlink_readpage(struct file *unused, struct page *page)
+static int befs_symlink_read_folio(struct file *unused, struct folio *folio)
 {
+	struct page *page = &folio->page;
 	struct inode *inode = page->mapping->host;
 	struct super_block *sb = inode->i_sb;
 	struct befs_inode_info *befs_ino = BEFS_I(inode);

@@ -155,9 +155,9 @@ static int bfs_writepage(struct page *page, struct writeback_control *wbc)
 	return block_write_full_page(page, bfs_get_block, wbc);
 }
-static int bfs_readpage(struct file *file, struct page *page)
+static int bfs_read_folio(struct file *file, struct folio *folio)
 {
-	return block_read_full_page(page, bfs_get_block);
+	return block_read_full_folio(folio, bfs_get_block);
 }
 static void bfs_write_failed(struct address_space *mapping, loff_t to)
@@ -169,13 +169,12 @@ static void bfs_write_failed(struct address_space *mapping, loff_t to)
 }
 static int bfs_write_begin(struct file *file, struct address_space *mapping,
-			loff_t pos, unsigned len, unsigned flags,
+			loff_t pos, unsigned len,
 			struct page **pagep, void **fsdata)
 {
 	int ret;
-	ret = block_write_begin(mapping, pos, len, flags, pagep,
-				bfs_get_block);
+	ret = block_write_begin(mapping, pos, len, pagep, bfs_get_block);
 	if (unlikely(ret))
 		bfs_write_failed(mapping, pos + len);
@@ -190,7 +189,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations bfs_aops = {
 	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
-	.readpage	= bfs_readpage,
+	.read_folio	= bfs_read_folio,
 	.writepage	= bfs_writepage,
 	.write_begin	= bfs_write_begin,
 	.write_end	= generic_write_end,

@@ -996,12 +996,12 @@ static int btree_writepages(struct address_space *mapping,
 	return btree_write_cache_pages(mapping, wbc);
 }
-static int btree_releasepage(struct page *page, gfp_t gfp_flags)
+static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
-	if (PageWriteback(page) || PageDirty(page))
-		return 0;
+	if (folio_test_writeback(folio) || folio_test_dirty(folio))
+		return false;
-	return try_release_extent_buffer(page);
+	return try_release_extent_buffer(&folio->page);
 }
 static void btree_invalidate_folio(struct folio *folio, size_t offset,
@@ -1010,7 +1010,7 @@ static void btree_invalidate_folio(struct folio *folio, size_t offset,
 	struct extent_io_tree *tree;
 	tree = &BTRFS_I(folio->mapping->host)->io_tree;
 	extent_invalidate_folio(tree, folio, offset);
-	btree_releasepage(&folio->page, GFP_NOFS);
+	btree_release_folio(folio, GFP_NOFS);
 	if (folio_get_private(folio)) {
 		btrfs_warn(BTRFS_I(folio->mapping->host)->root->fs_info,
 			   "folio private not zero on folio %llu",
@@ -1071,7 +1071,7 @@ static bool btree_dirty_folio(struct address_space *mapping,
 static const struct address_space_operations btree_aops = {
 	.writepages	= btree_writepages,
-	.releasepage	= btree_releasepage,
+	.release_folio	= btree_release_folio,
 	.invalidate_folio = btree_invalidate_folio,
 #ifdef CONFIG_MIGRATION
 	.migratepage	= btree_migratepage,

@@ -3799,8 +3799,9 @@ out:
 	return ret;
 }
-int btrfs_readpage(struct file *file, struct page *page)
+int btrfs_read_folio(struct file *file, struct folio *folio)
 {
+	struct page *page = &folio->page;
 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
 	u64 start = page_offset(page);
 	u64 end = start + PAGE_SIZE - 1;
@@ -5306,7 +5307,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
 }
 /*
- * a helper for releasepage, this tests for areas of the page that
+ * a helper for release_folio, this tests for areas of the page that
  * are locked or under IO and drops the related state bits if it is safe
  * to drop the page.
  */
@@ -5342,7 +5343,7 @@ static int try_release_extent_state(struct extent_io_tree *tree,
 }
 /*
- * a helper for releasepage. As long as there are no locked extents
+ * a helper for release_folio. As long as there are no locked extents
  * in the range corresponding to the page, both state records and extent
  * map records are removed
 */
@@ -6042,10 +6043,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
 *
 * It is only cleared in two cases: freeing the last non-tree
 * reference to the extent_buffer when its STALE bit is set or
- * calling releasepage when the tree reference is the only reference.
+ * calling release_folio when the tree reference is the only reference.
 *
 * In both cases, care is taken to ensure that the extent_buffer's
- * pages are not under io. However, releasepage can be concurrently
+ * pages are not under io. However, release_folio can be concurrently
 * called with creating new references, which is prone to race
 * conditions between the calls to check_buffer_tree_ref in those
 * codepaths and clearing TREE_REF in try_release_extent_buffer.
@@ -6310,7 +6311,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	/*
 	 * We can't unlock the pages just yet since the extent buffer
 	 * hasn't been properly inserted in the radix tree, this
-	 * opens a race with btree_releasepage which can free a page
+	 * opens a race with btree_release_folio which can free a page
 	 * while we are still filling in all pages for the buffer and
 	 * we could crash.
 	 */
@@ -6339,7 +6340,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	/*
 	 * Now it's safe to unlock the pages because any calls to
-	 * btree_releasepage will correctly detect that a page belongs to a
+	 * btree_release_folio will correctly detect that a page belongs to a
 	 * live buffer and won't free them prematurely.
 	 */
 	for (i = 0; i < num_pages; i++)
@@ -6721,7 +6722,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
 	eb->read_mirror = 0;
 	atomic_set(&eb->io_pages, num_reads);
 	/*
-	 * It is possible for releasepage to clear the TREE_REF bit before we
+	 * It is possible for release_folio to clear the TREE_REF bit before we
 	 * set io_pages. See check_buffer_tree_ref for a more detailed comment.
 	 */
 	check_buffer_tree_ref(eb);

@@ -149,7 +149,7 @@ typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
 int try_release_extent_mapping(struct page *page, gfp_t mask);
 int try_release_extent_buffer(struct page *page);
-int btrfs_readpage(struct file *file, struct page *page);
+int btrfs_read_folio(struct file *file, struct folio *folio);
 int extent_write_full_page(struct page *page, struct writeback_control *wbc);
 int extent_write_locked_range(struct inode *inode, u64 start, u64 end);
 int extent_writepages(struct address_space *mapping,

@@ -1307,11 +1307,12 @@ static int prepare_uptodate_page(struct inode *inode,
 				 struct page *page, u64 pos,
 				 bool force_uptodate)
 {
+	struct folio *folio = page_folio(page);
 	int ret = 0;
 	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
 	    !PageUptodate(page)) {
-		ret = btrfs_readpage(NULL, page);
+		ret = btrfs_read_folio(NULL, folio);
 		if (ret)
 			return ret;
 		lock_page(page);
@@ -1321,8 +1322,8 @@ static int prepare_uptodate_page(struct inode *inode,
 		}
 		/*
-		 * Since btrfs_readpage() will unlock the page before it
-		 * returns, there is a window where btrfs_releasepage() can be
+		 * Since btrfs_read_folio() will unlock the folio before it
+		 * returns, there is a window where btrfs_release_folio() can be
 		 * called to release the page. Here we check both inode
 		 * mapping and PagePrivate() to make sure the page was not
 		 * released.
@@ -2364,7 +2365,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct address_space *mapping = filp->f_mapping;
-	if (!mapping->a_ops->readpage)
+	if (!mapping->a_ops->read_folio)
 		return -ENOEXEC;
 	file_accessed(filp);

@@ -465,7 +465,7 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
 		io_ctl->pages[i] = page;
 		if (uptodate && !PageUptodate(page)) {
-			btrfs_readpage(NULL, page);
+			btrfs_read_folio(NULL, page_folio(page));
 			lock_page(page);
 			if (page->mapping != inode->i_mapping) {
 				btrfs_err(BTRFS_I(inode)->root->fs_info,

@@ -4809,7 +4809,7 @@ again:
 		goto out_unlock;
 	if (!PageUptodate(page)) {
-		ret = btrfs_readpage(NULL, page);
+		ret = btrfs_read_folio(NULL, page_folio(page));
 		lock_page(page);
 		if (page->mapping != mapping) {
 			unlock_page(page);
@@ -8204,7 +8204,7 @@ static void btrfs_readahead(struct readahead_control *rac)
 }
 /*
- * For releasepage() and invalidate_folio() we have a race window where
+ * For release_folio() and invalidate_folio() we have a race window where
 * folio_end_writeback() is called but the subpage spinlock is not yet released.
 * If we continue to release/invalidate the page, we could cause use-after-free
 * for subpage spinlock. So this function is to spin and wait for subpage
@@ -8236,22 +8236,22 @@ static void wait_subpage_spinlock(struct page *page)
 	spin_unlock_irq(&subpage->lock);
 }
-static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
+static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
-	int ret = try_release_extent_mapping(page, gfp_flags);
+	int ret = try_release_extent_mapping(&folio->page, gfp_flags);
 	if (ret == 1) {
-		wait_subpage_spinlock(page);
-		clear_page_extent_mapped(page);
+		wait_subpage_spinlock(&folio->page);
+		clear_page_extent_mapped(&folio->page);
 	}
 	return ret;
 }
-static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
+static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
-	if (PageWriteback(page) || PageDirty(page))
-		return 0;
-	return __btrfs_releasepage(page, gfp_flags);
+	if (folio_test_writeback(folio) || folio_test_dirty(folio))
+		return false;
+	return __btrfs_release_folio(folio, gfp_flags);
 }
 #ifdef CONFIG_MIGRATION
@@ -8322,7 +8322,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
 	 * still safe to wait for ordered extent to finish.
 	 */
 	if (!(offset == 0 && length == folio_size(folio))) {
-		btrfs_releasepage(&folio->page, GFP_NOFS);
+		btrfs_release_folio(folio, GFP_NOFS);
 		return;
 	}
@@ -8446,7 +8446,7 @@ next:
 	ASSERT(!folio_test_ordered(folio));
 	btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
 	if (!inode_evicting)
-		__btrfs_releasepage(&folio->page, GFP_NOFS);
+		__btrfs_release_folio(folio, GFP_NOFS);
 	clear_page_extent_mapped(&folio->page);
 }
@@ -11415,13 +11415,13 @@ static const struct file_operations btrfs_dir_file_operations = {
 * For now we're avoiding this by dropping bmap.
 */
 static const struct address_space_operations btrfs_aops = {
-	.readpage	= btrfs_readpage,
+	.read_folio	= btrfs_read_folio,
 	.writepage	= btrfs_writepage,
 	.writepages	= btrfs_writepages,
 	.readahead	= btrfs_readahead,
 	.direct_IO	= noop_direct_IO,
 	.invalidate_folio = btrfs_invalidate_folio,
-	.releasepage	= btrfs_releasepage,
+	.release_folio	= btrfs_release_folio,
 #ifdef CONFIG_MIGRATION
 	.migratepage	= btrfs_migratepage,
 #endif

@@ -1358,7 +1358,7 @@ again:
 	 * make it uptodate.
 	 */
 	if (!PageUptodate(page)) {
-		btrfs_readpage(NULL, page);
+		btrfs_read_folio(NULL, page_folio(page));
 		lock_page(page);
 		if (page->mapping != mapping || !PagePrivate(page)) {
 			unlock_page(page);

@@ -1101,7 +1101,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 			continue;
 		/*
-		 * if we are modifying block in fs tree, wait for readpage
+		 * if we are modifying block in fs tree, wait for read_folio
 		 * to complete and drop the extent cache
 		 */
 		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
@@ -1563,7 +1563,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
 			end = (u64)-1;
 		}
-		/* the lock_extent waits for readpage to complete */
+		/* the lock_extent waits for read_folio to complete */
 		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
 		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
 		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
@@ -2818,7 +2818,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
 	 * Subpage can't handle page with DIRTY but without UPTODATE
 	 * bit as it can lead to the following deadlock:
 	 *
-	 * btrfs_readpage()
+	 * btrfs_read_folio()
 	 * | Page already *locked*
 	 * |- btrfs_lock_and_flush_ordered_range()
 	 *    |- btrfs_start_ordered_extent()
@@ -2967,11 +2967,12 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 		goto release_page;
 	if (PageReadahead(page))
-		page_cache_async_readahead(inode->i_mapping, ra, NULL, page,
-					   page_index, last_index + 1 - page_index);
+		page_cache_async_readahead(inode->i_mapping, ra, NULL,
+					   page_folio(page), page_index,
+					   last_index + 1 - page_index);
 	if (!PageUptodate(page)) {
-		btrfs_readpage(NULL, page);
+		btrfs_read_folio(NULL, page_folio(page));
 		lock_page(page);
 		if (!PageUptodate(page)) {
 			ret = -EIO;

@@ -4907,11 +4907,11 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
 		if (PageReadahead(page))
 			page_cache_async_readahead(sctx->cur_inode->i_mapping,
-						   &sctx->ra, NULL, page, index,
-						   last_index + 1 - index);
+						   &sctx->ra, NULL, page_folio(page),
+						   index, last_index + 1 - index);
 		if (!PageUptodate(page)) {
-			btrfs_readpage(NULL, page);
+			btrfs_read_folio(NULL, page_folio(page));
 			lock_page(page);
 			if (!PageUptodate(page)) {
 				unlock_page(page);

@ -79,26 +79,26 @@ void unlock_buffer(struct buffer_head *bh)
EXPORT_SYMBOL(unlock_buffer);
/*
* Returns if the page has dirty or writeback buffers. If all the buffers
* are unlocked and clean then the PageDirty information is stale. If
* any of the pages are locked, it is assumed they are locked for IO.
* Returns if the folio has dirty or writeback buffers. If all the buffers
* are unlocked and clean then the folio_test_dirty information is stale. If
* any of the buffers are locked, it is assumed they are locked for IO.
*/
void buffer_check_dirty_writeback(struct page *page,
void buffer_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback)
{
struct buffer_head *head, *bh;
*dirty = false;
*writeback = false;
BUG_ON(!PageLocked(page));
BUG_ON(!folio_test_locked(folio));
if (!page_has_buffers(page))
head = folio_buffers(folio);
if (!head)
return;
if (PageWriteback(page))
if (folio_test_writeback(folio))
*writeback = true;
head = page_buffers(page);
bh = head;
do {
if (buffer_locked(bh))
@ -314,7 +314,7 @@ static void decrypt_bh(struct work_struct *work)
}
/*
* I/O completion handler for block_read_full_page() - pages
* I/O completion handler for block_read_full_folio() - pages
* which come unlocked at the end of I/O.
*/
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
@ -955,7 +955,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
size);
goto done;
}
if (!try_to_free_buffers(page))
if (!try_to_free_buffers(page_folio(page)))
goto failed;
}
@ -1060,8 +1060,8 @@ __getblk_slow(struct block_device *bdev, sector_t block,
* Also. When blockdev buffers are explicitly read with bread(), they
* individually become uptodate. But their backing page remains not
* uptodate - even if all of its buffers are uptodate. A subsequent
* block_read_full_page() against that page will discover all the uptodate
* buffers, will set the page uptodate and will perform no I/O.
* block_read_full_folio() against that folio will discover all the uptodate
* buffers, will set the folio uptodate and will perform no I/O.
*/
/**
@ -2088,7 +2088,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
/*
* If this is a partial write which happened to make all buffers
* uptodate then we can optimize away a bogus readpage() for
* uptodate then we can optimize away a bogus read_folio() for
* the next read(). Here we 'discover' whether the page went
* uptodate as a result of this (potentially partial) write.
*/
@ -2104,13 +2104,13 @@ static int __block_commit_write(struct inode *inode, struct page *page,
* The filesystem needs to handle block truncation upon failure.
*/
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
unsigned flags, struct page **pagep, get_block_t *get_block)
struct page **pagep, get_block_t *get_block)
{
pgoff_t index = pos >> PAGE_SHIFT;
struct page *page;
int status;
page = grab_cache_page_write_begin(mapping, index, flags);
page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
@ -2137,12 +2137,12 @@ int block_write_end(struct file *file, struct address_space *mapping,
if (unlikely(copied < len)) {
/*
* The buffers that were written will now be uptodate, so we
* don't have to worry about a readpage reading them and
* overwriting a partial write. However if we have encountered
* a short write and only partially written into a buffer, it
* will not be marked uptodate, so a readpage might come in and
* destroy our partial write.
* The buffers that were written will now be uptodate, so
* we don't have to worry about a read_folio reading them
* and overwriting a partial write. However if we have
* encountered a short write and only partially written
* into a buffer, it will not be marked uptodate, so a
* read_folio might come in and destroy our partial write.
*
* Do the simplest thing, and just treat any short write to a
* non uptodate page as a zero-length write, and force the
@ -2245,26 +2245,28 @@ bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
EXPORT_SYMBOL(block_is_partially_uptodate);
/*
* Generic "read page" function for block devices that have the normal
* Generic "read_folio" function for block devices that have the normal
* get_block functionality. This is most of the block device filesystems.
* Reads the page asynchronously --- the unlock_buffer() and
* Reads the folio asynchronously --- the unlock_buffer() and
* set/clear_buffer_uptodate() functions propagate buffer state into the
* page struct once IO has completed.
* folio once IO has completed.
*/
int block_read_full_page(struct page *page, get_block_t *get_block)
int block_read_full_folio(struct folio *folio, get_block_t *get_block)
{
struct inode *inode = page->mapping->host;
struct inode *inode = folio->mapping->host;
sector_t iblock, lblock;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
unsigned int blocksize, bbits;
int nr, i;
int fully_mapped = 1;
head = create_page_buffers(page, inode, 0);
VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
head = create_page_buffers(&folio->page, inode, 0);
blocksize = head->b_size;
bbits = block_size_bits(blocksize);
iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
lblock = (i_size_read(inode)+blocksize-1) >> bbits;
bh = head;
nr = 0;
@ -2282,10 +2284,11 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
SetPageError(page);
folio_set_error(folio);
}
if (!buffer_mapped(bh)) {
zero_user(page, i * blocksize, blocksize);
folio_zero_range(folio, i * blocksize,
blocksize);
if (!err)
set_buffer_uptodate(bh);
continue;
@ -2301,16 +2304,16 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
} while (i++, iblock++, (bh = bh->b_this_page) != head);
if (fully_mapped)
SetPageMappedToDisk(page);
folio_set_mappedtodisk(folio);
if (!nr) {
/*
* All buffers are uptodate - we can set the page uptodate
* All buffers are uptodate - we can set the folio uptodate
* as well. But not if get_block() returned an error.
*/
if (!PageError(page))
SetPageUptodate(page);
unlock_page(page);
if (!folio_test_error(folio))
folio_mark_uptodate(folio);
folio_unlock(folio);
return 0;
}
@ -2335,7 +2338,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
}
return 0;
}
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_read_full_folio);
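
For a filesystem built on buffer heads, adopting the renamed hook stays mechanical: point ->read_folio at a one-line wrapper, as efs and freevxfs do later in this series. A minimal sketch, assuming a hypothetical myfs with an existing myfs_get_block helper::

	static int myfs_read_folio(struct file *file, struct folio *folio)
	{
		/* Reads asynchronously; the folio is unlocked on I/O completion. */
		return block_read_full_folio(folio, myfs_get_block);
	}

	static const struct address_space_operations myfs_aops = {
		.read_folio	= myfs_read_folio,
	};

Note the VM_BUG_ON_FOLIO above: block_read_full_folio() does not handle large folios yet, so only order-0 folios may reach it.
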
/* utility function for filesystems that need to do work on expanding
* truncates. Uses filesystem pagecache writes to allow the filesystem to
@ -2344,6 +2347,7 @@ EXPORT_SYMBOL(block_read_full_page);
int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
struct page *page;
void *fsdata;
int err;
@ -2352,11 +2356,11 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
if (err)
goto out;
err = pagecache_write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
if (err)
goto out;
err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
BUG_ON(err > 0);
out:
@ -2368,6 +2372,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
loff_t pos, loff_t *bytes)
{
struct inode *inode = mapping->host;
const struct address_space_operations *aops = mapping->a_ops;
unsigned int blocksize = i_blocksize(inode);
struct page *page;
void *fsdata;
@ -2387,12 +2392,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
}
len = PAGE_SIZE - zerofrom;
err = pagecache_write_begin(file, mapping, curpos, len, 0,
err = aops->write_begin(file, mapping, curpos, len,
&page, &fsdata);
if (err)
goto out;
zero_user(page, zerofrom, len);
err = pagecache_write_end(file, mapping, curpos, len, len,
err = aops->write_end(file, mapping, curpos, len, len,
page, fsdata);
if (err < 0)
goto out;
@ -2420,12 +2425,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
}
len = offset - zerofrom;
err = pagecache_write_begin(file, mapping, curpos, len, 0,
err = aops->write_begin(file, mapping, curpos, len,
&page, &fsdata);
if (err)
goto out;
zero_user(page, zerofrom, len);
err = pagecache_write_end(file, mapping, curpos, len, len,
err = aops->write_end(file, mapping, curpos, len, len,
page, fsdata);
if (err < 0)
goto out;
@ -2441,7 +2446,7 @@ out:
* We may have to extend the file.
*/
int cont_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata,
get_block_t *get_block, loff_t *bytes)
{
@ -2460,7 +2465,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
(*bytes)++;
}
return block_write_begin(mapping, pos, len, flags, pagep, get_block);
return block_write_begin(mapping, pos, len, pagep, get_block);
}
EXPORT_SYMBOL(cont_write_begin);
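
The dropped argument is visible in every ->write_begin hunk in this series: with AOP_FLAG_NOFS gone there is nothing left to pass, so the flags parameter disappears from the method and from helpers such as block_write_begin(), cont_write_begin() and grab_cache_page_write_begin(). For reference, the prototype change (a summary, not itself a hunk from this diff)::

	/* 5.18 and earlier */
	int (*write_begin)(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned flags,
			   struct page **pagep, void **fsdata);

	/* 5.19: no AOP flags; NOFS behaviour comes from the scoped
	 * memalloc_nofs_save()/memalloc_nofs_restore() API instead. */
	int (*write_begin)(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len,
			   struct page **pagep, void **fsdata);

The same hunks retire pagecache_write_begin()/pagecache_write_end(); callers like generic_cont_expand_simple() above now invoke mapping->a_ops->write_begin/write_end directly.
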
@ -2568,8 +2573,7 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
* On exit the page is fully uptodate in the areas outside (from,to)
* The filesystem needs to handle block truncation upon failure.
*/
int nobh_write_begin(struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
int nobh_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
struct page **pagep, void **fsdata,
get_block_t *get_block)
{
@ -2591,7 +2595,7 @@ int nobh_write_begin(struct address_space *mapping,
from = pos & (PAGE_SIZE - 1);
to = from + len;
page = grab_cache_page_write_begin(mapping, index, flags);
page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
*pagep = page;
@ -2790,44 +2794,28 @@ int nobh_truncate_page(struct address_space *mapping,
loff_t from, get_block_t *get_block)
{
pgoff_t index = from >> PAGE_SHIFT;
unsigned offset = from & (PAGE_SIZE-1);
unsigned blocksize;
sector_t iblock;
unsigned length, pos;
struct inode *inode = mapping->host;
struct page *page;
unsigned blocksize = i_blocksize(inode);
struct folio *folio;
struct buffer_head map_bh;
size_t offset;
sector_t iblock;
int err;
blocksize = i_blocksize(inode);
length = offset & (blocksize - 1);
/* Block boundary? Nothing to do */
if (!length)
if (!(from & (blocksize - 1)))
return 0;
length = blocksize - length;
iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
page = grab_cache_page(mapping, index);
folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
mapping_gfp_mask(mapping));
err = -ENOMEM;
if (!page)
if (!folio)
goto out;
if (page_has_buffers(page)) {
has_buffers:
unlock_page(page);
put_page(page);
return block_truncate_page(mapping, from, get_block);
}
/* Find the buffer that contains "offset" */
pos = blocksize;
while (offset >= pos) {
iblock++;
pos += blocksize;
}
if (folio_buffers(folio))
goto has_buffers;
iblock = from >> inode->i_blkbits;
map_bh.b_size = blocksize;
map_bh.b_state = 0;
err = get_block(inode, iblock, &map_bh, 0);
@ -2838,29 +2826,35 @@ has_buffers:
goto unlock;
/* Ok, it's mapped. Make sure it's up-to-date */
if (!PageUptodate(page)) {
err = mapping->a_ops->readpage(NULL, page);
if (!folio_test_uptodate(folio)) {
err = mapping->a_ops->read_folio(NULL, folio);
if (err) {
put_page(page);
folio_put(folio);
goto out;
}
lock_page(page);
if (!PageUptodate(page)) {
folio_lock(folio);
if (!folio_test_uptodate(folio)) {
err = -EIO;
goto unlock;
}
if (page_has_buffers(page))
if (folio_buffers(folio))
goto has_buffers;
}
zero_user(page, offset, length);
set_page_dirty(page);
offset = offset_in_folio(folio, from);
folio_zero_segment(folio, offset, round_up(offset, blocksize));
folio_mark_dirty(folio);
err = 0;
unlock:
unlock_page(page);
put_page(page);
folio_unlock(folio);
folio_put(folio);
out:
return err;
has_buffers:
folio_unlock(folio);
folio_put(folio);
return block_truncate_page(mapping, from, get_block);
}
EXPORT_SYMBOL(nobh_truncate_page);
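
nobh_truncate_page() also shows the folio-native page-cache lookup: grab_cache_page() becomes __filemap_get_folio() with FGP flags, returning a locked folio. The idiom in isolation (a sketch; error handling reduced to the minimum)::

	struct folio *folio;

	/* Find or create the folio at @index; FGP_LOCK returns it locked. */
	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
			mapping_gfp_mask(mapping));
	if (!folio)
		return -ENOMEM;

	/* ... operate on the locked folio ... */

	folio_unlock(folio);
	folio_put(folio);
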
@ -3161,20 +3155,20 @@ int sync_dirty_buffer(struct buffer_head *bh)
EXPORT_SYMBOL(sync_dirty_buffer);
/*
* try_to_free_buffers() checks if all the buffers on this particular page
* try_to_free_buffers() checks if all the buffers on this particular folio
* are unused, and releases them if so.
*
* Exclusion against try_to_free_buffers may be obtained by either
* locking the page or by holding its mapping's private_lock.
* locking the folio or by holding its mapping's private_lock.
*
* If the page is dirty but all the buffers are clean then we need to
* be sure to mark the page clean as well. This is because the page
* If the folio is dirty but all the buffers are clean then we need to
* be sure to mark the folio clean as well. This is because the folio
* may be against a block device, and a later reattachment of buffers
* to a dirty page will set *all* buffers dirty. Which would corrupt
* to a dirty folio will set *all* buffers dirty. Which would corrupt
* filesystem data on the same device.
*
* The same applies to regular filesystem pages: if all the buffers are
* clean then we set the page clean and proceed. To do that, we require
* The same applies to regular filesystem folios: if all the buffers are
* clean then we set the folio clean and proceed. To do that, we require
* total exclusion from block_dirty_folio(). That is obtained with
* private_lock.
*
@ -3186,10 +3180,10 @@ static inline int buffer_busy(struct buffer_head *bh)
(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}
static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
static bool
drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
{
struct buffer_head *head = page_buffers(page);
struct buffer_head *head = folio_buffers(folio);
struct buffer_head *bh;
bh = head;
@ -3207,46 +3201,46 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
bh = next;
} while (bh != head);
*buffers_to_free = head;
detach_page_private(page);
return 1;
folio_detach_private(folio);
return true;
failed:
return 0;
return false;
}
int try_to_free_buffers(struct page *page)
bool try_to_free_buffers(struct folio *folio)
{
struct address_space * const mapping = page->mapping;
struct address_space * const mapping = folio->mapping;
struct buffer_head *buffers_to_free = NULL;
int ret = 0;
bool ret = 0;
BUG_ON(!PageLocked(page));
if (PageWriteback(page))
return 0;
BUG_ON(!folio_test_locked(folio));
if (folio_test_writeback(folio))
return false;
if (mapping == NULL) { /* can this still happen? */
ret = drop_buffers(page, &buffers_to_free);
ret = drop_buffers(folio, &buffers_to_free);
goto out;
}
spin_lock(&mapping->private_lock);
ret = drop_buffers(page, &buffers_to_free);
ret = drop_buffers(folio, &buffers_to_free);
/*
* If the filesystem writes its buffers by hand (eg ext3)
* then we can have clean buffers against a dirty page. We
* clean the page here; otherwise the VM will never notice
* then we can have clean buffers against a dirty folio. We
* clean the folio here; otherwise the VM will never notice
* that the filesystem did any IO at all.
*
* Also, during truncate, discard_buffer will have marked all
* the page's buffers clean. We discover that here and clean
* the page also.
* the folio's buffers clean. We discover that here and clean
* the folio also.
*
* private_lock must be held over this entire operation in order
* to synchronise against block_dirty_folio and prevent the
* dirty bit from being lost.
*/
if (ret)
cancel_dirty_page(page);
folio_cancel_dirty(folio);
spin_unlock(&mapping->private_lock);
out:
if (buffers_to_free) {

View File

@ -162,24 +162,24 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
folio_wait_fscache(folio);
}
static int ceph_releasepage(struct page *page, gfp_t gfp)
static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
{
struct inode *inode = page->mapping->host;
struct inode *inode = folio->mapping->host;
dout("%llx:%llx releasepage %p idx %lu (%sdirty)\n",
ceph_vinop(inode), page,
page->index, PageDirty(page) ? "" : "not ");
dout("%llx:%llx release_folio idx %lu (%sdirty)\n",
ceph_vinop(inode),
folio->index, folio_test_dirty(folio) ? "" : "not ");
if (PagePrivate(page))
return 0;
if (folio_test_private(folio))
return false;
if (PageFsCache(page)) {
if (folio_test_fscache(folio)) {
if (current_is_kswapd() || !(gfp & __GFP_FS))
return 0;
wait_on_page_fscache(page);
return false;
folio_wait_fscache(folio);
}
ceph_fscache_note_page_release(inode);
return 1;
return true;
}
static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
@ -1314,14 +1314,14 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned
* clean, or already dirty within the same snap context.
*/
static int ceph_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned aop_flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct inode *inode = file_inode(file);
struct folio *folio = NULL;
int r;
r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL);
r = netfs_write_begin(file, inode->i_mapping, pos, len, &folio, NULL);
if (r == 0)
folio_wait_fscache(folio);
if (r < 0) {
@ -1375,7 +1375,7 @@ out:
}
const struct address_space_operations ceph_aops = {
.readpage = netfs_readpage,
.read_folio = netfs_read_folio,
.readahead = netfs_readahead,
.writepage = ceph_writepage,
.writepages = ceph_writepages_start,
@ -1383,7 +1383,7 @@ const struct address_space_operations ceph_aops = {
.write_end = ceph_write_end,
.dirty_folio = ceph_dirty_folio,
.invalidate_folio = ceph_invalidate_folio,
.releasepage = ceph_releasepage,
.release_folio = ceph_release_folio,
.direct_IO = noop_direct_IO,
};
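
ceph_release_folio() above is the template every ->release_folio conversion in this series follows: struct page becomes struct folio, the 0/1 int return becomes bool, and Page* tests become folio_test_* calls. Stripped to its skeleton (a sketch; the fscache handling is filesystem-specific)::

	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
	{
		/* Private data still attached: the folio is in use. */
		if (folio_test_private(folio))
			return false;
		/* Nothing pins the folio; the VM may free it. */
		return true;
	}
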
@ -1775,7 +1775,7 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
struct address_space *mapping = file->f_mapping;
if (!mapping->a_ops->readpage)
if (!mapping->a_ops->read_folio)
return -ENOEXEC;
file_accessed(file);
vma->vm_ops = &ceph_vmops;

View File

@ -4612,8 +4612,9 @@ read_complete:
return rc;
}
static int cifs_readpage(struct file *file, struct page *page)
static int cifs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
loff_t offset = page_file_offset(page);
int rc = -EACCES;
unsigned int xid;
@ -4626,7 +4627,7 @@ static int cifs_readpage(struct file *file, struct page *page)
return rc;
}
cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
cifs_dbg(FYI, "read_folio %p at offset %d 0x%x\n",
page, (int)offset, (int)offset);
rc = cifs_readpage_worker(file, page, &offset);
@ -4681,7 +4682,7 @@ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
}
static int cifs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int oncethru = 0;
@ -4695,7 +4696,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
start:
page = grab_cache_page_write_begin(mapping, index, flags);
page = grab_cache_page_write_begin(mapping, index);
if (!page) {
rc = -ENOMEM;
goto out;
@ -4757,16 +4758,16 @@ out:
return rc;
}
static int cifs_release_page(struct page *page, gfp_t gfp)
static bool cifs_release_folio(struct folio *folio, gfp_t gfp)
{
if (PagePrivate(page))
if (folio_test_private(folio))
return 0;
if (PageFsCache(page)) {
if (folio_test_fscache(folio)) {
if (current_is_kswapd() || !(gfp & __GFP_FS))
return false;
wait_on_page_fscache(page);
folio_wait_fscache(folio);
}
fscache_note_page_release(cifs_inode_cookie(page->mapping->host));
fscache_note_page_release(cifs_inode_cookie(folio->mapping->host));
return true;
}
@ -4965,14 +4966,14 @@ static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio)
#endif
const struct address_space_operations cifs_addr_ops = {
.readpage = cifs_readpage,
.read_folio = cifs_read_folio,
.readahead = cifs_readahead,
.writepage = cifs_writepage,
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
.dirty_folio = cifs_dirty_folio,
.releasepage = cifs_release_page,
.release_folio = cifs_release_folio,
.direct_IO = cifs_direct_io,
.invalidate_folio = cifs_invalidate_folio,
.launder_folio = cifs_launder_folio,
@ -4986,18 +4987,18 @@ const struct address_space_operations cifs_addr_ops = {
};
/*
* cifs_readpages requires the server to support a buffer large enough to
* cifs_readahead requires the server to support a buffer large enough to
* contain the header plus one complete page of data. Otherwise, we need
* to leave cifs_readpages out of the address space operations.
* to leave cifs_readahead out of the address space operations.
*/
const struct address_space_operations cifs_addr_ops_smallbuf = {
.readpage = cifs_readpage,
.read_folio = cifs_read_folio,
.writepage = cifs_writepage,
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
.dirty_folio = cifs_dirty_folio,
.releasepage = cifs_release_page,
.release_folio = cifs_release_folio,
.invalidate_folio = cifs_invalidate_folio,
.launder_folio = cifs_launder_folio,
};

View File

@ -20,9 +20,10 @@
#include "coda_psdev.h"
#include "coda_linux.h"
static int coda_symlink_filler(struct file *file, struct page *page)
static int coda_symlink_filler(struct file *file, struct folio *folio)
{
struct inode *inode = page->mapping->host;
struct page *page = &folio->page;
struct inode *inode = folio->mapping->host;
int error;
struct coda_inode_info *cii;
unsigned int len = PAGE_SIZE;
@ -44,5 +45,5 @@ fail:
}
const struct address_space_operations coda_symlink_aops = {
.readpage = coda_symlink_filler,
.read_folio = coda_symlink_filler,
};
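
Conversions like coda_symlink_filler() are deliberately transitional: the function now receives a folio but immediately takes struct page *page = &folio->page and keeps its old page-based body. That is safe because none of these filesystems are handed large folios; the shim can be peeled away later, one function at a time. The shape of the shim, with a hypothetical old page-based helper::

	static int myfs_read_folio(struct file *file, struct folio *folio)
	{
		/* Transitional: no large folios here, so the head page
		 * covers the whole folio. */
		struct page *page = &folio->page;

		return myfs_fill_page(file, page);	/* assumed old helper */
	}
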

View File

@ -115,7 +115,7 @@ Block Size
(Block size in cramfs refers to the size of input data that is
compressed at a time. It's intended to be somewhere around
PAGE_SIZE for cramfs_readpage's convenience.)
PAGE_SIZE for cramfs_read_folio's convenience.)
The superblock ought to indicate the block size that the fs was
written for, since comments in <linux/pagemap.h> indicate that
@ -161,7 +161,7 @@ size. The options are:
PAGE_SIZE.
It's easy enough to change the kernel to use a smaller value than
PAGE_SIZE: just make cramfs_readpage read multiple blocks.
PAGE_SIZE: just make cramfs_read_folio read multiple blocks.
The cost of option 1 is that kernels with a larger PAGE_SIZE
value don't get as good compression as they can.
@ -173,9 +173,9 @@ they don't mind their cramfs being inaccessible to kernels with
smaller PAGE_SIZE values.
Option 3 is easy to implement if we don't mind being CPU-inefficient:
e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
e.g. get read_folio to decompress to a buffer of size MAX_BLKSIZE (which
must be no larger than 32KB) and discard what it doesn't need.
Getting readpage to read into all the covered pages is harder.
Getting read_folio to read into all the covered pages is harder.
The main advantage of option 3 over 1, 2, is better compression. The
cost is greater complexity. Probably not worth it, but I hope someone

View File

@ -414,7 +414,7 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
/*
* Let's create a mixed map if we can't map it all.
* The normal paging machinery will take care of the
* unpopulated ptes via cramfs_readpage().
* unpopulated ptes via cramfs_read_folio().
*/
int i;
vma->vm_flags |= VM_MIXEDMAP;
@ -814,8 +814,9 @@ out:
return d_splice_alias(inode, dentry);
}
static int cramfs_readpage(struct file *file, struct page *page)
static int cramfs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
u32 maxblock;
int bytes_filled;
@ -925,7 +926,7 @@ err:
}
static const struct address_space_operations cramfs_aops = {
.readpage = cramfs_readpage
.read_folio = cramfs_read_folio
};
/*

View File

@ -170,16 +170,17 @@ out:
}
/**
* ecryptfs_readpage
* ecryptfs_read_folio
* @file: An eCryptfs file
* @page: Page from eCryptfs inode mapping into which to stick the read data
* @folio: Folio from eCryptfs inode mapping into which to stick the read data
*
* Read in a page, decrypting if necessary.
* Read in a folio, decrypting if necessary.
*
* Returns zero on success; non-zero on error.
*/
static int ecryptfs_readpage(struct file *file, struct page *page)
static int ecryptfs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
int rc = 0;
@ -264,7 +265,7 @@ out:
*/
static int ecryptfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
@ -272,7 +273,7 @@ static int ecryptfs_write_begin(struct file *file,
loff_t prev_page_end_size;
int rc = 0;
page = grab_cache_page_write_begin(mapping, index, flags);
page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
*pagep = page;
@ -549,7 +550,7 @@ const struct address_space_operations ecryptfs_aops = {
.invalidate_folio = block_invalidate_folio,
#endif
.writepage = ecryptfs_writepage,
.readpage = ecryptfs_readpage,
.read_folio = ecryptfs_read_folio,
.write_begin = ecryptfs_write_begin,
.write_end = ecryptfs_write_end,
.bmap = ecryptfs_bmap,

View File

@ -14,16 +14,18 @@
#include "efs.h"
#include <linux/efs_fs_sb.h>
static int efs_readpage(struct file *file, struct page *page)
static int efs_read_folio(struct file *file, struct folio *folio)
{
return block_read_full_page(page,efs_get_block);
return block_read_full_folio(folio, efs_get_block);
}
static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,efs_get_block);
}
static const struct address_space_operations efs_aops = {
.readpage = efs_readpage,
.read_folio = efs_read_folio,
.bmap = _efs_bmap
};

View File

@ -12,8 +12,9 @@
#include <linux/buffer_head.h>
#include "efs.h"
static int efs_symlink_readpage(struct file *file, struct page *page)
static int efs_symlink_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
char *link = page_address(page);
struct buffer_head * bh;
struct inode * inode = page->mapping->host;
@ -49,5 +50,5 @@ fail:
}
const struct address_space_operations efs_symlink_aops = {
.readpage = efs_symlink_readpage
.read_folio = efs_symlink_read_folio
};

View File

@ -351,9 +351,9 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
* since we don't have write or truncate flows, no inode
* locking needs to be held at the moment.
*/
static int erofs_readpage(struct file *file, struct page *page)
static int erofs_read_folio(struct file *file, struct folio *folio)
{
return iomap_readpage(page, &erofs_iomap_ops);
return iomap_read_folio(folio, &erofs_iomap_ops);
}
static void erofs_readahead(struct readahead_control *rac)
@ -408,7 +408,7 @@ static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
.readpage = erofs_readpage,
.read_folio = erofs_read_folio,
.readahead = erofs_readahead,
.bmap = erofs_bmap,
.direct_IO = noop_direct_IO,

View File

@ -205,10 +205,9 @@ out:
return ret;
}
static int erofs_fscache_meta_readpage(struct file *data, struct page *page)
static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
{
int ret;
struct folio *folio = page_folio(page);
struct super_block *sb = folio_mapping(folio)->host->i_sb;
struct netfs_io_request *rreq;
struct erofs_map_dev mdev = {
@ -232,7 +231,7 @@ out:
return ret;
}
static int erofs_fscache_readpage_inline(struct folio *folio,
static int erofs_fscache_read_folio_inline(struct folio *folio,
struct erofs_map_blocks *map)
{
struct super_block *sb = folio_mapping(folio)->host->i_sb;
@ -259,9 +258,8 @@ static int erofs_fscache_readpage_inline(struct folio *folio,
return 0;
}
static int erofs_fscache_readpage(struct file *file, struct page *page)
static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
{
struct folio *folio = page_folio(page);
struct inode *inode = folio_mapping(folio)->host;
struct super_block *sb = inode->i_sb;
struct erofs_map_blocks map;
@ -286,7 +284,7 @@ static int erofs_fscache_readpage(struct file *file, struct page *page)
}
if (map.m_flags & EROFS_MAP_META) {
ret = erofs_fscache_readpage_inline(folio, &map);
ret = erofs_fscache_read_folio_inline(folio, &map);
goto out_uptodate;
}
@ -376,7 +374,7 @@ static void erofs_fscache_readahead(struct readahead_control *rac)
if (map.m_flags & EROFS_MAP_META) {
struct folio *folio = readahead_folio(rac);
ret = erofs_fscache_readpage_inline(folio, &map);
ret = erofs_fscache_read_folio_inline(folio, &map);
if (!ret) {
folio_mark_uptodate(folio);
ret = folio_size(folio);
@ -410,11 +408,11 @@ static void erofs_fscache_readahead(struct readahead_control *rac)
}
static const struct address_space_operations erofs_fscache_meta_aops = {
.readpage = erofs_fscache_meta_readpage,
.read_folio = erofs_fscache_meta_read_folio,
};
const struct address_space_operations erofs_fscache_access_aops = {
.readpage = erofs_fscache_readpage,
.read_folio = erofs_fscache_read_folio,
.readahead = erofs_fscache_readahead,
};

View File

@ -578,16 +578,16 @@ static int erofs_fc_parse_param(struct fs_context *fc,
#ifdef CONFIG_EROFS_FS_ZIP
static const struct address_space_operations managed_cache_aops;
static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
static bool erofs_managed_cache_release_folio(struct folio *folio, gfp_t gfp)
{
int ret = 1; /* 0 - busy */
struct address_space *const mapping = page->mapping;
bool ret = true;
struct address_space *const mapping = folio->mapping;
DBG_BUGON(!PageLocked(page));
DBG_BUGON(!folio_test_locked(folio));
DBG_BUGON(mapping->a_ops != &managed_cache_aops);
if (PagePrivate(page))
ret = erofs_try_to_free_cached_page(page);
if (folio_test_private(folio))
ret = erofs_try_to_free_cached_page(&folio->page);
return ret;
}
@ -608,12 +608,12 @@ static void erofs_managed_cache_invalidate_folio(struct folio *folio,
DBG_BUGON(stop > folio_size(folio) || stop < length);
if (offset == 0 && stop == folio_size(folio))
while (!erofs_managed_cache_releasepage(&folio->page, GFP_NOFS))
while (!erofs_managed_cache_release_folio(folio, GFP_NOFS))
cond_resched();
}
static const struct address_space_operations managed_cache_aops = {
.releasepage = erofs_managed_cache_releasepage,
.release_folio = erofs_managed_cache_release_folio,
.invalidate_folio = erofs_managed_cache_invalidate_folio,
};

View File

@ -791,7 +791,7 @@ err_out:
static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
unsigned int readahead_pages)
{
/* auto: enable for readpage, disable for readahead */
/* auto: enable for read_folio, disable for readahead */
if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
!readahead_pages)
return true;
@ -1488,8 +1488,9 @@ skip:
}
}
static int z_erofs_readpage(struct file *file, struct page *page)
static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *const inode = page->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
@ -1563,6 +1564,6 @@ static void z_erofs_readahead(struct readahead_control *rac)
}
const struct address_space_operations z_erofs_aops = {
.readpage = z_erofs_readpage,
.read_folio = z_erofs_read_folio,
.readahead = z_erofs_readahead,
};

View File

@ -357,9 +357,9 @@ unlock_ret:
return err;
}
static int exfat_readpage(struct file *file, struct page *page)
static int exfat_read_folio(struct file *file, struct folio *folio)
{
return mpage_readpage(page, exfat_get_block);
return mpage_read_folio(folio, exfat_get_block);
}
static void exfat_readahead(struct readahead_control *rac)
@ -389,13 +389,13 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to)
}
static int exfat_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int flags,
loff_t pos, unsigned int len,
struct page **pagep, void **fsdata)
{
int ret;
*pagep = NULL;
ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
exfat_get_block,
&EXFAT_I(mapping->host)->i_size_ondisk);
@ -492,7 +492,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from)
static const struct address_space_operations exfat_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = exfat_readpage,
.read_folio = exfat_read_folio,
.readahead = exfat_readahead,
.writepage = exfat_writepage,
.writepages = exfat_writepages,

View File

@ -875,9 +875,9 @@ static int ext2_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page, ext2_get_block, wbc);
}
static int ext2_readpage(struct file *file, struct page *page)
static int ext2_read_folio(struct file *file, struct folio *folio)
{
return mpage_readpage(page, ext2_get_block);
return mpage_read_folio(folio, ext2_get_block);
}
static void ext2_readahead(struct readahead_control *rac)
@ -887,13 +887,11 @@ static void ext2_readahead(struct readahead_control *rac)
static int
ext2_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
int ret;
ret = block_write_begin(mapping, pos, len, flags, pagep,
ext2_get_block);
ret = block_write_begin(mapping, pos, len, pagep, ext2_get_block);
if (ret < 0)
ext2_write_failed(mapping, pos + len);
return ret;
@ -913,12 +911,11 @@ static int ext2_write_end(struct file *file, struct address_space *mapping,
static int
ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
int ret;
ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
ret = nobh_write_begin(mapping, pos, len, pagep, fsdata,
ext2_get_block);
if (ret < 0)
ext2_write_failed(mapping, pos + len);
@ -969,7 +966,7 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc
const struct address_space_operations ext2_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = ext2_readpage,
.read_folio = ext2_read_folio,
.readahead = ext2_readahead,
.writepage = ext2_writepage,
.write_begin = ext2_write_begin,
@ -985,7 +982,7 @@ const struct address_space_operations ext2_aops = {
const struct address_space_operations ext2_nobh_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = ext2_readpage,
.read_folio = ext2_read_folio,
.readahead = ext2_readahead,
.writepage = ext2_nobh_writepage,
.write_begin = ext2_nobh_write_begin,

View File

@ -3539,7 +3539,6 @@ extern int ext4_readpage_inline(struct inode *inode, struct page *page);
extern int ext4_try_to_write_inline_data(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
unsigned flags,
struct page **pagep);
extern int ext4_write_inline_data_end(struct inode *inode,
loff_t pos, unsigned len,
@ -3552,7 +3551,6 @@ ext4_journalled_write_inline_data(struct inode *inode,
extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
unsigned flags,
struct page **pagep,
void **fsdata);
extern int ext4_try_add_inline_entry(handle_t *handle,

View File

@ -527,13 +527,13 @@ int ext4_readpage_inline(struct inode *inode, struct page *page)
}
static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
struct inode *inode,
unsigned flags)
struct inode *inode)
{
int ret, needed_blocks, no_expand;
handle_t *handle = NULL;
int retries = 0, sem_held = 0;
struct page *page = NULL;
unsigned int flags;
unsigned from, to;
struct ext4_iloc iloc;
@ -562,9 +562,9 @@ retry:
/* We cannot recurse into the filesystem as the transaction is already
* started */
flags |= AOP_FLAG_NOFS;
page = grab_cache_page_write_begin(mapping, 0, flags);
flags = memalloc_nofs_save();
page = grab_cache_page_write_begin(mapping, 0);
memalloc_nofs_restore(flags);
if (!page) {
ret = -ENOMEM;
goto out;
@ -649,11 +649,11 @@ out:
int ext4_try_to_write_inline_data(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
unsigned flags,
struct page **pagep)
{
int ret;
handle_t *handle;
unsigned int flags;
struct page *page;
struct ext4_iloc iloc;
@ -691,9 +691,9 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
if (ret)
goto out;
flags |= AOP_FLAG_NOFS;
page = grab_cache_page_write_begin(mapping, 0, flags);
flags = memalloc_nofs_save();
page = grab_cache_page_write_begin(mapping, 0);
memalloc_nofs_restore(flags);
if (!page) {
ret = -ENOMEM;
goto out;
@ -727,8 +727,7 @@ out:
brelse(iloc.bh);
return ret;
convert:
return ext4_convert_inline_data_to_extent(mapping,
inode, flags);
return ext4_convert_inline_data_to_extent(mapping, inode);
}
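
Both inline-data paths show the standard AOP_FLAG_NOFS replacement: instead of threading a flag down to grab_cache_page_write_begin(), the caller opens an NOFS scope, inside which every allocation is implicitly GFP_NOFS (the callers gain #include <linux/sched/mm.h> for it). The idiom in isolation::

	unsigned int flags;

	/* The transaction is already started, so reclaim must not
	 * recurse into the filesystem while we allocate the page. */
	flags = memalloc_nofs_save();
	page = grab_cache_page_write_begin(mapping, 0);
	memalloc_nofs_restore(flags);
	if (!page)
		return -ENOMEM;
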
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
@ -848,13 +847,12 @@ ext4_journalled_write_inline_data(struct inode *inode,
*/
static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
struct inode *inode,
unsigned flags,
void **fsdata)
{
int ret = 0, inline_size;
struct page *page;
page = grab_cache_page_write_begin(mapping, 0, flags);
page = grab_cache_page_write_begin(mapping, 0);
if (!page)
return -ENOMEM;
@ -907,7 +905,6 @@ out:
int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct inode *inode,
loff_t pos, unsigned len,
unsigned flags,
struct page **pagep,
void **fsdata)
{
@ -916,6 +913,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct page *page;
struct ext4_iloc iloc;
int retries = 0;
unsigned int flags;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
@ -932,17 +930,10 @@ retry_journal:
if (ret && ret != -ENOSPC)
goto out_journal;
/*
* We cannot recurse into the filesystem as the transaction
* is already started.
*/
flags |= AOP_FLAG_NOFS;
if (ret == -ENOSPC) {
ext4_journal_stop(handle);
ret = ext4_da_convert_inline_data_to_extent(mapping,
inode,
flags,
fsdata);
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
@ -950,7 +941,13 @@ retry_journal:
goto out;
}
page = grab_cache_page_write_begin(mapping, 0, flags);
/*
* We cannot recurse into the filesystem as the transaction
* is already started.
*/
flags = memalloc_nofs_save();
page = grab_cache_page_write_begin(mapping, 0);
memalloc_nofs_restore(flags);
if (!page) {
ret = -ENOMEM;
goto out_journal;

View File

@ -1142,7 +1142,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
#endif
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
@ -1156,7 +1156,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return -EIO;
trace_ext4_write_begin(inode, pos, len, flags);
trace_ext4_write_begin(inode, pos, len);
/*
* Reserve one block more for addition to orphan list in case
* we allocate blocks but write fails for some reason
@ -1168,7 +1168,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
flags, pagep);
pagep);
if (ret < 0)
return ret;
if (ret == 1)
@ -1183,7 +1183,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
* the page (if needed) without using GFP_NOFS.
*/
retry_grab:
page = grab_cache_page_write_begin(mapping, index, flags);
page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
unlock_page(page);
@ -2943,7 +2943,7 @@ static int ext4_nonda_switch(struct super_block *sb)
}
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret, retries = 0;
@ -2959,14 +2959,13 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
return ext4_write_begin(file, mapping, pos,
len, flags, pagep, fsdata);
len, pagep, fsdata);
}
*fsdata = (void *)0;
trace_ext4_da_write_begin(inode, pos, len, flags);
trace_ext4_da_write_begin(inode, pos, len);
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_da_write_inline_data_begin(mapping, inode,
pos, len, flags,
ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
pagep, fsdata);
if (ret < 0)
return ret;
@ -2975,7 +2974,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
}
retry:
page = grab_cache_page_write_begin(mapping, index, flags);
page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
@ -3192,8 +3191,9 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
return iomap_bmap(mapping, block, &ext4_iomap_ops);
}
static int ext4_readpage(struct file *file, struct page *page)
static int ext4_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
int ret = -EAGAIN;
struct inode *inode = page->mapping->host;
@ -3254,19 +3254,19 @@ static void ext4_journalled_invalidate_folio(struct folio *folio,
WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
}
static int ext4_releasepage(struct page *page, gfp_t wait)
static bool ext4_release_folio(struct folio *folio, gfp_t wait)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
trace_ext4_releasepage(page);
trace_ext4_releasepage(&folio->page);
/* Page has dirty journalled data -> cannot release */
if (PageChecked(page))
return 0;
if (folio_test_checked(folio))
return false;
if (journal)
return jbd2_journal_try_to_free_buffers(journal, page);
return jbd2_journal_try_to_free_buffers(journal, folio);
else
return try_to_free_buffers(page);
return try_to_free_buffers(folio);
}
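
try_to_free_buffers() and jbd2_journal_try_to_free_buffers() both take the folio and return bool now, so a journalling filesystem's release path remains a thin dispatcher. The non-journalled variant reduces to (a sketch following the same shape)::

	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
	{
		/* Folios carrying dirty journalled data cannot be released. */
		if (folio_test_checked(folio))
			return false;
		return try_to_free_buffers(folio);
	}
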
static bool ext4_inode_datasync_dirty(struct inode *inode)
@ -3620,7 +3620,7 @@ static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
}
static const struct address_space_operations ext4_aops = {
.readpage = ext4_readpage,
.read_folio = ext4_read_folio,
.readahead = ext4_readahead,
.writepage = ext4_writepage,
.writepages = ext4_writepages,
@ -3629,7 +3629,7 @@ static const struct address_space_operations ext4_aops = {
.dirty_folio = ext4_dirty_folio,
.bmap = ext4_bmap,
.invalidate_folio = ext4_invalidate_folio,
.releasepage = ext4_releasepage,
.release_folio = ext4_release_folio,
.direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
@ -3638,7 +3638,7 @@ static const struct address_space_operations ext4_aops = {
};
static const struct address_space_operations ext4_journalled_aops = {
.readpage = ext4_readpage,
.read_folio = ext4_read_folio,
.readahead = ext4_readahead,
.writepage = ext4_writepage,
.writepages = ext4_writepages,
@ -3647,7 +3647,7 @@ static const struct address_space_operations ext4_journalled_aops = {
.dirty_folio = ext4_journalled_dirty_folio,
.bmap = ext4_bmap,
.invalidate_folio = ext4_journalled_invalidate_folio,
.releasepage = ext4_releasepage,
.release_folio = ext4_release_folio,
.direct_IO = noop_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
@ -3655,7 +3655,7 @@ static const struct address_space_operations ext4_journalled_aops = {
};
static const struct address_space_operations ext4_da_aops = {
.readpage = ext4_readpage,
.read_folio = ext4_read_folio,
.readahead = ext4_readahead,
.writepage = ext4_writepage,
.writepages = ext4_writepages,
@ -3664,7 +3664,7 @@ static const struct address_space_operations ext4_da_aops = {
.dirty_folio = ext4_dirty_folio,
.bmap = ext4_bmap,
.invalidate_folio = ext4_invalidate_folio,
.releasepage = ext4_releasepage,
.release_folio = ext4_release_folio,
.direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,

View File

@ -8,6 +8,7 @@
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "ext4_extents.h"
@ -127,7 +128,7 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
pgoff_t index1, pgoff_t index2, struct page *page[2])
{
struct address_space *mapping[2];
unsigned fl = AOP_FLAG_NOFS;
unsigned int flags;
BUG_ON(!inode1 || !inode2);
if (inode1 < inode2) {
@ -139,11 +140,15 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
mapping[1] = inode1->i_mapping;
}
page[0] = grab_cache_page_write_begin(mapping[0], index1, fl);
if (!page[0])
flags = memalloc_nofs_save();
page[0] = grab_cache_page_write_begin(mapping[0], index1);
if (!page[0]) {
memalloc_nofs_restore(flags);
return -ENOMEM;
}
page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
page[1] = grab_cache_page_write_begin(mapping[1], index2);
memalloc_nofs_restore(flags);
if (!page[1]) {
unlock_page(page[0]);
put_page(page[0]);
@ -664,8 +669,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
* Up semaphore to avoid following problems:
* a. transaction deadlock among ext4_journal_start,
* ->write_begin via pagefault, and jbd2_journal_commit
* b. racing with ->readpage, ->write_begin, and ext4_get_block
* in move_extent_per_page
* b. racing with ->read_folio, ->write_begin, and
* ext4_get_block in move_extent_per_page
*/
ext4_double_up_write_data_sem(orig_inode, donor_inode);
/* Swap original branches with new branches */

View File

@ -163,7 +163,7 @@ static bool bio_post_read_required(struct bio *bio)
*
* The mpage code never puts partial pages into a BIO (except for end-of-file).
* If a page does not map to a contiguous run of blocks then it simply falls
* back to block_read_full_page().
* back to block_read_full_folio().
*
* Why is this? If a page's completion depends on a number of different BIOs
* which can complete in any order (or at the same time) then determining the
@ -394,7 +394,7 @@ int ext4_mpage_readpages(struct inode *inode,
bio = NULL;
}
if (!PageUptodate(page))
block_read_full_page(page, ext4_get_block);
block_read_full_folio(page_folio(page), ext4_get_block);
else
unlock_page(page);
next_page:

View File

@ -69,6 +69,9 @@ static int pagecache_read(struct inode *inode, void *buf, size_t count,
static int pagecache_write(struct inode *inode, const void *buf, size_t count,
loff_t pos)
{
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
if (pos + count > inode->i_sb->s_maxbytes)
return -EFBIG;
@ -79,15 +82,13 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
void *fsdata;
int res;
res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
&page, &fsdata);
res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata);
if (res)
return res;
memcpy_to_page(page, offset_in_page(pos), buf, n);
res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
page, fsdata);
res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata);
if (res < 0)
return res;
if (res != n)

View File

@ -468,7 +468,7 @@ const struct address_space_operations f2fs_meta_aops = {
.writepages = f2fs_write_meta_pages,
.dirty_folio = f2fs_dirty_meta_folio,
.invalidate_folio = f2fs_invalidate_folio,
.releasepage = f2fs_release_page,
.release_folio = f2fs_release_folio,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,
#endif

View File

@ -1746,7 +1746,7 @@ unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
}
const struct address_space_operations f2fs_compress_aops = {
.releasepage = f2fs_release_page,
.release_folio = f2fs_release_folio,
.invalidate_folio = f2fs_invalidate_folio,
};

View File

@ -2372,8 +2372,9 @@ next_page:
return ret;
}
static int f2fs_read_data_page(struct file *file, struct page *page)
static int f2fs_read_data_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page_file_mapping(page)->host;
int ret = -EAGAIN;
@ -3314,8 +3315,7 @@ unlock_out:
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@ -3325,7 +3325,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
block_t blkaddr = NULL_ADDR;
int err = 0;
trace_f2fs_write_begin(inode, pos, len, flags);
trace_f2fs_write_begin(inode, pos, len);
if (!f2fs_is_checkpoint_ready(sbi)) {
err = -ENOSPC;
@ -3528,28 +3528,30 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
folio_detach_private(folio);
}
int f2fs_release_page(struct page *page, gfp_t wait)
bool f2fs_release_folio(struct folio *folio, gfp_t wait)
{
/* If this is dirty page, keep PagePrivate */
if (PageDirty(page))
return 0;
struct f2fs_sb_info *sbi;
/* If this is dirty folio, keep private data */
if (folio_test_dirty(folio))
return false;
/* This is an atomically written page, keep Private */
if (page_private_atomic(page))
return 0;
if (page_private_atomic(&folio->page))
return false;
if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
struct inode *inode = page->mapping->host;
sbi = F2FS_M_SB(folio->mapping);
if (test_opt(sbi, COMPRESS_CACHE)) {
struct inode *inode = folio->mapping->host;
if (inode->i_ino == F2FS_COMPRESS_INO(F2FS_I_SB(inode)))
clear_page_private_data(page);
if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
clear_page_private_data(&folio->page);
}
clear_page_private_gcing(page);
clear_page_private_gcing(&folio->page);
detach_page_private(page);
set_page_private(page, 0);
return 1;
folio_detach_private(folio);
return true;
}
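
The flag accessors translate one-for-one throughout these hunks; the correspondences used above (not an exhaustive list)::

	PageDirty(page)			->	folio_test_dirty(folio)
	PagePrivate(page)		->	folio_test_private(folio)
	PageUptodate(page)		->	folio_test_uptodate(folio)
	SetPageUptodate(page)		->	folio_mark_uptodate(folio)
	lock_page(page)			->	folio_lock(folio)
	unlock_page(page)		->	folio_unlock(folio)
	put_page(page)			->	folio_put(folio)
	detach_page_private(page)	->	folio_detach_private(folio)
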
static bool f2fs_dirty_data_folio(struct address_space *mapping,
@ -3936,7 +3938,7 @@ static void f2fs_swap_deactivate(struct file *file)
#endif
const struct address_space_operations f2fs_dblock_aops = {
.readpage = f2fs_read_data_page,
.read_folio = f2fs_read_data_folio,
.readahead = f2fs_readahead,
.writepage = f2fs_write_data_page,
.writepages = f2fs_write_data_pages,
@ -3944,7 +3946,7 @@ const struct address_space_operations f2fs_dblock_aops = {
.write_end = f2fs_write_end,
.dirty_folio = f2fs_dirty_data_folio,
.invalidate_folio = f2fs_invalidate_folio,
.releasepage = f2fs_release_page,
.release_folio = f2fs_release_folio,
.direct_IO = noop_direct_IO,
.bmap = f2fs_bmap,
.swap_activate = f2fs_swap_activate,

View File

@ -18,6 +18,7 @@
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
@ -2654,6 +2655,7 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
pgoff_t index, bool for_write)
{
struct page *page;
unsigned int flags;
if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
if (!for_write)
@ -2673,7 +2675,12 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
if (!for_write)
return grab_cache_page(mapping, index);
return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
flags = memalloc_nofs_save();
page = grab_cache_page_write_begin(mapping, index);
memalloc_nofs_restore(flags);
return page;
}
static inline struct page *f2fs_pagecache_get_page(
@ -3761,7 +3768,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
int compr_blocks, bool allow_balance);
void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int f2fs_release_page(struct page *page, gfp_t wait);
bool f2fs_release_folio(struct folio *folio, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page, enum migrate_mode mode);

View File

@ -2165,7 +2165,7 @@ const struct address_space_operations f2fs_node_aops = {
.writepages = f2fs_write_node_pages,
.dirty_folio = f2fs_dirty_node_folio,
.invalidate_folio = f2fs_invalidate_folio,
.releasepage = f2fs_release_page,
.release_folio = f2fs_release_folio,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,
#endif

View File

@ -2483,7 +2483,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
tocopy = min_t(unsigned long, sb->s_blocksize - offset,
towrite);
retry:
err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
err = a_ops->write_begin(NULL, mapping, off, tocopy,
&page, &fsdata);
if (unlikely(err)) {
if (err == -ENOMEM) {

View File

@ -74,6 +74,9 @@ static int pagecache_read(struct inode *inode, void *buf, size_t count,
static int pagecache_write(struct inode *inode, const void *buf, size_t count,
loff_t pos)
{
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
if (pos + count > inode->i_sb->s_maxbytes)
return -EFBIG;
@ -85,8 +88,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
void *addr;
int res;
res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
&page, &fsdata);
res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata);
if (res)
return res;
@ -94,8 +96,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
memcpy(addr + offset_in_page(pos), buf, n);
kunmap_atomic(addr);
res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
page, fsdata);
res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata);
if (res < 0)
return res;
if (res != n)

View File

@ -205,9 +205,9 @@ static int fat_writepages(struct address_space *mapping,
return mpage_writepages(mapping, wbc, fat_get_block);
}
static int fat_readpage(struct file *file, struct page *page)
static int fat_read_folio(struct file *file, struct folio *folio)
{
return mpage_readpage(page, fat_get_block);
return mpage_read_folio(folio, fat_get_block);
}
static void fat_readahead(struct readahead_control *rac)
@ -226,13 +226,13 @@ static void fat_write_failed(struct address_space *mapping, loff_t to)
}
static int fat_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int err;
*pagep = NULL;
err = cont_write_begin(file, mapping, pos, len, flags,
err = cont_write_begin(file, mapping, pos, len,
pagep, fsdata, fat_get_block,
&MSDOS_I(mapping->host)->mmu_private);
if (err < 0)
@ -344,7 +344,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from)
static const struct address_space_operations fat_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = fat_readpage,
.read_folio = fat_read_folio,
.readahead = fat_readahead,
.writepage = fat_writepage,
.writepages = fat_writepages,

View File

@ -38,33 +38,34 @@
#include "vxfs_inode.h"
static int vxfs_immed_readpage(struct file *, struct page *);
static int vxfs_immed_read_folio(struct file *, struct folio *);
/*
* Address space operations for immed files and directories.
*/
const struct address_space_operations vxfs_immed_aops = {
.readpage = vxfs_immed_readpage,
.read_folio = vxfs_immed_read_folio,
};
/**
* vxfs_immed_readpage - read part of an immed inode into pagecache
* vxfs_immed_read_folio - read part of an immed inode into pagecache
* @file: file context (unused)
* @page: page frame to fill in.
* @folio: folio to fill in.
*
* Description:
* vxfs_immed_readpage reads a part of the immed area of the
* vxfs_immed_read_folio reads a part of the immed area of the
* file that hosts @folio into the pagecache.
*
* Returns:
* Zero on success, else a negative error code.
*
* Locking status:
* @page is locked and will be unlocked.
* @folio is locked and will be unlocked.
*/
static int
vxfs_immed_readpage(struct file *fp, struct page *pp)
vxfs_immed_read_folio(struct file *fp, struct folio *folio)
{
struct page *pp = &folio->page;
struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host);
u_int64_t offset = (u_int64_t)pp->index << PAGE_SHIFT;
caddr_t kaddr;

View File

@ -38,11 +38,11 @@
#include "vxfs_extern.h"
static int vxfs_readpage(struct file *, struct page *);
static int vxfs_read_folio(struct file *, struct folio *);
static sector_t vxfs_bmap(struct address_space *, sector_t);
const struct address_space_operations vxfs_aops = {
.readpage = vxfs_readpage,
.read_folio = vxfs_read_folio,
.bmap = vxfs_bmap,
};
@ -141,24 +141,23 @@ vxfs_getblk(struct inode *ip, sector_t iblock,
}
/**
* vxfs_readpage - read one page synchronously into the pagecache
* vxfs_read_folio - read one page synchronously into the pagecache
* @file: file context (unused)
* @page: page frame to fill in.
* @folio: folio to fill in.
*
* Description:
* The vxfs_readpage routine reads @page synchronously into the
* The vxfs_read_folio routine reads @folio synchronously into the
* pagecache.
*
* Returns:
* Zero on success, else a negative error code.
*
* Locking status:
* @page is locked and will be unlocked.
* @folio is locked and will be unlocked.
*/
static int
vxfs_readpage(struct file *file, struct page *page)
static int vxfs_read_folio(struct file *file, struct folio *folio)
{
return block_read_full_page(page, vxfs_getblk);
return block_read_full_folio(folio, vxfs_getblk);
}
/**

View File

@ -1957,20 +1957,20 @@ void fuse_init_dir(struct inode *inode)
fi->rdc.version = 0;
}
static int fuse_symlink_readpage(struct file *null, struct page *page)
static int fuse_symlink_read_folio(struct file *null, struct folio *folio)
{
int err = fuse_readlink_page(page->mapping->host, page);
int err = fuse_readlink_page(folio->mapping->host, &folio->page);
if (!err)
SetPageUptodate(page);
folio_mark_uptodate(folio);
unlock_page(page);
folio_unlock(folio);
return err;
}
static const struct address_space_operations fuse_symlink_aops = {
.readpage = fuse_symlink_readpage,
.read_folio = fuse_symlink_read_folio,
};
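
fuse_symlink_read_folio() is a compact statement of the ->read_folio contract: fill the folio, mark it uptodate only on success, and unlock it in every case before returning. As a skeleton, with myfs_fill_folio standing in for the filesystem's actual read path::

	static int myfs_read_folio(struct file *file, struct folio *folio)
	{
		int err = myfs_fill_folio(folio->mapping->host, folio);

		if (!err)
			folio_mark_uptodate(folio);
		folio_unlock(folio);
		return err;
	}
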
void fuse_init_symlink(struct inode *inode)

View File

@ -857,8 +857,9 @@ static int fuse_do_readpage(struct file *file, struct page *page)
return 0;
}
static int fuse_readpage(struct file *file, struct page *page)
static int fuse_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
int err;
@ -1174,7 +1175,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
break;
err = -ENOMEM;
page = grab_cache_page_write_begin(mapping, index, 0);
page = grab_cache_page_write_begin(mapping, index);
if (!page)
break;
@ -2273,8 +2274,7 @@ out:
* but how to implement it without killing performance need more thinking.
*/
static int fuse_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
struct fuse_conn *fc = get_fuse_conn(file_inode(file));
@ -2284,7 +2284,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
WARN_ON(!fc->writeback_cache);
page = grab_cache_page_write_begin(mapping, index, flags);
page = grab_cache_page_write_begin(mapping, index);
if (!page)
goto error;
@ -3175,7 +3175,7 @@ static const struct file_operations fuse_file_operations = {
};
static const struct address_space_operations fuse_file_aops = {
.readpage = fuse_readpage,
.read_folio = fuse_read_folio,
.readahead = fuse_readahead,
.writepage = fuse_writepage,
.writepages = fuse_writepages,

View File

@ -464,22 +464,26 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
return 0;
}
static int __gfs2_readpage(void *file, struct page *page)
/**
* gfs2_read_folio - read a folio from a file
* @file: The file to read
* @folio: The folio in the file
*/
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
struct inode *inode = page->mapping->host;
struct inode *inode = folio->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
int error;
if (!gfs2_is_jdata(ip) ||
(i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
error = iomap_readpage(page, &gfs2_iomap_ops);
(i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
error = iomap_read_folio(folio, &gfs2_iomap_ops);
} else if (gfs2_is_stuffed(ip)) {
error = stuffed_readpage(ip, page);
unlock_page(page);
error = stuffed_readpage(ip, &folio->page);
folio_unlock(folio);
} else {
error = mpage_readpage(page, gfs2_block_map);
error = mpage_read_folio(folio, gfs2_block_map);
}
if (unlikely(gfs2_withdrawn(sdp)))
@ -488,17 +492,6 @@ static int __gfs2_readpage(void *file, struct page *page)
return error;
}
/**
* gfs2_readpage - read a page of a file
* @file: The file to read
* @page: The page of the file
*/
static int gfs2_readpage(struct file *file, struct page *page)
{
return __gfs2_readpage(file, page);
}
/**
* gfs2_internal_read - read an internal file
* @ip: The gfs2 inode
@ -523,7 +516,7 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
amt = size - copied;
if (offset + size > PAGE_SIZE)
amt = PAGE_SIZE - offset;
page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
p = kmap_atomic(page);
@ -698,38 +691,40 @@ out:
}
/**
* gfs2_releasepage - free the metadata associated with a page
* @page: the page that's being released
* gfs2_release_folio - free the metadata associated with a folio
* @folio: the folio that's being released
* @gfp_mask: passed from Linux VFS, ignored by us
*
* Calls try_to_free_buffers() to free the buffers and put the page if the
* Calls try_to_free_buffers() to free the buffers and put the folio if the
* buffers can be released.
*
* Returns: 1 if the page was put or else 0
* Returns: true if the folio was put or else false
*/
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
struct address_space *mapping = page->mapping;
struct address_space *mapping = folio->mapping;
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct buffer_head *bh, *head;
struct gfs2_bufdata *bd;
if (!page_has_buffers(page))
return 0;
head = folio_buffers(folio);
if (!head)
return false;
/*
* From xfs_vm_releasepage: mm accommodates an old ext3 case where
* clean pages might not have had the dirty bit cleared. Thus, it can
* send actual dirty pages to ->releasepage() via shrink_active_list().
* mm accommodates an old ext3 case where clean folios might
* not have had the dirty bit cleared. Thus, it can send actual
* dirty folios to ->release_folio() via shrink_active_list().
*
* As a workaround, we skip pages that contain dirty buffers below.
* Once ->releasepage isn't called on dirty pages anymore, we can warn
* on dirty buffers like we used to here again.
* As a workaround, we skip folios that contain dirty buffers
* below. Once ->release_folio isn't called on dirty folios
* anymore, we can warn on dirty buffers like we used to here
* again.
*/
gfs2_log_lock(sdp);
head = bh = page_buffers(page);
bh = head;
do {
if (atomic_read(&bh->b_count))
goto cannot_release;
@@ -739,9 +734,9 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
goto cannot_release;
bh = bh->b_this_page;
} while(bh != head);
} while (bh != head);
head = bh = page_buffers(page);
bh = head;
do {
bd = bh->b_private;
if (bd) {
@@ -762,20 +757,20 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
} while (bh != head);
gfs2_log_unlock(sdp);
return try_to_free_buffers(page);
return try_to_free_buffers(folio);
cannot_release:
gfs2_log_unlock(sdp);
return 0;
return false;
}
static const struct address_space_operations gfs2_aops = {
.writepage = gfs2_writepage,
.writepages = gfs2_writepages,
.readpage = gfs2_readpage,
.read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
.dirty_folio = filemap_dirty_folio,
.releasepage = iomap_releasepage,
.release_folio = iomap_release_folio,
.invalidate_folio = iomap_invalidate_folio,
.bmap = gfs2_bmap,
.direct_IO = noop_direct_IO,
@@ -787,12 +782,12 @@ static const struct address_space_operations gfs2_aops = {
static const struct address_space_operations gfs2_jdata_aops = {
.writepage = gfs2_jdata_writepage,
.writepages = gfs2_jdata_writepages,
.readpage = gfs2_readpage,
.read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
.dirty_folio = jdata_dirty_folio,
.bmap = gfs2_bmap,
.invalidate_folio = gfs2_invalidate_folio,
.releasepage = gfs2_releasepage,
.release_folio = gfs2_release_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
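As gfs2_release_folio() above illustrates, the new ->release_folio contract returns bool rather than int: true if the private state was freed and the folio may be released, false otherwise. A minimal buffer_head-backed sketch under that contract (myfs_release_folio is hypothetical)::

	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
	{
		/* Dirty or writeback folios must keep their buffers for
		 * now; see the old-ext3 workaround quoted in the gfs2
		 * and iomap hunks. */
		if (folio_test_dirty(folio) || folio_test_writeback(folio))
			return false;

		return try_to_free_buffers(folio);
	}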


@@ -12,7 +12,7 @@
#include <linux/mm.h>
#include "util.h"
extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask);
extern int gfs2_internal_read(struct gfs2_inode *ip,
char *buf, loff_t *pos, unsigned size);
extern void gfs2_set_aops(struct inode *inode);


@@ -92,14 +92,14 @@ const struct address_space_operations gfs2_meta_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
.release_folio = gfs2_release_folio,
};
const struct address_space_operations gfs2_rgrp_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
.release_folio = gfs2_release_folio,
};
/**


@@ -491,10 +491,10 @@ void hfs_file_truncate(struct inode *inode)
/* XXX: Can use generic_cont_expand? */
size = inode->i_size - 1;
res = pagecache_write_begin(NULL, mapping, size+1, 0, 0,
&page, &fsdata);
res = hfs_write_begin(NULL, mapping, size + 1, 0, &page,
&fsdata);
if (!res) {
res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
res = generic_write_end(NULL, mapping, size + 1, 0, 0,
page, fsdata);
}
if (res)
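The hfs and hfsplus truncate paths show how the removal of pagecache_write_begin() and pagecache_write_end() plays out: in-kernel callers now invoke the filesystem's own write_begin helper and generic_write_end() directly. A hedged sketch of the same pattern (myfs_write_begin is a stand-in for the filesystem's exported ->write_begin)::

	static int myfs_zero_tail(struct address_space *mapping, loff_t size)
	{
		struct page *page;
		void *fsdata;
		int res;

		/* A zero-length begin/end pair, as the truncate paths
		 * above use it to flush out partial-block state. */
		res = myfs_write_begin(NULL, mapping, size, 0, &page, &fsdata);
		if (!res)
			res = generic_write_end(NULL, mapping, size, 0, 0,
						page, fsdata);
		return res;
	}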


@@ -201,6 +201,8 @@ extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern const struct address_space_operations hfs_aops;
extern const struct address_space_operations hfs_btree_aops;
int hfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata);
extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t);
extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
extern int hfs_write_inode(struct inode *, struct writeback_control *);


@@ -34,9 +34,9 @@ static int hfs_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page, hfs_get_block, wbc);
}
static int hfs_readpage(struct file *file, struct page *page)
static int hfs_read_folio(struct file *file, struct folio *folio)
{
return block_read_full_page(page, hfs_get_block);
return block_read_full_folio(folio, hfs_get_block);
}
static void hfs_write_failed(struct address_space *mapping, loff_t to)
@@ -49,14 +49,13 @@ static void hfs_write_failed(struct address_space *mapping, loff_t to)
}
}
static int hfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
int hfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
int ret;
*pagep = NULL;
ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
hfs_get_block,
&HFS_I(mapping->host)->phys_size);
if (unlikely(ret))
@@ -70,14 +69,15 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, hfs_get_block);
}
static int hfs_releasepage(struct page *page, gfp_t mask)
static bool hfs_release_folio(struct folio *folio, gfp_t mask)
{
struct inode *inode = page->mapping->host;
struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct hfs_btree *tree;
struct hfs_bnode *node;
u32 nidx;
int i, res = 1;
int i;
bool res = true;
switch (inode->i_ino) {
case HFS_EXT_CNID:
@@ -88,27 +88,27 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
break;
default:
BUG();
return 0;
return false;
}
if (!tree)
return 0;
return false;
if (tree->node_size >= PAGE_SIZE) {
nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
nidx = folio->index >> (tree->node_size_shift - PAGE_SHIFT);
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, nidx);
if (!node)
;
else if (atomic_read(&node->refcnt))
res = 0;
res = false;
if (res && node) {
hfs_bnode_unhash(node);
hfs_bnode_free(node);
}
spin_unlock(&tree->hash_lock);
} else {
nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
nidx = folio->index << (PAGE_SHIFT - tree->node_size_shift);
i = 1 << (PAGE_SHIFT - tree->node_size_shift);
spin_lock(&tree->hash_lock);
do {
@@ -116,7 +116,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
if (!node)
continue;
if (atomic_read(&node->refcnt)) {
res = 0;
res = false;
break;
}
hfs_bnode_unhash(node);
@@ -124,7 +124,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
} while (--i && nidx < tree->node_count);
spin_unlock(&tree->hash_lock);
}
return res ? try_to_free_buffers(page) : 0;
return res ? try_to_free_buffers(folio) : false;
}
static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
@@ -161,18 +161,18 @@ static int hfs_writepages(struct address_space *mapping,
const struct address_space_operations hfs_btree_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = hfs_readpage,
.read_folio = hfs_read_folio,
.writepage = hfs_writepage,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
.bmap = hfs_bmap,
.releasepage = hfs_releasepage,
.release_folio = hfs_release_folio,
};
const struct address_space_operations hfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = hfs_readpage,
.read_folio = hfs_read_folio,
.writepage = hfs_writepage,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,


@@ -557,12 +557,12 @@ void hfsplus_file_truncate(struct inode *inode)
void *fsdata;
loff_t size = inode->i_size;
res = pagecache_write_begin(NULL, mapping, size, 0, 0,
&page, &fsdata);
res = hfsplus_write_begin(NULL, mapping, size, 0,
&page, &fsdata);
if (res)
return;
res = pagecache_write_end(NULL, mapping, size,
0, 0, page, fsdata);
res = generic_write_end(NULL, mapping, size, 0, 0,
page, fsdata);
if (res < 0)
return;
mark_inode_dirty(inode);


@@ -468,6 +468,8 @@ extern const struct address_space_operations hfsplus_aops;
extern const struct address_space_operations hfsplus_btree_aops;
extern const struct dentry_operations hfsplus_dentry_operations;
int hfsplus_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata);
struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
umode_t mode);
void hfsplus_delete_inode(struct inode *inode);


@@ -23,9 +23,9 @@
#include "hfsplus_raw.h"
#include "xattr.h"
static int hfsplus_readpage(struct file *file, struct page *page)
static int hfsplus_read_folio(struct file *file, struct folio *folio)
{
return block_read_full_page(page, hfsplus_get_block);
return block_read_full_folio(folio, hfsplus_get_block);
}
static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
@@ -43,14 +43,13 @@ static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
}
}
static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
int hfsplus_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
int ret;
*pagep = NULL;
ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
hfsplus_get_block,
&HFSPLUS_I(mapping->host)->phys_size);
if (unlikely(ret))
@@ -64,14 +63,15 @@ static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, hfsplus_get_block);
}
static int hfsplus_releasepage(struct page *page, gfp_t mask)
static bool hfsplus_release_folio(struct folio *folio, gfp_t mask)
{
struct inode *inode = page->mapping->host;
struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct hfs_btree *tree;
struct hfs_bnode *node;
u32 nidx;
int i, res = 1;
int i;
bool res = true;
switch (inode->i_ino) {
case HFSPLUS_EXT_CNID:
@@ -85,26 +85,26 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
break;
default:
BUG();
return 0;
return false;
}
if (!tree)
return 0;
return false;
if (tree->node_size >= PAGE_SIZE) {
nidx = page->index >>
nidx = folio->index >>
(tree->node_size_shift - PAGE_SHIFT);
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, nidx);
if (!node)
;
else if (atomic_read(&node->refcnt))
res = 0;
res = false;
if (res && node) {
hfs_bnode_unhash(node);
hfs_bnode_free(node);
}
spin_unlock(&tree->hash_lock);
} else {
nidx = page->index <<
nidx = folio->index <<
(PAGE_SHIFT - tree->node_size_shift);
i = 1 << (PAGE_SHIFT - tree->node_size_shift);
spin_lock(&tree->hash_lock);
@@ -113,7 +113,7 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
if (!node)
continue;
if (atomic_read(&node->refcnt)) {
res = 0;
res = false;
break;
}
hfs_bnode_unhash(node);
@@ -121,7 +121,7 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
} while (--i && nidx < tree->node_count);
spin_unlock(&tree->hash_lock);
}
return res ? try_to_free_buffers(page) : 0;
return res ? try_to_free_buffers(folio) : false;
}
static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
@@ -158,18 +158,18 @@ static int hfsplus_writepages(struct address_space *mapping,
const struct address_space_operations hfsplus_btree_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = hfsplus_readpage,
.read_folio = hfsplus_read_folio,
.writepage = hfsplus_writepage,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
.bmap = hfsplus_bmap,
.releasepage = hfsplus_releasepage,
.release_folio = hfsplus_release_folio,
};
const struct address_space_operations hfsplus_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = hfsplus_readpage,
.read_folio = hfsplus_read_folio,
.writepage = hfsplus_writepage,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,


@@ -434,8 +434,9 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
return err;
}
static int hostfs_readpage(struct file *file, struct page *page)
static int hostfs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
char *buffer;
loff_t start = page_offset(page);
int bytes_read, ret = 0;
@@ -463,12 +464,12 @@ static int hostfs_readpage(struct file *file, struct page *page)
}
static int hostfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
*pagep = grab_cache_page_write_begin(mapping, index, flags);
*pagep = grab_cache_page_write_begin(mapping, index);
if (!*pagep)
return -ENOMEM;
return 0;
@@ -504,7 +505,7 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
static const struct address_space_operations hostfs_aops = {
.writepage = hostfs_writepage,
.readpage = hostfs_readpage,
.read_folio = hostfs_read_folio,
.dirty_folio = filemap_dirty_folio,
.write_begin = hostfs_write_begin,
.write_end = hostfs_write_end,


@@ -158,9 +158,9 @@ static const struct iomap_ops hpfs_iomap_ops = {
.iomap_begin = hpfs_iomap_begin,
};
static int hpfs_readpage(struct file *file, struct page *page)
static int hpfs_read_folio(struct file *file, struct folio *folio)
{
return mpage_readpage(page, hpfs_get_block);
return mpage_read_folio(folio, hpfs_get_block);
}
static int hpfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -194,13 +194,13 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to)
}
static int hpfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
*pagep = NULL;
ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
hpfs_get_block,
&hpfs_i(mapping->host)->mmu_private);
if (unlikely(ret))
@@ -247,7 +247,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
const struct address_space_operations hpfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = hpfs_readpage,
.read_folio = hpfs_read_folio,
.writepage = hpfs_writepage,
.readahead = hpfs_readahead,
.writepages = hpfs_writepages,


@@ -479,8 +479,9 @@ out:
return err;
}
static int hpfs_symlink_readpage(struct file *file, struct page *page)
static int hpfs_symlink_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
char *link = page_address(page);
struct inode *i = page->mapping->host;
struct fnode *fnode;
@@ -508,7 +509,7 @@ fail:
}
const struct address_space_operations hpfs_symlink_aops = {
.readpage = hpfs_symlink_readpage
.read_folio = hpfs_symlink_read_folio
};
static int hpfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,


@@ -383,7 +383,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
static int hugetlbfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
return -EINVAL;


@@ -297,7 +297,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
/*
* If the bio_alloc fails, try it again for a single page to
* avoid having to deal with partial page reads. This emulates
* what do_mpage_readpage does.
* what do_mpage_read_folio does.
*/
if (!ctx->bio) {
ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
@@ -320,10 +320,8 @@ done:
return pos - orig_pos + plen;
}
int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
struct folio *folio = page_folio(page);
struct iomap_iter iter = {
.inode = folio->mapping->host,
.pos = folio_pos(folio),
@@ -351,13 +349,13 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
}
/*
* Just like mpage_readahead and block_read_full_page, we always
* return 0 and just mark the page as PageError on errors. This
* Just like mpage_readahead and block_read_full_folio, we always
* return 0 and just set the folio error flag on errors. This
* should be cleaned up throughout the stack eventually.
*/
return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);
EXPORT_SYMBOL_GPL(iomap_read_folio);
static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
struct iomap_readpage_ctx *ctx)
@@ -454,25 +452,23 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
struct folio *folio = page_folio(page);
trace_iomap_releasepage(folio->mapping->host, folio_pos(folio),
trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
folio_size(folio));
/*
* mm accommodates an old ext3 case where clean pages might not have had
* the dirty bit cleared. Thus, it can send actual dirty pages to
* ->releasepage() via shrink_active_list(); skip those here.
* mm accommodates an old ext3 case where clean folios might
* not have had the dirty bit cleared. Thus, it can send actual
* dirty folios to ->release_folio() via shrink_active_list();
* skip those here.
*/
if (folio_test_dirty(folio) || folio_test_writeback(folio))
return 0;
return false;
iomap_page_release(folio);
return 1;
return true;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);
EXPORT_SYMBOL_GPL(iomap_release_folio);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
@@ -664,10 +660,10 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
/*
* The blocks that were entirely written will now be uptodate, so we
* don't have to worry about a readpage reading them and overwriting a
* don't have to worry about a read_folio reading them and overwriting a
* partial write. However, if we've encountered a short write and only
* partially written into a block, it will not be marked uptodate, so a
* readpage might come in and destroy our partial write.
* read_folio might come in and destroy our partial write.
*
* Do the simplest thing and just treat any short write to a
* non-uptodate page as a zero-length write, and force the caller to
@@ -1485,7 +1481,7 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
* Skip the page if it's fully outside i_size, e.g. due to a
* truncate operation that's in progress. We must redirty the
* page so that reclaim stops reclaiming it. Otherwise
* iomap_vm_releasepage() is called on it and gets confused.
* iomap_release_folio() is called on it and gets confused.
*
* Note that the end_index is unsigned long. If the given
* offset is greater than 16TB on a 32-bit system then if we
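For an iomap-based filesystem the conversion is mostly aops wiring once the library exports the folio entry points above. A hypothetical table (myfs_iomap_ops assumed to exist) might look like::

	static int myfs_read_folio(struct file *file, struct folio *folio)
	{
		return iomap_read_folio(folio, &myfs_iomap_ops);
	}

	static const struct address_space_operations myfs_aops = {
		.read_folio		= myfs_read_folio,
		.release_folio		= iomap_release_folio,
		.invalidate_folio	= iomap_invalidate_folio,
	};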


@@ -80,7 +80,7 @@ DEFINE_EVENT(iomap_range_class, name, \
TP_PROTO(struct inode *inode, loff_t off, u64 len),\
TP_ARGS(inode, off, len))
DEFINE_RANGE_EVENT(iomap_writepage);
DEFINE_RANGE_EVENT(iomap_releasepage);
DEFINE_RANGE_EVENT(iomap_release_folio);
DEFINE_RANGE_EVENT(iomap_invalidate_folio);
DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);


@@ -296,8 +296,9 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
* per reference. We inject the additional pages into the page
* cache as a form of readahead.
*/
static int zisofs_readpage(struct file *file, struct page *page)
static int zisofs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
int err;
@@ -369,7 +370,7 @@ static int zisofs_readpage(struct file *file, struct page *page)
}
const struct address_space_operations zisofs_aops = {
.readpage = zisofs_readpage,
.read_folio = zisofs_read_folio,
/* No bmap operation supported */
};


@@ -1174,9 +1174,9 @@ struct buffer_head *isofs_bread(struct inode *inode, sector_t block)
return sb_bread(inode->i_sb, blknr);
}
static int isofs_readpage(struct file *file, struct page *page)
static int isofs_read_folio(struct file *file, struct folio *folio)
{
return mpage_readpage(page, isofs_get_block);
return mpage_read_folio(folio, isofs_get_block);
}
static void isofs_readahead(struct readahead_control *rac)
@@ -1190,7 +1190,7 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
}
static const struct address_space_operations isofs_aops = {
.readpage = isofs_readpage,
.read_folio = isofs_read_folio,
.readahead = isofs_readahead,
.bmap = _isofs_bmap
};


@@ -687,11 +687,12 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
}
/*
* readpage() for symlinks: reads symlink contents into the page and either
* read_folio() for symlinks: reads symlink contents into the folio and either
* makes it uptodate and returns 0 or returns error (-EIO)
*/
static int rock_ridge_symlink_readpage(struct file *file, struct page *page)
static int rock_ridge_symlink_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct iso_inode_info *ei = ISOFS_I(inode);
struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
@@ -804,5 +805,5 @@ error:
}
const struct address_space_operations isofs_symlink_aops = {
.readpage = rock_ridge_symlink_readpage
.read_folio = rock_ridge_symlink_read_folio
};


@@ -62,6 +62,7 @@ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
*/
static void release_buffer_page(struct buffer_head *bh)
{
struct folio *folio;
struct page *page;
if (buffer_dirty(bh))
@@ -71,18 +72,19 @@ static void release_buffer_page(struct buffer_head *bh)
page = bh->b_page;
if (!page)
goto nope;
if (page->mapping)
folio = page_folio(page);
if (folio->mapping)
goto nope;
/* OK, it's a truncated page */
if (!trylock_page(page))
if (!folio_trylock(folio))
goto nope;
get_page(page);
folio_get(folio);
__brelse(bh);
try_to_free_buffers(page);
unlock_page(page);
put_page(page);
try_to_free_buffers(folio);
folio_unlock(folio);
folio_put(folio);
return;
nope:


@@ -2143,17 +2143,17 @@ out:
* cannot happen because we never reallocate freed data as metadata
* while the data is part of a transaction. Yes?
*
* Return 0 on failure, 1 on success
* Return false on failure, true on success
*/
int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page)
bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio)
{
struct buffer_head *head;
struct buffer_head *bh;
int ret = 0;
bool ret = false;
J_ASSERT(PageLocked(page));
J_ASSERT(folio_test_locked(folio));
head = page_buffers(page);
head = folio_buffers(folio);
bh = head;
do {
struct journal_head *jh;
@@ -2175,7 +2175,7 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page)
goto busy;
} while ((bh = bh->b_this_page) != head);
ret = try_to_free_buffers(page);
ret = try_to_free_buffers(folio);
busy:
return ret;
}
@@ -2482,7 +2482,7 @@ int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio,
} while (bh != head);
if (!partial_page) {
if (may_free && try_to_free_buffers(&folio->page))
if (may_free && try_to_free_buffers(folio))
J_ASSERT(!folio_buffers(folio));
}
return 0;
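The jbd2 hunks show the usual page-to-folio locking conversion: trylock_page/get_page/put_page become folio_trylock/folio_get/folio_put, and try_to_free_buffers() now takes the folio. Condensed into a hedged sketch (myfs_try_release is hypothetical)::

	static void myfs_try_release(struct page *page)
	{
		/* page_folio() maps any page to its containing folio;
		 * all locking and refcounting then happens per-folio. */
		struct folio *folio = page_folio(page);

		if (!folio_trylock(folio))
			return;
		folio_get(folio);
		try_to_free_buffers(folio);
		folio_unlock(folio);
		folio_put(folio);
	}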


@@ -25,9 +25,9 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *pg, void *fsdata);
static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
static int jffs2_readpage (struct file *filp, struct page *pg);
static int jffs2_read_folio(struct file *filp, struct folio *folio);
int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
@@ -72,7 +72,7 @@ const struct inode_operations jffs2_file_inode_operations =
const struct address_space_operations jffs2_file_address_operations =
{
.readpage = jffs2_readpage,
.read_folio = jffs2_read_folio,
.write_begin = jffs2_write_begin,
.write_end = jffs2_write_end,
};
@@ -110,27 +110,26 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
return ret;
}
int jffs2_do_readpage_unlock(void *data, struct page *pg)
int __jffs2_read_folio(struct file *file, struct folio *folio)
{
int ret = jffs2_do_readpage_nolock(data, pg);
unlock_page(pg);
int ret = jffs2_do_readpage_nolock(folio->mapping->host, &folio->page);
folio_unlock(folio);
return ret;
}
static int jffs2_readpage (struct file *filp, struct page *pg)
static int jffs2_read_folio(struct file *file, struct folio *folio)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
struct jffs2_inode_info *f = JFFS2_INODE_INFO(folio->mapping->host);
int ret;
mutex_lock(&f->sem);
ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
ret = __jffs2_read_folio(file, folio);
mutex_unlock(&f->sem);
return ret;
}
static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct page *pg;
@@ -213,7 +212,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
* page in read_cache_page(), which causes a deadlock.
*/
mutex_lock(&c->alloc_sem);
pg = grab_cache_page_write_begin(mapping, index, flags);
pg = grab_cache_page_write_begin(mapping, index);
if (!pg) {
ret = -ENOMEM;
goto release_sem;


@@ -178,7 +178,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
jffs2_complete_reservation(c);
/* We have to do the truncate_setsize() without f->sem held, since
some pages may be locked and waiting for it in readpage().
some pages may be locked and waiting for it in read_folio().
We are protected from a simultaneous write() extending i_size
back past iattr->ia_size, because do_truncate() holds the
generic inode semaphore. */


@@ -1327,7 +1327,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
* trying to write out, read_cache_page() will not deadlock. */
mutex_unlock(&f->sem);
page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
jffs2_do_readpage_unlock, inode);
__jffs2_read_folio, NULL);
if (IS_ERR(page)) {
pr_warn("read_cache_page() returned error: %ld\n",
PTR_ERR(page));


@@ -155,7 +155,7 @@ extern const struct file_operations jffs2_file_operations;
extern const struct inode_operations jffs2_file_inode_operations;
extern const struct address_space_operations jffs2_file_address_operations;
int jffs2_fsync(struct file *, loff_t, loff_t, int);
int jffs2_do_readpage_unlock(void *data, struct page *pg);
int __jffs2_read_folio(struct file *file, struct folio *folio);
/* ioctl.c */
long jffs2_ioctl(struct file *, unsigned int, unsigned long);


@@ -293,9 +293,9 @@ static int jfs_writepages(struct address_space *mapping,
return mpage_writepages(mapping, wbc, jfs_get_block);
}
static int jfs_readpage(struct file *file, struct page *page)
static int jfs_read_folio(struct file *file, struct folio *folio)
{
return mpage_readpage(page, jfs_get_block);
return mpage_read_folio(folio, jfs_get_block);
}
static void jfs_readahead(struct readahead_control *rac)
@@ -314,13 +314,12 @@ static void jfs_write_failed(struct address_space *mapping, loff_t to)
}
static int jfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
jfs_get_block);
ret = nobh_write_begin(mapping, pos, len, pagep, fsdata, jfs_get_block);
if (unlikely(ret))
jfs_write_failed(mapping, pos + len);
@@ -360,7 +359,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
const struct address_space_operations jfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = jfs_readpage,
.read_folio = jfs_read_folio,
.readahead = jfs_readahead,
.writepage = jfs_writepage,
.writepages = jfs_writepages,


@@ -467,8 +467,9 @@ err_out:
return -EIO;
}
static int metapage_readpage(struct file *fp, struct page *page)
static int metapage_read_folio(struct file *fp, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct bio *bio = NULL;
int block_offset;
@@ -523,29 +524,29 @@ add_failed:
return -EIO;
}
static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
{
struct metapage *mp;
int ret = 1;
bool ret = true;
int offset;
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
mp = page_to_mp(page, offset);
mp = page_to_mp(&folio->page, offset);
if (!mp)
continue;
jfs_info("metapage_releasepage: mp = 0x%p", mp);
jfs_info("metapage_release_folio: mp = 0x%p", mp);
if (mp->count || mp->nohomeok ||
test_bit(META_dirty, &mp->flag)) {
jfs_info("count = %ld, nohomeok = %d", mp->count,
mp->nohomeok);
ret = 0;
ret = false;
continue;
}
if (mp->lsn)
remove_from_logsync(mp);
remove_metapage(page, mp);
remove_metapage(&folio->page, mp);
INCREMENT(mpStat.pagefree);
free_metapage(mp);
}
@@ -559,13 +560,13 @@ static void metapage_invalidate_folio(struct folio *folio, size_t offset,
BUG_ON(folio_test_writeback(folio));
metapage_releasepage(&folio->page, 0);
metapage_release_folio(folio, 0);
}
const struct address_space_operations jfs_metapage_aops = {
.readpage = metapage_readpage,
.read_folio = metapage_read_folio,
.writepage = metapage_writepage,
.releasepage = metapage_releasepage,
.release_folio = metapage_release_folio,
.invalidate_folio = metapage_invalidate_folio,
.dirty_folio = filemap_dirty_folio,
};


@@ -539,17 +539,17 @@ int simple_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
}
EXPORT_SYMBOL(simple_setattr);
static int simple_readpage(struct file *file, struct page *page)
static int simple_read_folio(struct file *file, struct folio *folio)
{
clear_highpage(page);
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
folio_zero_range(folio, 0, folio_size(folio));
flush_dcache_folio(folio);
folio_mark_uptodate(folio);
folio_unlock(folio);
return 0;
}
int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
struct page *page;
@@ -557,7 +557,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
index = pos >> PAGE_SHIFT;
page = grab_cache_page_write_begin(mapping, index, flags);
page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
@@ -592,7 +592,7 @@ EXPORT_SYMBOL(simple_write_begin);
* should extend on what's done here with a call to mark_inode_dirty() in the
* case that i_size has changed.
*
* Use *ONLY* with simple_readpage()
* Use *ONLY* with simple_read_folio()
*/
static int simple_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
@@ -628,7 +628,7 @@ static int simple_write_end(struct file *file, struct address_space *mapping,
* Provides ramfs-style behavior: data in the pagecache, but no writeback.
*/
const struct address_space_operations ram_aops = {
.readpage = simple_readpage,
.read_folio = simple_read_folio,
.write_begin = simple_write_begin,
.write_end = simple_write_end,
.dirty_folio = noop_dirty_folio,


@@ -402,9 +402,9 @@ static int minix_writepage(struct page *page, struct writeback_control *wbc)
return block_write_full_page(page, minix_get_block, wbc);
}
static int minix_readpage(struct file *file, struct page *page)
static int minix_read_folio(struct file *file, struct folio *folio)
{
return block_read_full_page(page,minix_get_block);
return block_read_full_folio(folio, minix_get_block);
}
int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len)
@@ -423,13 +423,12 @@ static void minix_write_failed(struct address_space *mapping, loff_t to)
}
static int minix_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
ret = block_write_begin(mapping, pos, len, flags, pagep,
minix_get_block);
ret = block_write_begin(mapping, pos, len, pagep, minix_get_block);
if (unlikely(ret))
minix_write_failed(mapping, pos + len);
@@ -444,7 +443,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations minix_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = minix_readpage,
.read_folio = minix_read_folio,
.writepage = minix_writepage,
.write_begin = minix_write_begin,
.write_end = generic_write_end,


@@ -36,7 +36,7 @@
*
* The mpage code never puts partial pages into a BIO (except for end-of-file).
* If a page does not map to a contiguous run of blocks then it simply falls
* back to block_read_full_page().
* back to block_read_full_folio().
*
* Why is this? If a page's completion depends on a number of different BIOs
* which can complete in any order (or at the same time) then determining the
@@ -68,7 +68,7 @@ static struct bio *mpage_bio_submit(struct bio *bio)
/*
* support function for mpage_readahead. The fs supplied get_block might
* return an up to date buffer. This is used to map that buffer into
* the page, which allows readpage to avoid triggering a duplicate call
* the page, which allows read_folio to avoid triggering a duplicate call
* to get_block.
*
* The idea is to avoid adding buffers to pages that don't already have
@@ -296,7 +296,7 @@ confused:
if (args->bio)
args->bio = mpage_bio_submit(args->bio);
if (!PageUptodate(page))
block_read_full_page(page, args->get_block);
block_read_full_folio(page_folio(page), args->get_block);
else
unlock_page(page);
goto out;
@@ -364,20 +364,22 @@ EXPORT_SYMBOL(mpage_readahead);
/*
* This isn't called much at all
*/
int mpage_readpage(struct page *page, get_block_t get_block)
int mpage_read_folio(struct folio *folio, get_block_t get_block)
{
struct mpage_readpage_args args = {
.page = page,
.page = &folio->page,
.nr_pages = 1,
.get_block = get_block,
};
VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
args.bio = do_mpage_readpage(&args);
if (args.bio)
mpage_bio_submit(args.bio);
return 0;
}
EXPORT_SYMBOL(mpage_readpage);
EXPORT_SYMBOL(mpage_read_folio);
/*
* Writing is not so simple.
@@ -425,11 +427,11 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
/*
* we cannot drop the bh if the page is not uptodate or a concurrent
* readpage would fail to serialize with the bh and it would read from
* read_folio would fail to serialize with the bh and it would read from
* disk before we reach the platter.
*/
if (buffer_heads_over_limit && PageUptodate(page))
try_to_free_buffers(page);
try_to_free_buffers(page_folio(page));
}
/*
@@ -510,7 +512,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
/*
* Page has buffers, but they are all unmapped. The page was
* created by pagein or read over a hole which was handled by
* block_read_full_page(). If this address_space is also
* block_read_full_folio(). If this address_space is also
* using mpage_readahead then this can rarely happen.
*/
goto confused;


@@ -22,6 +22,7 @@
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/fsnotify.h>
#include <linux/personality.h>
#include <linux/security.h>
@@ -5001,28 +5002,28 @@ int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
}
EXPORT_SYMBOL(page_readlink);
/*
* The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
*/
int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
int page_symlink(struct inode *inode, const char *symname, int len)
{
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
bool nofs = !mapping_gfp_constraint(mapping, __GFP_FS);
struct page *page;
void *fsdata;
int err;
unsigned int flags = 0;
if (nofs)
flags |= AOP_FLAG_NOFS;
unsigned int flags;
retry:
err = pagecache_write_begin(NULL, mapping, 0, len-1,
flags, &page, &fsdata);
if (nofs)
flags = memalloc_nofs_save();
err = aops->write_begin(NULL, mapping, 0, len-1, &page, &fsdata);
if (nofs)
memalloc_nofs_restore(flags);
if (err)
goto fail;
memcpy(page_address(page), symname, len-1);
err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
err = aops->write_end(NULL, mapping, 0, len-1, len-1,
page, fsdata);
if (err < 0)
goto fail;
@@ -5034,13 +5035,6 @@ retry:
fail:
return err;
}
EXPORT_SYMBOL(__page_symlink);
int page_symlink(struct inode *inode, const char *symname, int len)
{
return __page_symlink(inode, symname, len,
!mapping_gfp_constraint(inode->i_mapping, __GFP_FS));
}
EXPORT_SYMBOL(page_symlink);
const struct inode_operations page_symlink_inode_operations = {
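page_symlink() above is the model for the AOP_FLAG_NOFS replacement: instead of threading a flag down through the aops, the caller brackets the call in a memalloc_nofs_save()/memalloc_nofs_restore() scope. A minimal sketch of the same idea, assuming the mapping's GFP mask was already constrained (myfs_write_begin_nofs is hypothetical)::

	#include <linux/sched/mm.h>

	static int myfs_write_begin_nofs(struct address_space *mapping,
					 loff_t pos, unsigned len,
					 struct page **pagep, void **fsdata)
	{
		/* While the scope is active, every allocation made under
		 * ->write_begin implicitly behaves as GFP_NOFS. */
		unsigned int flags = memalloc_nofs_save();
		int err = mapping->a_ops->write_begin(NULL, mapping, pos,
						      len, pagep, fsdata);

		memalloc_nofs_restore(flags);
		return err;
	}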


@@ -198,22 +198,21 @@ cleanup_free:
EXPORT_SYMBOL(netfs_readahead);
/**
* netfs_readpage - Helper to manage a readpage request
* netfs_read_folio - Helper to manage a read_folio request
* @file: The file to read from
* @subpage: A subpage of the folio to read
* @folio: The folio to read
*
* Fulfil a readpage request by drawing data from the cache if possible, or the
* netfs if not. Space beyond the EOF is zero-filled. Multiple I/O requests
* from different sources will get munged together.
* Fulfil a read_folio request by drawing data from the cache if
* possible, or the netfs if not. Space beyond the EOF is zero-filled.
* Multiple I/O requests from different sources will get munged together.
*
* The calling netfs must initialise a netfs context contiguous to the vfs
* inode before calling this.
*
* This is usable whether or not caching is enabled.
*/
int netfs_readpage(struct file *file, struct page *subpage)
int netfs_read_folio(struct file *file, struct folio *folio)
{
struct folio *folio = page_folio(subpage);
struct address_space *mapping = folio_file_mapping(folio);
struct netfs_io_request *rreq;
struct netfs_i_context *ctx = netfs_i_context(mapping->host);
@@ -245,7 +244,7 @@ alloc_error:
folio_unlock(folio);
return ret;
}
EXPORT_SYMBOL(netfs_readpage);
EXPORT_SYMBOL(netfs_read_folio);
/*
* Prepare a folio for writing without reading first
@@ -302,7 +301,6 @@ zero_out:
* @mapping: The mapping to read from
* @pos: File position at which the write will begin
* @len: The length of the write (may extend beyond the end of the folio chosen)
* @aop_flags: AOP_* flags
* @_folio: Where to put the resultant folio
* @_fsdata: Place for the netfs to store a cookie
*
@@ -329,22 +327,19 @@ zero_out:
* This is usable whether or not caching is enabled.
*/
int netfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int aop_flags,
struct folio **_folio, void **_fsdata)
loff_t pos, unsigned int len, struct folio **_folio,
void **_fsdata)
{
struct netfs_io_request *rreq;
struct netfs_i_context *ctx = netfs_i_context(file_inode(file ));
struct folio *folio;
unsigned int fgp_flags;
unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
pgoff_t index = pos >> PAGE_SHIFT;
int ret;
DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
retry:
fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
if (aop_flags & AOP_FLAG_NOFS)
fgp_flags |= FGP_NOFS;
folio = __filemap_get_folio(mapping, index, fgp_flags,
mapping_gfp_mask(mapping));
if (!folio)


@@ -55,7 +55,7 @@ static int nfs_closedir(struct inode *, struct file *);
static int nfs_readdir(struct file *, struct dir_context *);
static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
static loff_t nfs_llseek_dir(struct file *, loff_t, int);
static void nfs_readdir_clear_array(struct page*);
static void nfs_readdir_free_folio(struct folio *);
const struct file_operations nfs_dir_operations = {
.llseek = nfs_llseek_dir,
@@ -67,7 +67,7 @@ const struct file_operations nfs_dir_operations = {
};
const struct address_space_operations nfs_dir_aops = {
.freepage = nfs_readdir_clear_array,
.free_folio = nfs_readdir_free_folio,
};
#define NFS_INIT_DTSIZE PAGE_SIZE
@@ -228,6 +228,11 @@ static void nfs_readdir_clear_array(struct page *page)
kunmap_atomic(array);
}
static void nfs_readdir_free_folio(struct folio *folio)
{
nfs_readdir_clear_array(&folio->page);
}
static void nfs_readdir_page_reinit_array(struct page *page, u64 last_cookie,
u64 change_attr)
{
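The nfs directory code above demonstrates the new ->free_folio hook (replacing ->freepage): it is called once the folio leaves the page cache, receives no gfp mask, and returns nothing. A hedged sketch of a wrapper like the nfs one (myfs_clear_array is hypothetical)::

	static void myfs_free_folio(struct folio *folio)
	{
		/* Release per-folio private state; unlike the old
		 * ->freepage this takes the folio itself. */
		myfs_clear_array(folio);
	}

	static const struct address_space_operations myfs_dir_aops = {
		.free_folio	= myfs_free_folio,
	};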


@@ -313,7 +313,7 @@ static bool nfs_want_read_modify_write(struct file *file, struct page *page,
* increment the page use counts until he is done with the page.
*/
static int nfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int ret;
@@ -325,7 +325,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
file, mapping->host->i_ino, len, (long long) pos);
start:
page = grab_cache_page_write_begin(mapping, index, flags);
page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
*pagep = page;
@@ -337,7 +337,7 @@ start:
} else if (!once_thru &&
nfs_want_read_modify_write(file, page, pos, len)) {
once_thru = 1;
ret = nfs_readpage(file, page);
ret = nfs_read_folio(file, page_folio(page));
put_page(page);
if (!ret)
goto start;
@@ -415,34 +415,31 @@ static void nfs_invalidate_folio(struct folio *folio, size_t offset,
}
/*
* Attempt to release the private state associated with a page
* - Called if either PG_private or PG_fscache is set on the page
* - Caller holds page lock
* - Return true (may release page) or false (may not)
* Attempt to release the private state associated with a folio
* - Called if either private or fscache flags are set on the folio
* - Caller holds folio lock
* - Return true (may release folio) or false (may not)
*/
static int nfs_release_page(struct page *page, gfp_t gfp)
static bool nfs_release_folio(struct folio *folio, gfp_t gfp)
{
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
dfprintk(PAGECACHE, "NFS: release_folio(%p)\n", folio);
/* If PagePrivate() is set, then the page is not freeable */
if (PagePrivate(page))
return 0;
return nfs_fscache_release_page(page, gfp);
/* If the private flag is set, then the folio is not freeable */
if (folio_test_private(folio))
return false;
return nfs_fscache_release_folio(folio, gfp);
}
static void nfs_check_dirty_writeback(struct page *page,
static void nfs_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback)
{
struct nfs_inode *nfsi;
struct address_space *mapping = page_file_mapping(page);
if (!mapping || PageSwapCache(page))
return;
struct address_space *mapping = folio->mapping;
/*
* Check if an unstable page is currently being committed and
* if so, have the VM treat it as if the page is under writeback
* so it will not block due to pages that will shortly be freeable.
* Check if an unstable folio is currently being committed and
* if so, have the VM treat it as if the folio is under writeback
* so it will not block due to folios that will shortly be freeable.
*/
nfsi = NFS_I(mapping->host);
if (atomic_read(&nfsi->commit_info.rpcs_out)) {
@@ -451,11 +448,11 @@ static void nfs_check_dirty_writeback(struct page *page,
}
/*
* If PagePrivate() is set, then the page is not freeable and as the
* inode is not being committed, it's not going to be cleaned in the
* near future so treat it as dirty
* If the private flag is set, then the folio is not freeable
* and as the inode is not being committed, it's not going to
* be cleaned in the near future so treat it as dirty
*/
if (PagePrivate(page))
if (folio_test_private(folio))
*dirty = true;
}
@@ -517,7 +514,7 @@ static void nfs_swap_deactivate(struct file *file)
}
const struct address_space_operations nfs_file_aops = {
.readpage = nfs_readpage,
.read_folio = nfs_read_folio,
.readahead = nfs_readahead,
.dirty_folio = filemap_dirty_folio,
.writepage = nfs_writepage,
@@ -525,7 +522,7 @@ const struct address_space_operations nfs_file_aops = {
.write_begin = nfs_write_begin,
.write_end = nfs_write_end,
.invalidate_folio = nfs_invalidate_folio,
.releasepage = nfs_release_page,
.release_folio = nfs_release_folio,
.direct_IO = nfs_direct_IO,
#ifdef CONFIG_MIGRATION
.migratepage = nfs_migrate_page,
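->is_dirty_writeback is another of the converted operations: it now takes a folio and reports whether fs-private state should make the VM treat it as dirty and/or under writeback. A reduced sketch of the nfs version above (myfs name hypothetical)::

	static void myfs_check_dirty_writeback(struct folio *folio,
					       bool *dirty, bool *writeback)
	{
		/* Folios carrying private state will not be cleaned soon,
		 * so have the VM treat them as dirty, as nfs does above. */
		if (folio_test_private(folio))
			*dirty = true;
	}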


@@ -48,14 +48,14 @@ extern void nfs_fscache_release_file(struct inode *, struct file *);
extern int __nfs_fscache_read_page(struct inode *, struct page *);
extern void __nfs_fscache_write_page(struct inode *, struct page *);
static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
{
if (PageFsCache(page)) {
if (folio_test_fscache(folio)) {
if (current_is_kswapd() || !(gfp & __GFP_FS))
return false;
wait_on_page_fscache(page);
fscache_note_page_release(nfs_i_fscache(page->mapping->host));
nfs_inc_fscache_stats(page->mapping->host,
folio_wait_fscache(folio);
fscache_note_page_release(nfs_i_fscache(folio->mapping->host));
nfs_inc_fscache_stats(folio->mapping->host,
NFSIOS_FSCACHE_PAGES_UNCACHED);
}
return true;
@@ -129,9 +129,9 @@ static inline void nfs_fscache_open_file(struct inode *inode,
struct file *filp) {}
static inline void nfs_fscache_release_file(struct inode *inode, struct file *file) {}
static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
{
return 1; /* True: may release page */
return true; /* may release folio */
}
static inline int nfs_fscache_read_page(struct inode *inode, struct page *page)
{


@@ -333,8 +333,9 @@ out:
* - The error flag is set for this page. This happens only when a
* previous async read operation failed.
*/
int nfs_readpage(struct file *file, struct page *page)
int nfs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct nfs_readdesc desc;
struct inode *inode = page_file_mapping(page)->host;
int ret;


@@ -26,21 +26,21 @@
* and straight-forward than readdir caching.
*/
static int nfs_symlink_filler(void *data, struct page *page)
static int nfs_symlink_filler(struct file *file, struct folio *folio)
{
struct inode *inode = data;
struct inode *inode = folio->mapping->host;
int error;
error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
error = NFS_PROTO(inode)->readlink(inode, &folio->page, 0, PAGE_SIZE);
if (error < 0)
goto error;
SetPageUptodate(page);
unlock_page(page);
folio_mark_uptodate(folio);
folio_unlock(folio);
return 0;
error:
SetPageError(page);
unlock_page(page);
folio_set_error(folio);
folio_unlock(folio);
return -EIO;
}
@@ -67,7 +67,7 @@ static const char *nfs_get_link(struct dentry *dentry,
if (err)
return err;
page = read_cache_page(&inode->i_data, 0, nfs_symlink_filler,
inode);
NULL);
if (IS_ERR(page))
return ERR_CAST(page);
}
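nfs_symlink_filler() shows the filler_t change from the summary: the filler now takes a struct file pointer first, matching ->read_folio, so an address_space operation can be handed to read_cache_page() directly (as gfs2 and jffs2 do above). A hedged usage sketch (myfs_read_folio is hypothetical)::

	static struct page *myfs_read_symlink(struct address_space *mapping)
	{
		/* filler_t now has the ->read_folio shape, so the aop can
		 * be passed straight through; the final NULL is the
		 * struct file pointer handed to the filler. */
		return read_cache_page(mapping, 0, myfs_read_folio, NULL);
	}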


@@ -63,10 +63,10 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n)
/**
* nilfs_get_block() - get a file block on the filesystem (callback function)
* @inode - inode struct of the target file
* @blkoff - file block number
* @bh_result - buffer head to be mapped on
* @create - indicate whether allocating the block or not when it has not
* @inode: inode struct of the target file
* @blkoff: file block number
* @bh_result: buffer head to be mapped on
* @create: indicate whether allocating the block or not when it has not
* been allocated yet.
*
* This function does not issue actual read request of the specified data
@@ -140,14 +140,14 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
}
/**
* nilfs_readpage() - implement readpage() method of nilfs_aops {}
* nilfs_read_folio() - implement read_folio() method of nilfs_aops {}
* address_space_operations.
* @file - file struct of the file to be read
* @page - the page to be read
* @file: file struct of the file to be read
* @folio: the folio to be read
*/
static int nilfs_readpage(struct file *file, struct page *page)
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
return mpage_readpage(page, nilfs_get_block);
return mpage_read_folio(folio, nilfs_get_block);
}
static void nilfs_readahead(struct readahead_control *rac)
@@ -248,7 +248,7 @@ void nilfs_write_failed(struct address_space *mapping, loff_t to)
}
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
@@ -258,8 +258,7 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping,
if (unlikely(err))
return err;
err = block_write_begin(mapping, pos, len, flags, pagep,
nilfs_get_block);
err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block);
if (unlikely(err)) {
nilfs_write_failed(mapping, pos + len);
nilfs_transaction_abort(inode->i_sb);
@@ -299,13 +298,12 @@ nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
const struct address_space_operations nilfs_aops = {
.writepage = nilfs_writepage,
.readpage = nilfs_readpage,
.read_folio = nilfs_read_folio,
.writepages = nilfs_writepages,
.dirty_folio = nilfs_dirty_folio,
.readahead = nilfs_readahead,
.write_begin = nilfs_write_begin,
.write_end = nilfs_write_end,
/* .releasepage = nilfs_releasepage, */
.invalidate_folio = block_invalidate_folio,
.direct_IO = nilfs_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
@@ -1088,6 +1086,7 @@ int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
/**
* nilfs_dirty_inode - reflect changes on given inode to an inode block.
* @inode: inode of the file to be registered.
* @flags: flags to determine the dirty state of the inode
*
* nilfs_dirty_inode() loads a inode block containing the specified
* @inode and copies data from a nilfs_inode to a corresponding inode

Some files were not shown because too many files have changed in this diff.