vfs-6.12.folio

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCZuQEvgAKCRCRxhvAZXjc
 ou77AQD3U1KjbdgzbUi6kaUmiiWOPhfYTlm8mho8dBjqvTCB+AD/XTWSFCWWhHB4
 KyQZTbjRD81xmVNbKjASazp0EA6Ahwc=
 =gIsD
 -----END PGP SIGNATURE-----

Merge tag 'vfs-6.12.folio' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs

Pull vfs folio updates from Christian Brauner:
 "This contains work to port write_begin and write_end to rely on folios
  for various filesystems.

  This converts ocfs2, vboxfs, orangefs, jffs2, hostfs, fuse, f2fs,
  ecryptfs, ntfs3, nilfs2, reiserfs, minixfs, qnx6, sysv, ufs, and
  squashfs.

  After this series lands a bunch of the filesystems in this list do not
  mention struct page anymore"

* tag 'vfs-6.12.folio' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs: (61 commits)
  Squashfs: Ensure all readahead pages have been used
  Squashfs: Rewrite and update squashfs_readahead_fragment() to not use page->index
  Squashfs: Update squashfs_readpage_block() to not use page->index
  Squashfs: Update squashfs_readahead() to not use page->index
  Squashfs: Update page_actor to not use page->index
  jffs2: Use a folio in jffs2_garbage_collect_dnode()
  jffs2: Convert jffs2_do_readpage_nolock to take a folio
  buffer: Convert __block_write_begin() to take a folio
  ocfs2: Convert ocfs2_write_zero_page to use a folio
  fs: Convert aops->write_begin to take a folio
  fs: Convert aops->write_end to take a folio
  vboxsf: Use a folio in vboxsf_write_end()
  orangefs: Convert orangefs_write_begin() to use a folio
  orangefs: Convert orangefs_write_end() to use a folio
  jffs2: Convert jffs2_write_begin() to use a folio
  jffs2: Convert jffs2_write_end() to use a folio
  hostfs: Convert hostfs_write_end() to use a folio
  fuse: Convert fuse_write_begin() to use a folio
  fuse: Convert fuse_write_end() to use a folio
  f2fs: Convert f2fs_write_begin() to use a folio
  ...
This commit is contained in:
Linus Torvalds 2024-09-16 08:54:30 +02:00
commit 2775df6e5e
83 changed files with 994 additions and 1064 deletions

View File

@ -251,10 +251,10 @@ prototypes::
void (*readahead)(struct readahead_control *); void (*readahead)(struct readahead_control *);
int (*write_begin)(struct file *, struct address_space *mapping, int (*write_begin)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata); struct folio **foliop, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping, int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata); struct folio *folio, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t); sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t start, size_t len); void (*invalidate_folio) (struct folio *, size_t start, size_t len);
bool (*release_folio)(struct folio *, gfp_t); bool (*release_folio)(struct folio *, gfp_t);
@ -280,7 +280,7 @@ read_folio: yes, unlocks shared
writepages: writepages:
dirty_folio: maybe dirty_folio: maybe
readahead: yes, unlocks shared readahead: yes, unlocks shared
write_begin: locks the page exclusive write_begin: locks the folio exclusive
write_end: yes, unlocks exclusive write_end: yes, unlocks exclusive
bmap: bmap:
invalidate_folio: yes exclusive invalidate_folio: yes exclusive

View File

@ -810,7 +810,7 @@ cache in your filesystem. The following members are defined:
struct page **pagep, void **fsdata); struct page **pagep, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping, int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata); struct folio *folio, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t); sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t start, size_t len); void (*invalidate_folio) (struct folio *, size_t start, size_t len);
bool (*release_folio)(struct folio *, gfp_t); bool (*release_folio)(struct folio *, gfp_t);
@ -926,12 +926,12 @@ cache in your filesystem. The following members are defined:
(if they haven't been read already) so that the updated blocks (if they haven't been read already) so that the updated blocks
can be written out properly. can be written out properly.
The filesystem must return the locked pagecache page for the The filesystem must return the locked pagecache folio for the
specified offset, in ``*pagep``, for the caller to write into. specified offset, in ``*foliop``, for the caller to write into.
It must be able to cope with short writes (where the length It must be able to cope with short writes (where the length
passed to write_begin is greater than the number of bytes copied passed to write_begin is greater than the number of bytes copied
into the page). into the folio).
A void * may be returned in fsdata, which then gets passed into A void * may be returned in fsdata, which then gets passed into
write_end. write_end.
@ -944,8 +944,8 @@ cache in your filesystem. The following members are defined:
called. len is the original len passed to write_begin, and called. len is the original len passed to write_begin, and
copied is the amount that was able to be copied. copied is the amount that was able to be copied.
The filesystem must take care of unlocking the page and The filesystem must take care of unlocking the folio,
releasing it refcount, and updating i_size. decrementing its refcount, and updating i_size.
Returns < 0 on failure, otherwise the number of bytes (<= Returns < 0 on failure, otherwise the number of bytes (<=
'copied') that were able to be copied into pagecache. 'copied') that were able to be copied into pagecache.

View File

@ -451,20 +451,20 @@ static void blkdev_readahead(struct readahead_control *rac)
} }
static int blkdev_write_begin(struct file *file, struct address_space *mapping, static int blkdev_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata) loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{ {
return block_write_begin(mapping, pos, len, pagep, blkdev_get_block); return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
} }
static int blkdev_write_end(struct file *file, struct address_space *mapping, static int blkdev_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, struct page *page, loff_t pos, unsigned len, unsigned copied, struct folio *folio,
void *fsdata) void *fsdata)
{ {
int ret; int ret;
ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
return ret; return ret;
} }

View File

@ -424,7 +424,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
struct address_space *mapping = obj->base.filp->f_mapping; struct address_space *mapping = obj->base.filp->f_mapping;
const struct address_space_operations *aops = mapping->a_ops; const struct address_space_operations *aops = mapping->a_ops;
char __user *user_data = u64_to_user_ptr(arg->data_ptr); char __user *user_data = u64_to_user_ptr(arg->data_ptr);
u64 remain, offset; u64 remain;
loff_t pos;
unsigned int pg; unsigned int pg;
/* Caller already validated user args */ /* Caller already validated user args */
@ -457,12 +458,12 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
*/ */
remain = arg->size; remain = arg->size;
offset = arg->offset; pos = arg->offset;
pg = offset_in_page(offset); pg = offset_in_page(pos);
do { do {
unsigned int len, unwritten; unsigned int len, unwritten;
struct page *page; struct folio *folio;
void *data, *vaddr; void *data, *vaddr;
int err; int err;
char __maybe_unused c; char __maybe_unused c;
@ -480,21 +481,19 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
if (err) if (err)
return err; return err;
err = aops->write_begin(obj->base.filp, mapping, offset, len, err = aops->write_begin(obj->base.filp, mapping, pos, len,
&page, &data); &folio, &data);
if (err < 0) if (err < 0)
return err; return err;
vaddr = kmap_local_page(page); vaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
pagefault_disable(); pagefault_disable();
unwritten = __copy_from_user_inatomic(vaddr + pg, unwritten = __copy_from_user_inatomic(vaddr, user_data, len);
user_data,
len);
pagefault_enable(); pagefault_enable();
kunmap_local(vaddr); kunmap_local(vaddr);
err = aops->write_end(obj->base.filp, mapping, offset, len, err = aops->write_end(obj->base.filp, mapping, pos, len,
len - unwritten, page, data); len - unwritten, folio, data);
if (err < 0) if (err < 0)
return err; return err;
@ -504,7 +503,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
remain -= len; remain -= len;
user_data += len; user_data += len;
offset += len; pos += len;
pg = 0; pg = 0;
} while (remain); } while (remain);
@ -660,7 +659,7 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct file *file; struct file *file;
const struct address_space_operations *aops; const struct address_space_operations *aops;
resource_size_t offset; loff_t pos;
int err; int err;
GEM_WARN_ON(IS_DGFX(i915)); GEM_WARN_ON(IS_DGFX(i915));
@ -672,29 +671,27 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
file = obj->base.filp; file = obj->base.filp;
aops = file->f_mapping->a_ops; aops = file->f_mapping->a_ops;
offset = 0; pos = 0;
do { do {
unsigned int len = min_t(typeof(size), size, PAGE_SIZE); unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
struct page *page; struct folio *folio;
void *pgdata, *vaddr; void *fsdata;
err = aops->write_begin(file, file->f_mapping, offset, len, err = aops->write_begin(file, file->f_mapping, pos, len,
&page, &pgdata); &folio, &fsdata);
if (err < 0) if (err < 0)
goto fail; goto fail;
vaddr = kmap(page); memcpy_to_folio(folio, offset_in_folio(folio, pos), data, len);
memcpy(vaddr, data, len);
kunmap(page);
err = aops->write_end(file, file->f_mapping, offset, len, len, err = aops->write_end(file, file->f_mapping, pos, len, len,
page, pgdata); folio, fsdata);
if (err < 0) if (err < 0)
goto fail; goto fail;
size -= len; size -= len;
data += len; data += len;
offset += len; pos += len;
} while (size); } while (size);
return obj; return obj;

View File

@ -55,12 +55,11 @@ static void adfs_write_failed(struct address_space *mapping, loff_t to)
static int adfs_write_begin(struct file *file, struct address_space *mapping, static int adfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
*pagep = NULL; ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
adfs_get_block, adfs_get_block,
&ADFS_I(mapping->host)->mmu_private); &ADFS_I(mapping->host)->mmu_private);
if (unlikely(ret)) if (unlikely(ret))

View File

@ -417,12 +417,11 @@ affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
static int affs_write_begin(struct file *file, struct address_space *mapping, static int affs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
*pagep = NULL; ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
affs_get_block, affs_get_block,
&AFFS_I(mapping->host)->mmu_private); &AFFS_I(mapping->host)->mmu_private);
if (unlikely(ret)) if (unlikely(ret))
@ -433,12 +432,12 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,
static int affs_write_end(struct file *file, struct address_space *mapping, static int affs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int copied, loff_t pos, unsigned int len, unsigned int copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
int ret; int ret;
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
/* Clear Archived bit on file writes, as AmigaOS would do */ /* Clear Archived bit on file writes, as AmigaOS would do */
if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
@ -648,7 +647,7 @@ static int affs_read_folio_ofs(struct file *file, struct folio *folio)
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping, static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct folio *folio; struct folio *folio;
@ -671,7 +670,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
mapping_gfp_mask(mapping)); mapping_gfp_mask(mapping));
if (IS_ERR(folio)) if (IS_ERR(folio))
return PTR_ERR(folio); return PTR_ERR(folio);
*pagep = &folio->page; *foliop = folio;
if (folio_test_uptodate(folio)) if (folio_test_uptodate(folio))
return 0; return 0;
@ -687,9 +686,8 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
static int affs_write_end_ofs(struct file *file, struct address_space *mapping, static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct folio *folio = page_folio(page);
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
struct buffer_head *bh, *prev_bh; struct buffer_head *bh, *prev_bh;
@ -882,14 +880,14 @@ affs_truncate(struct inode *inode)
if (inode->i_size > AFFS_I(inode)->mmu_private) { if (inode->i_size > AFFS_I(inode)->mmu_private) {
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
struct page *page; struct folio *folio;
void *fsdata = NULL; void *fsdata = NULL;
loff_t isize = inode->i_size; loff_t isize = inode->i_size;
int res; int res;
res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &page, &fsdata); res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &folio, &fsdata);
if (!res) if (!res)
res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata); res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, folio, fsdata);
else else
inode->i_size = AFFS_I(inode)->mmu_private; inode->i_size = AFFS_I(inode)->mmu_private;
mark_inode_dirty(inode); mark_inode_dirty(inode);

View File

@ -659,7 +659,7 @@ int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc
int bch2_write_begin(struct file *file, struct address_space *mapping, int bch2_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
struct bch_inode_info *inode = to_bch_ei(mapping->host); struct bch_inode_info *inode = to_bch_ei(mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bch_fs *c = inode->v.i_sb->s_fs_info;
@ -728,12 +728,11 @@ out:
goto err; goto err;
} }
*pagep = &folio->page; *foliop = folio;
return 0; return 0;
err: err:
folio_unlock(folio); folio_unlock(folio);
folio_put(folio); folio_put(folio);
*pagep = NULL;
err_unlock: err_unlock:
bch2_pagecache_add_put(inode); bch2_pagecache_add_put(inode);
kfree(res); kfree(res);
@ -743,12 +742,11 @@ err_unlock:
int bch2_write_end(struct file *file, struct address_space *mapping, int bch2_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct bch_inode_info *inode = to_bch_ei(mapping->host); struct bch_inode_info *inode = to_bch_ei(mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch2_folio_reservation *res = fsdata; struct bch2_folio_reservation *res = fsdata;
struct folio *folio = page_folio(page);
unsigned offset = pos - folio_pos(folio); unsigned offset = pos - folio_pos(folio);
lockdep_assert_held(&inode->v.i_rwsem); lockdep_assert_held(&inode->v.i_rwsem);

View File

@ -10,10 +10,10 @@ int bch2_read_folio(struct file *, struct folio *);
int bch2_writepages(struct address_space *, struct writeback_control *); int bch2_writepages(struct address_space *, struct writeback_control *);
void bch2_readahead(struct readahead_control *); void bch2_readahead(struct readahead_control *);
int bch2_write_begin(struct file *, struct address_space *, loff_t, int bch2_write_begin(struct file *, struct address_space *, loff_t pos,
unsigned, struct page **, void **); unsigned len, struct folio **, void **);
int bch2_write_end(struct file *, struct address_space *, loff_t, int bch2_write_end(struct file *, struct address_space *, loff_t,
unsigned, unsigned, struct page *, void *); unsigned len, unsigned copied, struct folio *, void *);
ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *); ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *);

View File

@ -172,11 +172,11 @@ static void bfs_write_failed(struct address_space *mapping, loff_t to)
static int bfs_write_begin(struct file *file, struct address_space *mapping, static int bfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
ret = block_write_begin(mapping, pos, len, pagep, bfs_get_block); ret = block_write_begin(mapping, pos, len, foliop, bfs_get_block);
if (unlikely(ret)) if (unlikely(ret))
bfs_write_failed(mapping, pos + len); bfs_write_failed(mapping, pos + len);

View File

@ -2164,11 +2164,10 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
return err; return err;
} }
int __block_write_begin(struct page *page, loff_t pos, unsigned len, int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block) get_block_t *get_block)
{ {
return __block_write_begin_int(page_folio(page), pos, len, get_block, return __block_write_begin_int(folio, pos, len, get_block, NULL);
NULL);
} }
EXPORT_SYMBOL(__block_write_begin); EXPORT_SYMBOL(__block_write_begin);
@ -2218,33 +2217,33 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to)
* The filesystem needs to handle block truncation upon failure. * The filesystem needs to handle block truncation upon failure.
*/ */
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
struct page **pagep, get_block_t *get_block) struct folio **foliop, get_block_t *get_block)
{ {
pgoff_t index = pos >> PAGE_SHIFT; pgoff_t index = pos >> PAGE_SHIFT;
struct page *page; struct folio *folio;
int status; int status;
page = grab_cache_page_write_begin(mapping, index); folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
if (!page) mapping_gfp_mask(mapping));
return -ENOMEM; if (IS_ERR(folio))
return PTR_ERR(folio);
status = __block_write_begin(page, pos, len, get_block); status = __block_write_begin_int(folio, pos, len, get_block, NULL);
if (unlikely(status)) { if (unlikely(status)) {
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
page = NULL; folio = NULL;
} }
*pagep = page; *foliop = folio;
return status; return status;
} }
EXPORT_SYMBOL(block_write_begin); EXPORT_SYMBOL(block_write_begin);
int block_write_end(struct file *file, struct address_space *mapping, int block_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct folio *folio = page_folio(page);
size_t start = pos - folio_pos(folio); size_t start = pos - folio_pos(folio);
if (unlikely(copied < len)) { if (unlikely(copied < len)) {
@ -2276,19 +2275,19 @@ EXPORT_SYMBOL(block_write_end);
int generic_write_end(struct file *file, struct address_space *mapping, int generic_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
loff_t old_size = inode->i_size; loff_t old_size = inode->i_size;
bool i_size_changed = false; bool i_size_changed = false;
copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
/* /*
* No need to use i_size_read() here, the i_size cannot change under us * No need to use i_size_read() here, the i_size cannot change under us
* because we hold i_rwsem. * because we hold i_rwsem.
* *
* But it's important to update i_size while still holding page lock: * But it's important to update i_size while still holding folio lock:
* page writeout could otherwise come in and zero beyond i_size. * page writeout could otherwise come in and zero beyond i_size.
*/ */
if (pos + copied > inode->i_size) { if (pos + copied > inode->i_size) {
@ -2296,8 +2295,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
i_size_changed = true; i_size_changed = true;
} }
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
if (old_size < pos) if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos); pagecache_isize_extended(inode, old_size, pos);
@ -2463,7 +2462,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
{ {
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops; const struct address_space_operations *aops = mapping->a_ops;
struct page *page; struct folio *folio;
void *fsdata = NULL; void *fsdata = NULL;
int err; int err;
@ -2471,11 +2470,11 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
if (err) if (err)
goto out; goto out;
err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata); err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
if (err) if (err)
goto out; goto out;
err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata); err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
BUG_ON(err > 0); BUG_ON(err > 0);
out: out:
@ -2489,7 +2488,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
const struct address_space_operations *aops = mapping->a_ops; const struct address_space_operations *aops = mapping->a_ops;
unsigned int blocksize = i_blocksize(inode); unsigned int blocksize = i_blocksize(inode);
struct page *page; struct folio *folio;
void *fsdata = NULL; void *fsdata = NULL;
pgoff_t index, curidx; pgoff_t index, curidx;
loff_t curpos; loff_t curpos;
@ -2508,12 +2507,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
len = PAGE_SIZE - zerofrom; len = PAGE_SIZE - zerofrom;
err = aops->write_begin(file, mapping, curpos, len, err = aops->write_begin(file, mapping, curpos, len,
&page, &fsdata); &folio, &fsdata);
if (err) if (err)
goto out; goto out;
zero_user(page, zerofrom, len); folio_zero_range(folio, offset_in_folio(folio, curpos), len);
err = aops->write_end(file, mapping, curpos, len, len, err = aops->write_end(file, mapping, curpos, len, len,
page, fsdata); folio, fsdata);
if (err < 0) if (err < 0)
goto out; goto out;
BUG_ON(err != len); BUG_ON(err != len);
@ -2541,12 +2540,12 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
len = offset - zerofrom; len = offset - zerofrom;
err = aops->write_begin(file, mapping, curpos, len, err = aops->write_begin(file, mapping, curpos, len,
&page, &fsdata); &folio, &fsdata);
if (err) if (err)
goto out; goto out;
zero_user(page, zerofrom, len); folio_zero_range(folio, offset_in_folio(folio, curpos), len);
err = aops->write_end(file, mapping, curpos, len, len, err = aops->write_end(file, mapping, curpos, len, len,
page, fsdata); folio, fsdata);
if (err < 0) if (err < 0)
goto out; goto out;
BUG_ON(err != len); BUG_ON(err != len);
@ -2562,7 +2561,7 @@ out:
*/ */
int cont_write_begin(struct file *file, struct address_space *mapping, int cont_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata, struct folio **foliop, void **fsdata,
get_block_t *get_block, loff_t *bytes) get_block_t *get_block, loff_t *bytes)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
@ -2580,7 +2579,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
(*bytes)++; (*bytes)++;
} }
return block_write_begin(mapping, pos, len, pagep, get_block); return block_write_begin(mapping, pos, len, foliop, get_block);
} }
EXPORT_SYMBOL(cont_write_begin); EXPORT_SYMBOL(cont_write_begin);

View File

@ -1508,20 +1508,18 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned
*/ */
static int ceph_write_begin(struct file *file, struct address_space *mapping, static int ceph_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
struct inode *inode = file_inode(file); struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_inode_info *ci = ceph_inode(inode);
struct folio *folio = NULL;
int r; int r;
r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL); r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, foliop, NULL);
if (r < 0) if (r < 0)
return r; return r;
folio_wait_private_2(folio); /* [DEPRECATED] */ folio_wait_private_2(*foliop); /* [DEPRECATED] */
WARN_ON_ONCE(!folio_test_locked(folio)); WARN_ON_ONCE(!folio_test_locked(*foliop));
*pagep = &folio->page;
return 0; return 0;
} }
@ -1531,9 +1529,8 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
*/ */
static int ceph_write_end(struct file *file, struct address_space *mapping, static int ceph_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *subpage, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct folio *folio = page_folio(subpage);
struct inode *inode = file_inode(file); struct inode *inode = file_inode(file);
struct ceph_client *cl = ceph_inode_to_client(inode); struct ceph_client *cl = ceph_inode_to_client(inode);
bool check_cap = false; bool check_cap = false;

View File

@ -234,17 +234,17 @@ out:
/* /*
* Called with lower inode mutex held. * Called with lower inode mutex held.
*/ */
static int fill_zeros_to_end_of_page(struct page *page, unsigned int to) static int fill_zeros_to_end_of_page(struct folio *folio, unsigned int to)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = folio->mapping->host;
int end_byte_in_page; int end_byte_in_page;
if ((i_size_read(inode) / PAGE_SIZE) != page->index) if ((i_size_read(inode) / PAGE_SIZE) != folio->index)
goto out; goto out;
end_byte_in_page = i_size_read(inode) % PAGE_SIZE; end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
if (to > end_byte_in_page) if (to > end_byte_in_page)
end_byte_in_page = to; end_byte_in_page = to;
zero_user_segment(page, end_byte_in_page, PAGE_SIZE); folio_zero_segment(folio, end_byte_in_page, PAGE_SIZE);
out: out:
return 0; return 0;
} }
@ -255,7 +255,7 @@ out:
* @mapping: The eCryptfs object * @mapping: The eCryptfs object
* @pos: The file offset at which to start writing * @pos: The file offset at which to start writing
* @len: Length of the write * @len: Length of the write
* @pagep: Pointer to return the page * @foliop: Pointer to return the folio
* @fsdata: Pointer to return fs data (unused) * @fsdata: Pointer to return fs data (unused)
* *
* This function must zero any hole we create * This function must zero any hole we create
@ -265,38 +265,39 @@ out:
static int ecryptfs_write_begin(struct file *file, static int ecryptfs_write_begin(struct file *file,
struct address_space *mapping, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
pgoff_t index = pos >> PAGE_SHIFT; pgoff_t index = pos >> PAGE_SHIFT;
struct page *page; struct folio *folio;
loff_t prev_page_end_size; loff_t prev_page_end_size;
int rc = 0; int rc = 0;
page = grab_cache_page_write_begin(mapping, index); folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
if (!page) mapping_gfp_mask(mapping));
return -ENOMEM; if (IS_ERR(folio))
*pagep = page; return PTR_ERR(folio);
*foliop = folio;
prev_page_end_size = ((loff_t)index << PAGE_SHIFT); prev_page_end_size = ((loff_t)index << PAGE_SHIFT);
if (!PageUptodate(page)) { if (!folio_test_uptodate(folio)) {
struct ecryptfs_crypt_stat *crypt_stat = struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(mapping->host)->crypt_stat; &ecryptfs_inode_to_private(mapping->host)->crypt_stat;
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_read_lower_page_segment( rc = ecryptfs_read_lower_page_segment(
page, index, 0, PAGE_SIZE, mapping->host); &folio->page, index, 0, PAGE_SIZE, mapping->host);
if (rc) { if (rc) {
printk(KERN_ERR "%s: Error attempting to read " printk(KERN_ERR "%s: Error attempting to read "
"lower page segment; rc = [%d]\n", "lower page segment; rc = [%d]\n",
__func__, rc); __func__, rc);
ClearPageUptodate(page); folio_clear_uptodate(folio);
goto out; goto out;
} else } else
SetPageUptodate(page); folio_mark_uptodate(folio);
} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) { } else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) { if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
rc = ecryptfs_copy_up_encrypted_with_header( rc = ecryptfs_copy_up_encrypted_with_header(
page, crypt_stat); &folio->page, crypt_stat);
if (rc) { if (rc) {
printk(KERN_ERR "%s: Error attempting " printk(KERN_ERR "%s: Error attempting "
"to copy the encrypted content " "to copy the encrypted content "
@ -304,46 +305,46 @@ static int ecryptfs_write_begin(struct file *file,
"inserting the metadata from " "inserting the metadata from "
"the xattr into the header; rc " "the xattr into the header; rc "
"= [%d]\n", __func__, rc); "= [%d]\n", __func__, rc);
ClearPageUptodate(page); folio_clear_uptodate(folio);
goto out; goto out;
} }
SetPageUptodate(page); folio_mark_uptodate(folio);
} else { } else {
rc = ecryptfs_read_lower_page_segment( rc = ecryptfs_read_lower_page_segment(
page, index, 0, PAGE_SIZE, &folio->page, index, 0, PAGE_SIZE,
mapping->host); mapping->host);
if (rc) { if (rc) {
printk(KERN_ERR "%s: Error reading " printk(KERN_ERR "%s: Error reading "
"page; rc = [%d]\n", "page; rc = [%d]\n",
__func__, rc); __func__, rc);
ClearPageUptodate(page); folio_clear_uptodate(folio);
goto out; goto out;
} }
SetPageUptodate(page); folio_mark_uptodate(folio);
} }
} else { } else {
if (prev_page_end_size if (prev_page_end_size
>= i_size_read(page->mapping->host)) { >= i_size_read(mapping->host)) {
zero_user(page, 0, PAGE_SIZE); folio_zero_range(folio, 0, PAGE_SIZE);
SetPageUptodate(page); folio_mark_uptodate(folio);
} else if (len < PAGE_SIZE) { } else if (len < PAGE_SIZE) {
rc = ecryptfs_decrypt_page(page); rc = ecryptfs_decrypt_page(&folio->page);
if (rc) { if (rc) {
printk(KERN_ERR "%s: Error decrypting " printk(KERN_ERR "%s: Error decrypting "
"page at index [%ld]; " "page at index [%ld]; "
"rc = [%d]\n", "rc = [%d]\n",
__func__, page->index, rc); __func__, folio->index, rc);
ClearPageUptodate(page); folio_clear_uptodate(folio);
goto out; goto out;
} }
SetPageUptodate(page); folio_mark_uptodate(folio);
} }
} }
} }
/* If creating a page or more of holes, zero them out via truncate. /* If creating a page or more of holes, zero them out via truncate.
* Note, this will increase i_size. */ * Note, this will increase i_size. */
if (index != 0) { if (index != 0) {
if (prev_page_end_size > i_size_read(page->mapping->host)) { if (prev_page_end_size > i_size_read(mapping->host)) {
rc = ecryptfs_truncate(file->f_path.dentry, rc = ecryptfs_truncate(file->f_path.dentry,
prev_page_end_size); prev_page_end_size);
if (rc) { if (rc) {
@ -359,12 +360,11 @@ static int ecryptfs_write_begin(struct file *file,
* of page? Zero it out. */ * of page? Zero it out. */
if ((i_size_read(mapping->host) == prev_page_end_size) if ((i_size_read(mapping->host) == prev_page_end_size)
&& (pos != 0)) && (pos != 0))
zero_user(page, 0, PAGE_SIZE); folio_zero_range(folio, 0, PAGE_SIZE);
out: out:
if (unlikely(rc)) { if (unlikely(rc)) {
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
*pagep = NULL;
} }
return rc; return rc;
} }
@ -457,13 +457,13 @@ int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode)
* @pos: The file position * @pos: The file position
* @len: The length of the data (unused) * @len: The length of the data (unused)
* @copied: The amount of data copied * @copied: The amount of data copied
* @page: The eCryptfs page * @folio: The eCryptfs folio
* @fsdata: The fsdata (unused) * @fsdata: The fsdata (unused)
*/ */
static int ecryptfs_write_end(struct file *file, static int ecryptfs_write_end(struct file *file,
struct address_space *mapping, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
pgoff_t index = pos >> PAGE_SHIFT; pgoff_t index = pos >> PAGE_SHIFT;
unsigned from = pos & (PAGE_SIZE - 1); unsigned from = pos & (PAGE_SIZE - 1);
@ -476,8 +476,8 @@ static int ecryptfs_write_end(struct file *file,
ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page" ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
"(page w/ index = [0x%.16lx], to = [%d])\n", index, to); "(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page, 0, rc = ecryptfs_write_lower_page_segment(ecryptfs_inode,
to); &folio->page, 0, to);
if (!rc) { if (!rc) {
rc = copied; rc = copied;
fsstack_copy_inode_size(ecryptfs_inode, fsstack_copy_inode_size(ecryptfs_inode,
@ -485,21 +485,21 @@ static int ecryptfs_write_end(struct file *file,
} }
goto out; goto out;
} }
if (!PageUptodate(page)) { if (!folio_test_uptodate(folio)) {
if (copied < PAGE_SIZE) { if (copied < PAGE_SIZE) {
rc = 0; rc = 0;
goto out; goto out;
} }
SetPageUptodate(page); folio_mark_uptodate(folio);
} }
/* Fills in zeros if 'to' goes beyond inode size */ /* Fills in zeros if 'to' goes beyond inode size */
rc = fill_zeros_to_end_of_page(page, to); rc = fill_zeros_to_end_of_page(folio, to);
if (rc) { if (rc) {
ecryptfs_printk(KERN_WARNING, "Error attempting to fill " ecryptfs_printk(KERN_WARNING, "Error attempting to fill "
"zeros in page with index = [0x%.16lx]\n", index); "zeros in page with index = [0x%.16lx]\n", index);
goto out; goto out;
} }
rc = ecryptfs_encrypt_page(page); rc = ecryptfs_encrypt_page(&folio->page);
if (rc) { if (rc) {
ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper " ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
"index [0x%.16lx])\n", index); "index [0x%.16lx])\n", index);
@ -518,8 +518,8 @@ static int ecryptfs_write_end(struct file *file,
else else
rc = copied; rc = copied;
out: out:
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
return rc; return rc;
} }

View File

@ -535,20 +535,20 @@ static int exfat_file_zeroed_range(struct file *file, loff_t start, loff_t end)
while (start < end) { while (start < end) {
u32 zerofrom, len; u32 zerofrom, len;
struct page *page = NULL; struct folio *folio;
zerofrom = start & (PAGE_SIZE - 1); zerofrom = start & (PAGE_SIZE - 1);
len = PAGE_SIZE - zerofrom; len = PAGE_SIZE - zerofrom;
if (start + len > end) if (start + len > end)
len = end - start; len = end - start;
err = ops->write_begin(file, mapping, start, len, &page, NULL); err = ops->write_begin(file, mapping, start, len, &folio, NULL);
if (err) if (err)
goto out; goto out;
zero_user_segment(page, zerofrom, zerofrom + len); folio_zero_range(folio, offset_in_folio(folio, start), len);
err = ops->write_end(file, mapping, start, len, len, page, NULL); err = ops->write_end(file, mapping, start, len, len, folio, NULL);
if (err < 0) if (err < 0)
goto out; goto out;
start += len; start += len;

View File

@ -448,12 +448,11 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to)
static int exfat_write_begin(struct file *file, struct address_space *mapping, static int exfat_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, loff_t pos, unsigned int len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
*pagep = NULL; ret = block_write_begin(mapping, pos, len, foliop, exfat_get_block);
ret = block_write_begin(mapping, pos, len, pagep, exfat_get_block);
if (ret < 0) if (ret < 0)
exfat_write_failed(mapping, pos+len); exfat_write_failed(mapping, pos+len);
@ -463,13 +462,13 @@ static int exfat_write_begin(struct file *file, struct address_space *mapping,
static int exfat_write_end(struct file *file, struct address_space *mapping, static int exfat_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int copied, loff_t pos, unsigned int len, unsigned int copied,
struct page *pagep, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct exfat_inode_info *ei = EXFAT_I(inode); struct exfat_inode_info *ei = EXFAT_I(inode);
int err; int err;
err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (ei->i_size_aligned < i_size_read(inode)) { if (ei->i_size_aligned < i_size_read(inode)) {
exfat_fs_error(inode->i_sb, exfat_fs_error(inode->i_sb,

View File

@ -87,7 +87,7 @@ static void ext2_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
struct inode *dir = mapping->host; struct inode *dir = mapping->host;
inode_inc_iversion(dir); inode_inc_iversion(dir);
block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL); block_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (pos+len > dir->i_size) { if (pos+len > dir->i_size) {
i_size_write(dir, pos+len); i_size_write(dir, pos+len);
@ -434,7 +434,7 @@ int ext2_inode_by_name(struct inode *dir, const struct qstr *child, ino_t *ino)
static int ext2_prepare_chunk(struct folio *folio, loff_t pos, unsigned len) static int ext2_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{ {
return __block_write_begin(&folio->page, pos, len, ext2_get_block); return __block_write_begin(folio, pos, len, ext2_get_block);
} }
static int ext2_handle_dirsync(struct inode *dir) static int ext2_handle_dirsync(struct inode *dir)

View File

@ -916,11 +916,11 @@ static void ext2_readahead(struct readahead_control *rac)
static int static int
ext2_write_begin(struct file *file, struct address_space *mapping, ext2_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata) loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
ret = block_write_begin(mapping, pos, len, pagep, ext2_get_block); ret = block_write_begin(mapping, pos, len, foliop, ext2_get_block);
if (ret < 0) if (ret < 0)
ext2_write_failed(mapping, pos + len); ext2_write_failed(mapping, pos + len);
return ret; return ret;
@ -928,11 +928,11 @@ ext2_write_begin(struct file *file, struct address_space *mapping,
static int ext2_write_end(struct file *file, struct address_space *mapping, static int ext2_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
int ret; int ret;
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (ret < len) if (ret < len)
ext2_write_failed(mapping, pos + len); ext2_write_failed(mapping, pos + len);
return ret; return ret;

View File

@ -3563,13 +3563,13 @@ int ext4_readpage_inline(struct inode *inode, struct folio *folio);
extern int ext4_try_to_write_inline_data(struct address_space *mapping, extern int ext4_try_to_write_inline_data(struct address_space *mapping,
struct inode *inode, struct inode *inode,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep); struct folio **foliop);
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
unsigned copied, struct folio *folio); unsigned copied, struct folio *folio);
extern int ext4_da_write_inline_data_begin(struct address_space *mapping, extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct inode *inode, struct inode *inode,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, struct folio **foliop,
void **fsdata); void **fsdata);
extern int ext4_try_add_inline_entry(handle_t *handle, extern int ext4_try_add_inline_entry(handle_t *handle,
struct ext4_filename *fname, struct ext4_filename *fname,

View File

@ -601,10 +601,10 @@ retry:
goto out; goto out;
if (ext4_should_dioread_nolock(inode)) { if (ext4_should_dioread_nolock(inode)) {
ret = __block_write_begin(&folio->page, from, to, ret = __block_write_begin(folio, from, to,
ext4_get_block_unwritten); ext4_get_block_unwritten);
} else } else
ret = __block_write_begin(&folio->page, from, to, ext4_get_block); ret = __block_write_begin(folio, from, to, ext4_get_block);
if (!ret && ext4_should_journal_data(inode)) { if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, inode, ret = ext4_walk_page_buffers(handle, inode,
@ -660,7 +660,7 @@ out_nofolio:
int ext4_try_to_write_inline_data(struct address_space *mapping, int ext4_try_to_write_inline_data(struct address_space *mapping,
struct inode *inode, struct inode *inode,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep) struct folio **foliop)
{ {
int ret; int ret;
handle_t *handle; handle_t *handle;
@ -708,7 +708,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
goto out; goto out;
} }
*pagep = &folio->page; *foliop = folio;
down_read(&EXT4_I(inode)->xattr_sem); down_read(&EXT4_I(inode)->xattr_sem);
if (!ext4_has_inline_data(inode)) { if (!ext4_has_inline_data(inode)) {
ret = 0; ret = 0;
@ -856,7 +856,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
goto out; goto out;
} }
ret = __block_write_begin(&folio->page, 0, inline_size, ret = __block_write_begin(folio, 0, inline_size,
ext4_da_get_block_prep); ext4_da_get_block_prep);
if (ret) { if (ret) {
up_read(&EXT4_I(inode)->xattr_sem); up_read(&EXT4_I(inode)->xattr_sem);
@ -891,7 +891,7 @@ out:
int ext4_da_write_inline_data_begin(struct address_space *mapping, int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct inode *inode, struct inode *inode,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, struct folio **foliop,
void **fsdata) void **fsdata)
{ {
int ret; int ret;
@ -954,7 +954,7 @@ retry_journal:
goto out_release_page; goto out_release_page;
up_read(&EXT4_I(inode)->xattr_sem); up_read(&EXT4_I(inode)->xattr_sem);
*pagep = &folio->page; *foliop = folio;
brelse(iloc.bh); brelse(iloc.bh);
return 1; return 1;
out_release_page: out_release_page:

View File

@ -1145,7 +1145,7 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
*/ */
static int ext4_write_begin(struct file *file, struct address_space *mapping, static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
int ret, needed_blocks; int ret, needed_blocks;
@ -1170,7 +1170,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_try_to_write_inline_data(mapping, inode, pos, len, ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
pagep); foliop);
if (ret < 0) if (ret < 0)
return ret; return ret;
if (ret == 1) if (ret == 1)
@ -1224,10 +1224,10 @@ retry_journal:
ret = ext4_block_write_begin(folio, pos, len, ext4_get_block); ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
#else #else
if (ext4_should_dioread_nolock(inode)) if (ext4_should_dioread_nolock(inode))
ret = __block_write_begin(&folio->page, pos, len, ret = __block_write_begin(folio, pos, len,
ext4_get_block_unwritten); ext4_get_block_unwritten);
else else
ret = __block_write_begin(&folio->page, pos, len, ext4_get_block); ret = __block_write_begin(folio, pos, len, ext4_get_block);
#endif #endif
if (!ret && ext4_should_journal_data(inode)) { if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, inode, ret = ext4_walk_page_buffers(handle, inode,
@ -1270,7 +1270,7 @@ retry_journal:
folio_put(folio); folio_put(folio);
return ret; return ret;
} }
*pagep = &folio->page; *foliop = folio;
return ret; return ret;
} }
@ -1298,9 +1298,8 @@ static int write_end_fn(handle_t *handle, struct inode *inode,
static int ext4_write_end(struct file *file, static int ext4_write_end(struct file *file,
struct address_space *mapping, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct folio *folio = page_folio(page);
handle_t *handle = ext4_journal_current_handle(); handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
loff_t old_size = inode->i_size; loff_t old_size = inode->i_size;
@ -1315,7 +1314,7 @@ static int ext4_write_end(struct file *file,
return ext4_write_inline_data_end(inode, pos, len, copied, return ext4_write_inline_data_end(inode, pos, len, copied,
folio); folio);
copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
/* /*
* it's important to update i_size while still holding folio lock: * it's important to update i_size while still holding folio lock:
* page writeout could otherwise come in and zero beyond i_size. * page writeout could otherwise come in and zero beyond i_size.
@ -1402,9 +1401,8 @@ static void ext4_journalled_zero_new_buffers(handle_t *handle,
static int ext4_journalled_write_end(struct file *file, static int ext4_journalled_write_end(struct file *file,
struct address_space *mapping, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct folio *folio = page_folio(page);
handle_t *handle = ext4_journal_current_handle(); handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
loff_t old_size = inode->i_size; loff_t old_size = inode->i_size;
@ -2926,7 +2924,7 @@ static int ext4_nonda_switch(struct super_block *sb)
static int ext4_da_write_begin(struct file *file, struct address_space *mapping, static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int ret, retries = 0; int ret, retries = 0;
struct folio *folio; struct folio *folio;
@ -2941,14 +2939,14 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) { if (ext4_nonda_switch(inode->i_sb) || ext4_verity_in_progress(inode)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC; *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
return ext4_write_begin(file, mapping, pos, return ext4_write_begin(file, mapping, pos,
len, pagep, fsdata); len, foliop, fsdata);
} }
*fsdata = (void *)0; *fsdata = (void *)0;
trace_ext4_da_write_begin(inode, pos, len); trace_ext4_da_write_begin(inode, pos, len);
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len, ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len,
pagep, fsdata); foliop, fsdata);
if (ret < 0) if (ret < 0)
return ret; return ret;
if (ret == 1) if (ret == 1)
@ -2964,7 +2962,7 @@ retry:
#ifdef CONFIG_FS_ENCRYPTION #ifdef CONFIG_FS_ENCRYPTION
ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep); ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
#else #else
ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep); ret = __block_write_begin(folio, pos, len, ext4_da_get_block_prep);
#endif #endif
if (ret < 0) { if (ret < 0) {
folio_unlock(folio); folio_unlock(folio);
@ -2983,7 +2981,7 @@ retry:
return ret; return ret;
} }
*pagep = &folio->page; *foliop = folio;
return ret; return ret;
} }
@ -3029,7 +3027,7 @@ static int ext4_da_do_write_end(struct address_space *mapping,
* flag, which all that's needed to trigger page writeback. * flag, which all that's needed to trigger page writeback.
*/ */
copied = block_write_end(NULL, mapping, pos, len, copied, copied = block_write_end(NULL, mapping, pos, len, copied,
&folio->page, NULL); folio, NULL);
new_i_size = pos + copied; new_i_size = pos + copied;
/* /*
@ -3080,15 +3078,14 @@ static int ext4_da_do_write_end(struct address_space *mapping,
static int ext4_da_write_end(struct file *file, static int ext4_da_write_end(struct file *file,
struct address_space *mapping, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
int write_mode = (int)(unsigned long)fsdata; int write_mode = (int)(unsigned long)fsdata;
struct folio *folio = page_folio(page);
if (write_mode == FALL_BACK_TO_NONDELALLOC) if (write_mode == FALL_BACK_TO_NONDELALLOC)
return ext4_write_end(file, mapping, pos, return ext4_write_end(file, mapping, pos,
len, copied, &folio->page, fsdata); len, copied, folio, fsdata);
trace_ext4_da_write_end(inode, pos, len, copied); trace_ext4_da_write_end(inode, pos, len, copied);
@ -6219,7 +6216,7 @@ retry_alloc:
if (folio_pos(folio) + len > size) if (folio_pos(folio) + len > size)
len = size - folio_pos(folio); len = size - folio_pos(folio);
err = __block_write_begin(&folio->page, 0, len, ext4_get_block); err = __block_write_begin(folio, 0, len, ext4_get_block);
if (!err) { if (!err) {
ret = VM_FAULT_SIGBUS; ret = VM_FAULT_SIGBUS;
if (ext4_journal_folio_buffers(handle, folio, len)) if (ext4_journal_folio_buffers(handle, folio, len))

View File

@ -76,17 +76,17 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
while (count) { while (count) {
size_t n = min_t(size_t, count, size_t n = min_t(size_t, count,
PAGE_SIZE - offset_in_page(pos)); PAGE_SIZE - offset_in_page(pos));
struct page *page; struct folio *folio;
void *fsdata = NULL; void *fsdata = NULL;
int res; int res;
res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata); res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata);
if (res) if (res)
return res; return res;
memcpy_to_page(page, offset_in_page(pos), buf, n); memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, n);
res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata); res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata);
if (res < 0) if (res < 0)
return res; return res;
if (res != n) if (res != n)

View File

@ -3552,12 +3552,12 @@ reserve_block:
} }
static int f2fs_write_begin(struct file *file, struct address_space *mapping, static int f2fs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata) loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *page = NULL; struct folio *folio;
pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT; pgoff_t index = pos >> PAGE_SHIFT;
bool need_balance = false; bool need_balance = false;
bool use_cow = false; bool use_cow = false;
block_t blkaddr = NULL_ADDR; block_t blkaddr = NULL_ADDR;
@ -3573,7 +3573,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
/* /*
* We should check this at this moment to avoid deadlock on inode page * We should check this at this moment to avoid deadlock on inode page
* and #0 page. The locking rule for inline_data conversion should be: * and #0 page. The locking rule for inline_data conversion should be:
* lock_page(page #0) -> lock_page(inode_page) * folio_lock(folio #0) -> folio_lock(inode_page)
*/ */
if (index != 0) { if (index != 0) {
err = f2fs_convert_inline_inode(inode); err = f2fs_convert_inline_inode(inode);
@ -3584,18 +3584,20 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
#ifdef CONFIG_F2FS_FS_COMPRESSION #ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) { if (f2fs_compressed_file(inode)) {
int ret; int ret;
struct page *page;
*fsdata = NULL; *fsdata = NULL;
if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode))) if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
goto repeat; goto repeat;
ret = f2fs_prepare_compress_overwrite(inode, pagep, ret = f2fs_prepare_compress_overwrite(inode, &page,
index, fsdata); index, fsdata);
if (ret < 0) { if (ret < 0) {
err = ret; err = ret;
goto fail; goto fail;
} else if (ret) { } else if (ret) {
*foliop = page_folio(page);
return 0; return 0;
} }
} }
@ -3603,81 +3605,85 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
repeat: repeat:
/* /*
* Do not use grab_cache_page_write_begin() to avoid deadlock due to * Do not use FGP_STABLE to avoid deadlock.
* wait_for_stable_page. Will wait that below with our IO control. * Will wait that below with our IO control.
*/ */
page = f2fs_pagecache_get_page(mapping, index, folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS); FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
if (!page) { if (IS_ERR(folio)) {
err = -ENOMEM; err = PTR_ERR(folio);
goto fail; goto fail;
} }
/* TODO: cluster can be compressed due to race with .writepage */ /* TODO: cluster can be compressed due to race with .writepage */
*pagep = page; *foliop = folio;
if (f2fs_is_atomic_file(inode)) if (f2fs_is_atomic_file(inode))
err = prepare_atomic_write_begin(sbi, page, pos, len, err = prepare_atomic_write_begin(sbi, &folio->page, pos, len,
&blkaddr, &need_balance, &use_cow); &blkaddr, &need_balance, &use_cow);
else else
err = prepare_write_begin(sbi, page, pos, len, err = prepare_write_begin(sbi, &folio->page, pos, len,
&blkaddr, &need_balance); &blkaddr, &need_balance);
if (err) if (err)
goto fail; goto put_folio;
if (need_balance && !IS_NOQUOTA(inode) && if (need_balance && !IS_NOQUOTA(inode) &&
has_not_enough_free_secs(sbi, 0, 0)) { has_not_enough_free_secs(sbi, 0, 0)) {
unlock_page(page); folio_unlock(folio);
f2fs_balance_fs(sbi, true); f2fs_balance_fs(sbi, true);
lock_page(page); folio_lock(folio);
if (page->mapping != mapping) { if (folio->mapping != mapping) {
/* The page got truncated from under us */ /* The folio got truncated from under us */
f2fs_put_page(page, 1); folio_unlock(folio);
folio_put(folio);
goto repeat; goto repeat;
} }
} }
f2fs_wait_on_page_writeback(page, DATA, false, true); f2fs_wait_on_page_writeback(&folio->page, DATA, false, true);
if (len == PAGE_SIZE || PageUptodate(page)) if (len == folio_size(folio) || folio_test_uptodate(folio))
return 0; return 0;
if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) && if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
!f2fs_verity_in_progress(inode)) { !f2fs_verity_in_progress(inode)) {
zero_user_segment(page, len, PAGE_SIZE); folio_zero_segment(folio, len, PAGE_SIZE);
return 0; return 0;
} }
if (blkaddr == NEW_ADDR) { if (blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE); folio_zero_segment(folio, 0, folio_size(folio));
SetPageUptodate(page); folio_mark_uptodate(folio);
} else { } else {
if (!f2fs_is_valid_blkaddr(sbi, blkaddr, if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
DATA_GENERIC_ENHANCE_READ)) { DATA_GENERIC_ENHANCE_READ)) {
err = -EFSCORRUPTED; err = -EFSCORRUPTED;
goto fail; goto put_folio;
} }
err = f2fs_submit_page_read(use_cow ? err = f2fs_submit_page_read(use_cow ?
F2FS_I(inode)->cow_inode : inode, page, F2FS_I(inode)->cow_inode : inode, &folio->page,
blkaddr, 0, true); blkaddr, 0, true);
if (err) if (err)
goto fail; goto put_folio;
lock_page(page); folio_lock(folio);
if (unlikely(page->mapping != mapping)) { if (unlikely(folio->mapping != mapping)) {
f2fs_put_page(page, 1); folio_unlock(folio);
folio_put(folio);
goto repeat; goto repeat;
} }
if (unlikely(!PageUptodate(page))) { if (unlikely(!folio_test_uptodate(folio))) {
err = -EIO; err = -EIO;
goto fail; goto put_folio;
} }
} }
return 0; return 0;
put_folio:
folio_unlock(folio);
folio_put(folio);
fail: fail:
f2fs_put_page(page, 1);
f2fs_write_failed(inode, pos + len); f2fs_write_failed(inode, pos + len);
return err; return err;
} }
@ -3685,9 +3691,9 @@ fail:
static int f2fs_write_end(struct file *file, static int f2fs_write_end(struct file *file,
struct address_space *mapping, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = folio->mapping->host;
trace_f2fs_write_end(inode, pos, len, copied); trace_f2fs_write_end(inode, pos, len, copied);
@ -3696,17 +3702,17 @@ static int f2fs_write_end(struct file *file,
* should be PAGE_SIZE. Otherwise, we treat it with zero copied and * should be PAGE_SIZE. Otherwise, we treat it with zero copied and
* let generic_perform_write() try to copy data again through copied=0. * let generic_perform_write() try to copy data again through copied=0.
*/ */
if (!PageUptodate(page)) { if (!folio_test_uptodate(folio)) {
if (unlikely(copied != len)) if (unlikely(copied != len))
copied = 0; copied = 0;
else else
SetPageUptodate(page); folio_mark_uptodate(folio);
} }
#ifdef CONFIG_F2FS_FS_COMPRESSION #ifdef CONFIG_F2FS_FS_COMPRESSION
/* overwrite compressed file */ /* overwrite compressed file */
if (f2fs_compressed_file(inode) && fsdata) { if (f2fs_compressed_file(inode) && fsdata) {
f2fs_compress_write_end(inode, fsdata, page->index, copied); f2fs_compress_write_end(inode, fsdata, folio->index, copied);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
if (pos + copied > i_size_read(inode) && if (pos + copied > i_size_read(inode) &&
@ -3719,7 +3725,7 @@ static int f2fs_write_end(struct file *file,
if (!copied) if (!copied)
goto unlock_out; goto unlock_out;
set_page_dirty(page); folio_mark_dirty(folio);
if (pos + copied > i_size_read(inode) && if (pos + copied > i_size_read(inode) &&
!f2fs_verity_in_progress(inode)) { !f2fs_verity_in_progress(inode)) {
@ -3729,7 +3735,8 @@ static int f2fs_write_end(struct file *file,
pos + copied); pos + copied);
} }
unlock_out: unlock_out:
f2fs_put_page(page, 1); folio_unlock(folio);
folio_put(folio);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied; return copied;
} }

View File

@ -2677,7 +2677,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
const struct address_space_operations *a_ops = mapping->a_ops; const struct address_space_operations *a_ops = mapping->a_ops;
int offset = off & (sb->s_blocksize - 1); int offset = off & (sb->s_blocksize - 1);
size_t towrite = len; size_t towrite = len;
struct page *page; struct folio *folio;
void *fsdata = NULL; void *fsdata = NULL;
int err = 0; int err = 0;
int tocopy; int tocopy;
@ -2687,7 +2687,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
towrite); towrite);
retry: retry:
err = a_ops->write_begin(NULL, mapping, off, tocopy, err = a_ops->write_begin(NULL, mapping, off, tocopy,
&page, &fsdata); &folio, &fsdata);
if (unlikely(err)) { if (unlikely(err)) {
if (err == -ENOMEM) { if (err == -ENOMEM) {
f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
@ -2697,10 +2697,10 @@ retry:
break; break;
} }
memcpy_to_page(page, offset, data, tocopy); memcpy_to_folio(folio, offset_in_folio(folio, off), data, tocopy);
a_ops->write_end(NULL, mapping, off, tocopy, tocopy, a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
page, fsdata); folio, fsdata);
offset = 0; offset = 0;
towrite -= tocopy; towrite -= tocopy;
off += tocopy; off += tocopy;

View File

@ -80,17 +80,17 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
while (count) { while (count) {
size_t n = min_t(size_t, count, size_t n = min_t(size_t, count,
PAGE_SIZE - offset_in_page(pos)); PAGE_SIZE - offset_in_page(pos));
struct page *page; struct folio *folio;
void *fsdata = NULL; void *fsdata = NULL;
int res; int res;
res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata); res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata);
if (res) if (res)
return res; return res;
memcpy_to_page(page, offset_in_page(pos), buf, n); memcpy_to_folio(folio, offset_in_folio(folio, pos), buf, n);
res = aops->write_end(NULL, mapping, pos, n, n, page, fsdata); res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata);
if (res < 0) if (res < 0)
return res; return res;
if (res != n) if (res != n)

View File

@ -221,13 +221,12 @@ static void fat_write_failed(struct address_space *mapping, loff_t to)
static int fat_write_begin(struct file *file, struct address_space *mapping, static int fat_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int err; int err;
*pagep = NULL;
err = cont_write_begin(file, mapping, pos, len, err = cont_write_begin(file, mapping, pos, len,
pagep, fsdata, fat_get_block, foliop, fsdata, fat_get_block,
&MSDOS_I(mapping->host)->mmu_private); &MSDOS_I(mapping->host)->mmu_private);
if (err < 0) if (err < 0)
fat_write_failed(mapping, pos + len); fat_write_failed(mapping, pos + len);
@ -236,11 +235,11 @@ static int fat_write_begin(struct file *file, struct address_space *mapping,
static int fat_write_end(struct file *file, struct address_space *mapping, static int fat_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *pagep, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
int err; int err;
err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (err < len) if (err < len)
fat_write_failed(mapping, pos + len); fat_write_failed(mapping, pos + len);
if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) { if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) {

View File

@ -2393,76 +2393,77 @@ out:
* but how to implement it without killing performance need more thinking. * but how to implement it without killing performance need more thinking.
*/ */
static int fuse_write_begin(struct file *file, struct address_space *mapping, static int fuse_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata) loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{ {
pgoff_t index = pos >> PAGE_SHIFT; pgoff_t index = pos >> PAGE_SHIFT;
struct fuse_conn *fc = get_fuse_conn(file_inode(file)); struct fuse_conn *fc = get_fuse_conn(file_inode(file));
struct page *page; struct folio *folio;
loff_t fsize; loff_t fsize;
int err = -ENOMEM; int err = -ENOMEM;
WARN_ON(!fc->writeback_cache); WARN_ON(!fc->writeback_cache);
page = grab_cache_page_write_begin(mapping, index); folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
if (!page) mapping_gfp_mask(mapping));
if (IS_ERR(folio))
goto error; goto error;
fuse_wait_on_page_writeback(mapping->host, page->index); fuse_wait_on_page_writeback(mapping->host, folio->index);
if (PageUptodate(page) || len == PAGE_SIZE) if (folio_test_uptodate(folio) || len >= folio_size(folio))
goto success; goto success;
/* /*
* Check if the start this page comes after the end of file, in which * Check if the start of this folio comes after the end of file,
* case the readpage can be optimized away. * in which case the readpage can be optimized away.
*/ */
fsize = i_size_read(mapping->host); fsize = i_size_read(mapping->host);
if (fsize <= (pos & PAGE_MASK)) { if (fsize <= folio_pos(folio)) {
size_t off = pos & ~PAGE_MASK; size_t off = offset_in_folio(folio, pos);
if (off) if (off)
zero_user_segment(page, 0, off); folio_zero_segment(folio, 0, off);
goto success; goto success;
} }
err = fuse_do_readpage(file, page); err = fuse_do_readpage(file, &folio->page);
if (err) if (err)
goto cleanup; goto cleanup;
success: success:
*pagep = page; *foliop = folio;
return 0; return 0;
cleanup: cleanup:
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
error: error:
return err; return err;
} }
static int fuse_write_end(struct file *file, struct address_space *mapping, static int fuse_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = folio->mapping->host;
/* Haven't copied anything? Skip zeroing, size extending, dirtying. */ /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
if (!copied) if (!copied)
goto unlock; goto unlock;
pos += copied; pos += copied;
if (!PageUptodate(page)) { if (!folio_test_uptodate(folio)) {
/* Zero any unwritten bytes at the end of the page */ /* Zero any unwritten bytes at the end of the page */
size_t endoff = pos & ~PAGE_MASK; size_t endoff = pos & ~PAGE_MASK;
if (endoff) if (endoff)
zero_user_segment(page, endoff, PAGE_SIZE); folio_zero_segment(folio, endoff, PAGE_SIZE);
SetPageUptodate(page); folio_mark_uptodate(folio);
} }
if (pos > inode->i_size) if (pos > inode->i_size)
i_size_write(inode, pos); i_size_write(inode, pos);
set_page_dirty(page); folio_mark_dirty(folio);
unlock: unlock:
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
return copied; return copied;
} }

View File

@ -487,15 +487,15 @@ void hfs_file_truncate(struct inode *inode)
if (inode->i_size > HFS_I(inode)->phys_size) { if (inode->i_size > HFS_I(inode)->phys_size) {
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
void *fsdata = NULL; void *fsdata = NULL;
struct page *page; struct folio *folio;
/* XXX: Can use generic_cont_expand? */ /* XXX: Can use generic_cont_expand? */
size = inode->i_size - 1; size = inode->i_size - 1;
res = hfs_write_begin(NULL, mapping, size + 1, 0, &page, res = hfs_write_begin(NULL, mapping, size + 1, 0, &folio,
&fsdata); &fsdata);
if (!res) { if (!res) {
res = generic_write_end(NULL, mapping, size + 1, 0, 0, res = generic_write_end(NULL, mapping, size + 1, 0, 0,
page, fsdata); folio, fsdata);
} }
if (res) if (res)
inode->i_size = HFS_I(inode)->phys_size; inode->i_size = HFS_I(inode)->phys_size;

View File

@ -202,7 +202,7 @@ extern const struct address_space_operations hfs_aops;
extern const struct address_space_operations hfs_btree_aops; extern const struct address_space_operations hfs_btree_aops;
int hfs_write_begin(struct file *file, struct address_space *mapping, int hfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata); loff_t pos, unsigned len, struct folio **foliop, void **fsdata);
extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t); extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t);
extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *); extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
extern int hfs_write_inode(struct inode *, struct writeback_control *); extern int hfs_write_inode(struct inode *, struct writeback_control *);

View File

@ -45,12 +45,11 @@ static void hfs_write_failed(struct address_space *mapping, loff_t to)
} }
int hfs_write_begin(struct file *file, struct address_space *mapping, int hfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata) loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
*pagep = NULL; ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
hfs_get_block, hfs_get_block,
&HFS_I(mapping->host)->phys_size); &HFS_I(mapping->host)->phys_size);
if (unlikely(ret)) if (unlikely(ret))

View File

@ -554,16 +554,16 @@ void hfsplus_file_truncate(struct inode *inode)
if (inode->i_size > hip->phys_size) { if (inode->i_size > hip->phys_size) {
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
struct page *page; struct folio *folio;
void *fsdata = NULL; void *fsdata = NULL;
loff_t size = inode->i_size; loff_t size = inode->i_size;
res = hfsplus_write_begin(NULL, mapping, size, 0, res = hfsplus_write_begin(NULL, mapping, size, 0,
&page, &fsdata); &folio, &fsdata);
if (res) if (res)
return; return;
res = generic_write_end(NULL, mapping, size, 0, 0, res = generic_write_end(NULL, mapping, size, 0, 0,
page, fsdata); folio, fsdata);
if (res < 0) if (res < 0)
return; return;
mark_inode_dirty(inode); mark_inode_dirty(inode);

View File

@ -472,7 +472,7 @@ extern const struct address_space_operations hfsplus_btree_aops;
extern const struct dentry_operations hfsplus_dentry_operations; extern const struct dentry_operations hfsplus_dentry_operations;
int hfsplus_write_begin(struct file *file, struct address_space *mapping, int hfsplus_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata); loff_t pos, unsigned len, struct folio **foliop, void **fsdata);
struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir, struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
umode_t mode); umode_t mode);
void hfsplus_delete_inode(struct inode *inode); void hfsplus_delete_inode(struct inode *inode);

View File

@ -39,12 +39,11 @@ static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
} }
int hfsplus_write_begin(struct file *file, struct address_space *mapping, int hfsplus_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, void **fsdata) loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
*pagep = NULL; ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
hfsplus_get_block, hfsplus_get_block,
&HFSPLUS_I(mapping->host)->phys_size); &HFSPLUS_I(mapping->host)->phys_size);
if (unlikely(ret)) if (unlikely(ret))

View File

@ -465,31 +465,32 @@ static int hostfs_read_folio(struct file *file, struct folio *folio)
static int hostfs_write_begin(struct file *file, struct address_space *mapping, static int hostfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
pgoff_t index = pos >> PAGE_SHIFT; pgoff_t index = pos >> PAGE_SHIFT;
*pagep = grab_cache_page_write_begin(mapping, index); *foliop = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
if (!*pagep) mapping_gfp_mask(mapping));
if (!*foliop)
return -ENOMEM; return -ENOMEM;
return 0; return 0;
} }
static int hostfs_write_end(struct file *file, struct address_space *mapping, static int hostfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
void *buffer; void *buffer;
unsigned from = pos & (PAGE_SIZE - 1); size_t from = offset_in_folio(folio, pos);
int err; int err;
buffer = kmap_local_page(page); buffer = kmap_local_folio(folio, from);
err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied); err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer, copied);
kunmap_local(buffer); kunmap_local(buffer);
if (!PageUptodate(page) && err == PAGE_SIZE) if (!folio_test_uptodate(folio) && err == folio_size(folio))
SetPageUptodate(page); folio_mark_uptodate(folio);
/* /*
* If err > 0, write_file has added err to pos, so we are comparing * If err > 0, write_file has added err to pos, so we are comparing
@ -497,8 +498,8 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
*/ */
if (err > 0 && (pos > inode->i_size)) if (err > 0 && (pos > inode->i_size))
inode->i_size = pos; inode->i_size = pos;
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
return err; return err;
} }

View File

@ -190,12 +190,11 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to)
static int hpfs_write_begin(struct file *file, struct address_space *mapping, static int hpfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
*pagep = NULL; ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
hpfs_get_block, hpfs_get_block,
&hpfs_i(mapping->host)->mmu_private); &hpfs_i(mapping->host)->mmu_private);
if (unlikely(ret)) if (unlikely(ret))
@ -206,11 +205,11 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping,
static int hpfs_write_end(struct file *file, struct address_space *mapping, static int hpfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *pagep, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
int err; int err;
err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); err = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (err < len) if (err < len)
hpfs_write_failed(mapping, pos + len); hpfs_write_failed(mapping, pos + len);
if (!(err < 0)) { if (!(err < 0)) {

View File

@ -388,14 +388,14 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
static int hugetlbfs_write_begin(struct file *file, static int hugetlbfs_write_begin(struct file *file,
struct address_space *mapping, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
return -EINVAL; return -EINVAL;
} }
static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
BUG(); BUG();
return -EINVAL; return -EINVAL;

View File

@ -900,7 +900,7 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
size_t bh_written; size_t bh_written;
bh_written = block_write_end(NULL, iter->inode->i_mapping, pos, bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
len, copied, &folio->page, NULL); len, copied, folio, NULL);
WARN_ON_ONCE(bh_written != copied && bh_written != 0); WARN_ON_ONCE(bh_written != copied && bh_written != 0);
return bh_written == copied; return bh_written == copied;
} }

View File

@ -23,10 +23,10 @@
static int jffs2_write_end(struct file *filp, struct address_space *mapping, static int jffs2_write_end(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *pg, void *fsdata); struct folio *folio, void *fsdata);
static int jffs2_write_begin(struct file *filp, struct address_space *mapping, static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata); struct folio **foliop, void **fsdata);
static int jffs2_read_folio(struct file *filp, struct folio *folio); static int jffs2_read_folio(struct file *filp, struct folio *folio);
int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync) int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
@ -77,29 +77,27 @@ const struct address_space_operations jffs2_file_address_operations =
.write_end = jffs2_write_end, .write_end = jffs2_write_end,
}; };
static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) static int jffs2_do_readpage_nolock(struct inode *inode, struct folio *folio)
{ {
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
unsigned char *pg_buf; unsigned char *kaddr;
int ret; int ret;
jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n", jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
__func__, inode->i_ino, pg->index << PAGE_SHIFT); __func__, inode->i_ino, folio->index << PAGE_SHIFT);
BUG_ON(!PageLocked(pg)); BUG_ON(!folio_test_locked(folio));
pg_buf = kmap(pg); kaddr = kmap_local_folio(folio, 0);
/* FIXME: Can kmap fail? */ ret = jffs2_read_inode_range(c, f, kaddr, folio->index << PAGE_SHIFT,
ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
PAGE_SIZE); PAGE_SIZE);
kunmap_local(kaddr);
if (!ret) if (!ret)
SetPageUptodate(pg); folio_mark_uptodate(folio);
flush_dcache_page(pg); flush_dcache_folio(folio);
kunmap(pg);
jffs2_dbg(2, "readpage finished\n"); jffs2_dbg(2, "readpage finished\n");
return ret; return ret;
@ -107,7 +105,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
int __jffs2_read_folio(struct file *file, struct folio *folio) int __jffs2_read_folio(struct file *file, struct folio *folio)
{ {
int ret = jffs2_do_readpage_nolock(folio->mapping->host, &folio->page); int ret = jffs2_do_readpage_nolock(folio->mapping->host, folio);
folio_unlock(folio); folio_unlock(folio);
return ret; return ret;
} }
@ -125,9 +123,9 @@ static int jffs2_read_folio(struct file *file, struct folio *folio)
static int jffs2_write_begin(struct file *filp, struct address_space *mapping, static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
struct page *pg; struct folio *folio;
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
@ -206,29 +204,30 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
* page in read_cache_page(), which causes a deadlock. * page in read_cache_page(), which causes a deadlock.
*/ */
mutex_lock(&c->alloc_sem); mutex_lock(&c->alloc_sem);
pg = grab_cache_page_write_begin(mapping, index); folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
if (!pg) { mapping_gfp_mask(mapping));
ret = -ENOMEM; if (IS_ERR(folio)) {
ret = PTR_ERR(folio);
goto release_sem; goto release_sem;
} }
*pagep = pg; *foliop = folio;
/* /*
* Read in the page if it wasn't already present. Cannot optimize away * Read in the folio if it wasn't already present. Cannot optimize away
* the whole page write case until jffs2_write_end can handle the * the whole folio write case until jffs2_write_end can handle the
* case of a short-copy. * case of a short-copy.
*/ */
if (!PageUptodate(pg)) { if (!folio_test_uptodate(folio)) {
mutex_lock(&f->sem); mutex_lock(&f->sem);
ret = jffs2_do_readpage_nolock(inode, pg); ret = jffs2_do_readpage_nolock(inode, folio);
mutex_unlock(&f->sem); mutex_unlock(&f->sem);
if (ret) { if (ret) {
unlock_page(pg); folio_unlock(folio);
put_page(pg); folio_put(folio);
goto release_sem; goto release_sem;
} }
} }
jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); jffs2_dbg(1, "end write_begin(). folio->flags %lx\n", folio->flags);
release_sem: release_sem:
mutex_unlock(&c->alloc_sem); mutex_unlock(&c->alloc_sem);
@ -238,7 +237,7 @@ out_err:
static int jffs2_write_end(struct file *filp, struct address_space *mapping, static int jffs2_write_end(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *pg, void *fsdata) struct folio *folio, void *fsdata)
{ {
/* Actually commit the write from the page cache page we're looking at. /* Actually commit the write from the page cache page we're looking at.
* For now, we write the full page out each time. It sucks, but it's simple * For now, we write the full page out each time. It sucks, but it's simple
@ -252,16 +251,17 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
unsigned aligned_start = start & ~3; unsigned aligned_start = start & ~3;
int ret = 0; int ret = 0;
uint32_t writtenlen = 0; uint32_t writtenlen = 0;
void *buf;
jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", jffs2_dbg(1, "%s(): ino #%lu, page at 0x%llx, range %d-%d, flags %lx\n",
__func__, inode->i_ino, pg->index << PAGE_SHIFT, __func__, inode->i_ino, folio_pos(folio),
start, end, pg->flags); start, end, folio->flags);
/* We need to avoid deadlock with page_cache_read() in /* We need to avoid deadlock with page_cache_read() in
jffs2_garbage_collect_pass(). So the page must be jffs2_garbage_collect_pass(). So the folio must be
up to date to prevent page_cache_read() from trying up to date to prevent page_cache_read() from trying
to re-lock it. */ to re-lock it. */
BUG_ON(!PageUptodate(pg)); BUG_ON(!folio_test_uptodate(folio));
if (end == PAGE_SIZE) { if (end == PAGE_SIZE) {
/* When writing out the end of a page, write out the /* When writing out the end of a page, write out the
@ -276,8 +276,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
if (!ri) { if (!ri) {
jffs2_dbg(1, "%s(): Allocation of raw inode failed\n", jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
__func__); __func__);
unlock_page(pg); folio_unlock(folio);
put_page(pg); folio_put(folio);
return -ENOMEM; return -ENOMEM;
} }
@ -289,15 +289,11 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
ri->isize = cpu_to_je32((uint32_t)inode->i_size); ri->isize = cpu_to_je32((uint32_t)inode->i_size);
ri->atime = ri->ctime = ri->mtime = cpu_to_je32(JFFS2_NOW()); ri->atime = ri->ctime = ri->mtime = cpu_to_je32(JFFS2_NOW());
/* In 2.4, it was already kmapped by generic_file_write(). Doesn't buf = kmap_local_folio(folio, aligned_start);
hurt to do it again. The alternative is ifdefs, which are ugly. */ ret = jffs2_write_inode_range(c, f, ri, buf,
kmap(pg); folio_pos(folio) + aligned_start,
ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
(pg->index << PAGE_SHIFT) + aligned_start,
end - aligned_start, &writtenlen); end - aligned_start, &writtenlen);
kunmap_local(buf);
kunmap(pg);
if (ret) if (ret)
mapping_set_error(mapping, ret); mapping_set_error(mapping, ret);
@ -323,12 +319,12 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
it gets reread */ it gets reread */
jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n", jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n",
__func__); __func__);
ClearPageUptodate(pg); folio_clear_uptodate(folio);
} }
jffs2_dbg(1, "%s() returning %d\n", jffs2_dbg(1, "%s() returning %d\n",
__func__, writtenlen > 0 ? writtenlen : ret); __func__, writtenlen > 0 ? writtenlen : ret);
unlock_page(pg); folio_unlock(folio);
put_page(pg); folio_put(folio);
return writtenlen > 0 ? writtenlen : ret; return writtenlen > 0 ? writtenlen : ret;
} }

View File

@ -1171,7 +1171,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
uint32_t alloclen, offset, orig_end, orig_start; uint32_t alloclen, offset, orig_end, orig_start;
int ret = 0; int ret = 0;
unsigned char *comprbuf = NULL, *writebuf; unsigned char *comprbuf = NULL, *writebuf;
struct page *page; struct folio *folio;
unsigned char *pg_ptr; unsigned char *pg_ptr;
memset(&ri, 0, sizeof(ri)); memset(&ri, 0, sizeof(ri));
@ -1317,25 +1317,25 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
BUG_ON(start > orig_start); BUG_ON(start > orig_start);
} }
/* The rules state that we must obtain the page lock *before* f->sem, so /* The rules state that we must obtain the folio lock *before* f->sem, so
* drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
* actually going to *change* so we're safe; we only allow reading. * actually going to *change* so we're safe; we only allow reading.
* *
* It is important to note that jffs2_write_begin() will ensure that its * It is important to note that jffs2_write_begin() will ensure that its
* page is marked Uptodate before allocating space. That means that if we * folio is marked uptodate before allocating space. That means that if we
* end up here trying to GC the *same* page that jffs2_write_begin() is * end up here trying to GC the *same* folio that jffs2_write_begin() is
* trying to write out, read_cache_page() will not deadlock. */ * trying to write out, read_cache_folio() will not deadlock. */
mutex_unlock(&f->sem); mutex_unlock(&f->sem);
page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT, folio = read_cache_folio(inode->i_mapping, start >> PAGE_SHIFT,
__jffs2_read_folio, NULL); __jffs2_read_folio, NULL);
if (IS_ERR(page)) { if (IS_ERR(folio)) {
pr_warn("read_cache_page() returned error: %ld\n", pr_warn("read_cache_folio() returned error: %ld\n",
PTR_ERR(page)); PTR_ERR(folio));
mutex_lock(&f->sem); mutex_lock(&f->sem);
return PTR_ERR(page); return PTR_ERR(folio);
} }
pg_ptr = kmap(page); pg_ptr = kmap_local_folio(folio, 0);
mutex_lock(&f->sem); mutex_lock(&f->sem);
offset = start; offset = start;
@ -1400,7 +1400,6 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
} }
} }
kunmap(page); folio_release_kmap(folio, pg_ptr);
put_page(page);
return ret; return ret;
} }

View File

@ -292,11 +292,11 @@ static void jfs_write_failed(struct address_space *mapping, loff_t to)
static int jfs_write_begin(struct file *file, struct address_space *mapping, static int jfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
ret = block_write_begin(mapping, pos, len, pagep, jfs_get_block); ret = block_write_begin(mapping, pos, len, foliop, jfs_get_block);
if (unlikely(ret)) if (unlikely(ret))
jfs_write_failed(mapping, pos + len); jfs_write_failed(mapping, pos + len);
@ -304,12 +304,12 @@ static int jfs_write_begin(struct file *file, struct address_space *mapping,
} }
static int jfs_write_end(struct file *file, struct address_space *mapping, static int jfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, struct page *page, loff_t pos, unsigned len, unsigned copied, struct folio *folio,
void *fsdata) void *fsdata)
{ {
int ret; int ret;
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (ret < len) if (ret < len)
jfs_write_failed(mapping, pos + len); jfs_write_failed(mapping, pos + len);
return ret; return ret;

View File

@ -914,7 +914,7 @@ static int simple_read_folio(struct file *file, struct folio *folio)
int simple_write_begin(struct file *file, struct address_space *mapping, int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
struct folio *folio; struct folio *folio;
@ -923,7 +923,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
if (IS_ERR(folio)) if (IS_ERR(folio))
return PTR_ERR(folio); return PTR_ERR(folio);
*pagep = &folio->page; *foliop = folio;
if (!folio_test_uptodate(folio) && (len != folio_size(folio))) { if (!folio_test_uptodate(folio) && (len != folio_size(folio))) {
size_t from = offset_in_folio(folio, pos); size_t from = offset_in_folio(folio, pos);
@ -942,11 +942,11 @@ EXPORT_SYMBOL(simple_write_begin);
* @pos: " * @pos: "
* @len: " * @len: "
* @copied: " * @copied: "
* @page: " * @folio: "
* @fsdata: " * @fsdata: "
* *
* simple_write_end does the minimum needed for updating a page after writing is * simple_write_end does the minimum needed for updating a folio after
* done. It has the same API signature as the .write_end of * writing is done. It has the same API signature as the .write_end of
* address_space_operations vector. So it can just be set onto .write_end for * address_space_operations vector. So it can just be set onto .write_end for
* FSes that don't need any other processing. i_mutex is assumed to be held. * FSes that don't need any other processing. i_mutex is assumed to be held.
* Block based filesystems should use generic_write_end(). * Block based filesystems should use generic_write_end().
@ -959,9 +959,8 @@ EXPORT_SYMBOL(simple_write_begin);
*/ */
static int simple_write_end(struct file *file, struct address_space *mapping, static int simple_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct folio *folio = page_folio(page);
struct inode *inode = folio->mapping->host; struct inode *inode = folio->mapping->host;
loff_t last_pos = pos + copied; loff_t last_pos = pos + copied;

View File

@ -40,18 +40,18 @@ minix_last_byte(struct inode *inode, unsigned long page_nr)
return last_byte; return last_byte;
} }
static void dir_commit_chunk(struct page *page, loff_t pos, unsigned len) static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{ {
struct address_space *mapping = page->mapping; struct address_space *mapping = folio->mapping;
struct inode *dir = mapping->host; struct inode *dir = mapping->host;
block_write_end(NULL, mapping, pos, len, len, page, NULL); block_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (pos+len > dir->i_size) { if (pos+len > dir->i_size) {
i_size_write(dir, pos+len); i_size_write(dir, pos+len);
mark_inode_dirty(dir); mark_inode_dirty(dir);
} }
unlock_page(page); folio_unlock(folio);
} }
static int minix_handle_dirsync(struct inode *dir) static int minix_handle_dirsync(struct inode *dir)
@ -64,14 +64,15 @@ static int minix_handle_dirsync(struct inode *dir)
return err; return err;
} }
static void *dir_get_page(struct inode *dir, unsigned long n, struct page **p) static void *dir_get_folio(struct inode *dir, unsigned long n,
struct folio **foliop)
{ {
struct address_space *mapping = dir->i_mapping; struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);
struct page *page = read_mapping_page(mapping, n, NULL);
if (IS_ERR(page)) if (IS_ERR(folio))
return ERR_CAST(page); return ERR_CAST(folio);
*p = page; *foliop = folio;
return kmap_local_page(page); return kmap_local_folio(folio, 0);
} }
static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi) static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
@ -99,9 +100,9 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
for ( ; n < npages; n++, offset = 0) { for ( ; n < npages; n++, offset = 0) {
char *p, *kaddr, *limit; char *p, *kaddr, *limit;
struct page *page; struct folio *folio;
kaddr = dir_get_page(inode, n, &page); kaddr = dir_get_folio(inode, n, &folio);
if (IS_ERR(kaddr)) if (IS_ERR(kaddr))
continue; continue;
p = kaddr+offset; p = kaddr+offset;
@ -122,13 +123,13 @@ static int minix_readdir(struct file *file, struct dir_context *ctx)
unsigned l = strnlen(name, sbi->s_namelen); unsigned l = strnlen(name, sbi->s_namelen);
if (!dir_emit(ctx, name, l, if (!dir_emit(ctx, name, l,
inumber, DT_UNKNOWN)) { inumber, DT_UNKNOWN)) {
unmap_and_put_page(page, p); folio_release_kmap(folio, p);
return 0; return 0;
} }
} }
ctx->pos += chunk_size; ctx->pos += chunk_size;
} }
unmap_and_put_page(page, kaddr); folio_release_kmap(folio, kaddr);
} }
return 0; return 0;
} }
@ -144,12 +145,13 @@ static inline int namecompare(int len, int maxlen,
/* /*
* minix_find_entry() * minix_find_entry()
* *
* finds an entry in the specified directory with the wanted name. It * finds an entry in the specified directory with the wanted name.
* returns the cache buffer in which the entry was found, and the entry * It does NOT read the inode of the
* itself (as a parameter - res_dir). It does NOT read the inode of the
* entry - you'll have to do that yourself if you want to. * entry - you'll have to do that yourself if you want to.
*
* On Success folio_release_kmap() should be called on *foliop.
*/ */
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page) minix_dirent *minix_find_entry(struct dentry *dentry, struct folio **foliop)
{ {
const char * name = dentry->d_name.name; const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len; int namelen = dentry->d_name.len;
@ -158,17 +160,15 @@ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
struct minix_sb_info * sbi = minix_sb(sb); struct minix_sb_info * sbi = minix_sb(sb);
unsigned long n; unsigned long n;
unsigned long npages = dir_pages(dir); unsigned long npages = dir_pages(dir);
struct page *page = NULL;
char *p; char *p;
char *namx; char *namx;
__u32 inumber; __u32 inumber;
*res_page = NULL;
for (n = 0; n < npages; n++) { for (n = 0; n < npages; n++) {
char *kaddr, *limit; char *kaddr, *limit;
kaddr = dir_get_page(dir, n, &page); kaddr = dir_get_folio(dir, n, foliop);
if (IS_ERR(kaddr)) if (IS_ERR(kaddr))
continue; continue;
@ -188,12 +188,11 @@ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
if (namecompare(namelen, sbi->s_namelen, name, namx)) if (namecompare(namelen, sbi->s_namelen, name, namx))
goto found; goto found;
} }
unmap_and_put_page(page, kaddr); folio_release_kmap(*foliop, kaddr);
} }
return NULL; return NULL;
found: found:
*res_page = page;
return (minix_dirent *)p; return (minix_dirent *)p;
} }
@ -204,7 +203,7 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
int namelen = dentry->d_name.len; int namelen = dentry->d_name.len;
struct super_block * sb = dir->i_sb; struct super_block * sb = dir->i_sb;
struct minix_sb_info * sbi = minix_sb(sb); struct minix_sb_info * sbi = minix_sb(sb);
struct page *page = NULL; struct folio *folio = NULL;
unsigned long npages = dir_pages(dir); unsigned long npages = dir_pages(dir);
unsigned long n; unsigned long n;
char *kaddr, *p; char *kaddr, *p;
@ -223,10 +222,10 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
for (n = 0; n <= npages; n++) { for (n = 0; n <= npages; n++) {
char *limit, *dir_end; char *limit, *dir_end;
kaddr = dir_get_page(dir, n, &page); kaddr = dir_get_folio(dir, n, &folio);
if (IS_ERR(kaddr)) if (IS_ERR(kaddr))
return PTR_ERR(kaddr); return PTR_ERR(kaddr);
lock_page(page); folio_lock(folio);
dir_end = kaddr + minix_last_byte(dir, n); dir_end = kaddr + minix_last_byte(dir, n);
limit = kaddr + PAGE_SIZE - sbi->s_dirsize; limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
@ -253,15 +252,15 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
if (namecompare(namelen, sbi->s_namelen, name, namx)) if (namecompare(namelen, sbi->s_namelen, name, namx))
goto out_unlock; goto out_unlock;
} }
unlock_page(page); folio_unlock(folio);
unmap_and_put_page(page, kaddr); folio_release_kmap(folio, kaddr);
} }
BUG(); BUG();
return -EINVAL; return -EINVAL;
got_it: got_it:
pos = page_offset(page) + offset_in_page(p); pos = folio_pos(folio) + offset_in_folio(folio, p);
err = minix_prepare_chunk(page, pos, sbi->s_dirsize); err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
if (err) if (err)
goto out_unlock; goto out_unlock;
memcpy (namx, name, namelen); memcpy (namx, name, namelen);
@ -272,37 +271,37 @@ got_it:
memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2); memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
de->inode = inode->i_ino; de->inode = inode->i_ino;
} }
dir_commit_chunk(page, pos, sbi->s_dirsize); dir_commit_chunk(folio, pos, sbi->s_dirsize);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir); mark_inode_dirty(dir);
err = minix_handle_dirsync(dir); err = minix_handle_dirsync(dir);
out_put: out_put:
unmap_and_put_page(page, kaddr); folio_release_kmap(folio, kaddr);
return err; return err;
out_unlock: out_unlock:
unlock_page(page); folio_unlock(folio);
goto out_put; goto out_put;
} }
int minix_delete_entry(struct minix_dir_entry *de, struct page *page) int minix_delete_entry(struct minix_dir_entry *de, struct folio *folio)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = folio->mapping->host;
loff_t pos = page_offset(page) + offset_in_page(de); loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
struct minix_sb_info *sbi = minix_sb(inode->i_sb); struct minix_sb_info *sbi = minix_sb(inode->i_sb);
unsigned len = sbi->s_dirsize; unsigned len = sbi->s_dirsize;
int err; int err;
lock_page(page); folio_lock(folio);
err = minix_prepare_chunk(page, pos, len); err = minix_prepare_chunk(folio, pos, len);
if (err) { if (err) {
unlock_page(page); folio_unlock(folio);
return err; return err;
} }
if (sbi->s_version == MINIX_V3) if (sbi->s_version == MINIX_V3)
((minix3_dirent *)de)->inode = 0; ((minix3_dirent *)de)->inode = 0;
else else
de->inode = 0; de->inode = 0;
dir_commit_chunk(page, pos, len); dir_commit_chunk(folio, pos, len);
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode); mark_inode_dirty(inode);
return minix_handle_dirsync(inode); return minix_handle_dirsync(inode);
@ -310,21 +309,21 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
int minix_make_empty(struct inode *inode, struct inode *dir) int minix_make_empty(struct inode *inode, struct inode *dir)
{ {
struct page *page = grab_cache_page(inode->i_mapping, 0); struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
struct minix_sb_info *sbi = minix_sb(inode->i_sb); struct minix_sb_info *sbi = minix_sb(inode->i_sb);
char *kaddr; char *kaddr;
int err; int err;
if (!page) if (IS_ERR(folio))
return -ENOMEM; return PTR_ERR(folio);
err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize); err = minix_prepare_chunk(folio, 0, 2 * sbi->s_dirsize);
if (err) { if (err) {
unlock_page(page); folio_unlock(folio);
goto fail; goto fail;
} }
kaddr = kmap_local_page(page); kaddr = kmap_local_folio(folio, 0);
memset(kaddr, 0, PAGE_SIZE); memset(kaddr, 0, folio_size(folio));
if (sbi->s_version == MINIX_V3) { if (sbi->s_version == MINIX_V3) {
minix3_dirent *de3 = (minix3_dirent *)kaddr; minix3_dirent *de3 = (minix3_dirent *)kaddr;
@ -345,10 +344,10 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
} }
kunmap_local(kaddr); kunmap_local(kaddr);
dir_commit_chunk(page, 0, 2 * sbi->s_dirsize); dir_commit_chunk(folio, 0, 2 * sbi->s_dirsize);
err = minix_handle_dirsync(inode); err = minix_handle_dirsync(inode);
fail: fail:
put_page(page); folio_put(folio);
return err; return err;
} }
@ -357,7 +356,7 @@ fail:
*/ */
int minix_empty_dir(struct inode * inode) int minix_empty_dir(struct inode * inode)
{ {
struct page *page = NULL; struct folio *folio = NULL;
unsigned long i, npages = dir_pages(inode); unsigned long i, npages = dir_pages(inode);
struct minix_sb_info *sbi = minix_sb(inode->i_sb); struct minix_sb_info *sbi = minix_sb(inode->i_sb);
char *name, *kaddr; char *name, *kaddr;
@ -366,7 +365,7 @@ int minix_empty_dir(struct inode * inode)
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
char *p, *limit; char *p, *limit;
kaddr = dir_get_page(inode, i, &page); kaddr = dir_get_folio(inode, i, &folio);
if (IS_ERR(kaddr)) if (IS_ERR(kaddr))
continue; continue;
@ -395,44 +394,44 @@ int minix_empty_dir(struct inode * inode)
goto not_empty; goto not_empty;
} }
} }
unmap_and_put_page(page, kaddr); folio_release_kmap(folio, kaddr);
} }
return 1; return 1;
not_empty: not_empty:
unmap_and_put_page(page, kaddr); folio_release_kmap(folio, kaddr);
return 0; return 0;
} }
/* Releases the page */ /* Releases the page */
int minix_set_link(struct minix_dir_entry *de, struct page *page, int minix_set_link(struct minix_dir_entry *de, struct folio *folio,
struct inode *inode) struct inode *inode)
{ {
struct inode *dir = page->mapping->host; struct inode *dir = folio->mapping->host;
struct minix_sb_info *sbi = minix_sb(dir->i_sb); struct minix_sb_info *sbi = minix_sb(dir->i_sb);
loff_t pos = page_offset(page) + offset_in_page(de); loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
int err; int err;
lock_page(page); folio_lock(folio);
err = minix_prepare_chunk(page, pos, sbi->s_dirsize); err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
if (err) { if (err) {
unlock_page(page); folio_unlock(folio);
return err; return err;
} }
if (sbi->s_version == MINIX_V3) if (sbi->s_version == MINIX_V3)
((minix3_dirent *)de)->inode = inode->i_ino; ((minix3_dirent *)de)->inode = inode->i_ino;
else else
de->inode = inode->i_ino; de->inode = inode->i_ino;
dir_commit_chunk(page, pos, sbi->s_dirsize); dir_commit_chunk(folio, pos, sbi->s_dirsize);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir); mark_inode_dirty(dir);
return minix_handle_dirsync(dir); return minix_handle_dirsync(dir);
} }
struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p) struct minix_dir_entry *minix_dotdot(struct inode *dir, struct folio **foliop)
{ {
struct minix_sb_info *sbi = minix_sb(dir->i_sb); struct minix_sb_info *sbi = minix_sb(dir->i_sb);
struct minix_dir_entry *de = dir_get_page(dir, 0, p); struct minix_dir_entry *de = dir_get_folio(dir, 0, foliop);
if (!IS_ERR(de)) if (!IS_ERR(de))
return minix_next_entry(de, sbi); return minix_next_entry(de, sbi);
@ -441,20 +440,19 @@ struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
ino_t minix_inode_by_name(struct dentry *dentry) ino_t minix_inode_by_name(struct dentry *dentry)
{ {
struct page *page; struct folio *folio;
struct minix_dir_entry *de = minix_find_entry(dentry, &page); struct minix_dir_entry *de = minix_find_entry(dentry, &folio);
ino_t res = 0; ino_t res = 0;
if (de) { if (de) {
struct address_space *mapping = page->mapping; struct inode *inode = folio->mapping->host;
struct inode *inode = mapping->host;
struct minix_sb_info *sbi = minix_sb(inode->i_sb); struct minix_sb_info *sbi = minix_sb(inode->i_sb);
if (sbi->s_version == MINIX_V3) if (sbi->s_version == MINIX_V3)
res = ((minix3_dirent *) de)->inode; res = ((minix3_dirent *) de)->inode;
else else
res = de->inode; res = de->inode;
unmap_and_put_page(page, de); folio_release_kmap(folio, de);
} }
return res; return res;
} }

View File

@ -427,9 +427,9 @@ static int minix_read_folio(struct file *file, struct folio *folio)
return block_read_full_folio(folio, minix_get_block); return block_read_full_folio(folio, minix_get_block);
} }
/*
 * Prepare @len bytes at @pos in @folio for a directory update, mapping
 * (and allocating, as needed) the underlying blocks via minix_get_block().
 * Returns 0 or a negative errno from __block_write_begin().
 */
int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	return __block_write_begin(folio, pos, len, minix_get_block);
}
static void minix_write_failed(struct address_space *mapping, loff_t to) static void minix_write_failed(struct address_space *mapping, loff_t to)
@ -444,11 +444,11 @@ static void minix_write_failed(struct address_space *mapping, loff_t to)
static int minix_write_begin(struct file *file, struct address_space *mapping, static int minix_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
ret = block_write_begin(mapping, pos, len, pagep, minix_get_block); ret = block_write_begin(mapping, pos, len, foliop, minix_get_block);
if (unlikely(ret)) if (unlikely(ret))
minix_write_failed(mapping, pos + len); minix_write_failed(mapping, pos + len);

View File

@ -42,18 +42,18 @@ struct minix_sb_info {
unsigned short s_version; unsigned short s_version;
}; };
extern struct inode *minix_iget(struct super_block *, unsigned long); struct inode *minix_iget(struct super_block *, unsigned long);
extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **); struct minix_inode *minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); struct minix2_inode *minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
extern struct inode * minix_new_inode(const struct inode *, umode_t); struct inode *minix_new_inode(const struct inode *, umode_t);
extern void minix_free_inode(struct inode * inode); void minix_free_inode(struct inode *inode);
extern unsigned long minix_count_free_inodes(struct super_block *sb); unsigned long minix_count_free_inodes(struct super_block *sb);
extern int minix_new_block(struct inode * inode); int minix_new_block(struct inode *inode);
extern void minix_free_block(struct inode *inode, unsigned long block); void minix_free_block(struct inode *inode, unsigned long block);
extern unsigned long minix_count_free_blocks(struct super_block *sb); unsigned long minix_count_free_blocks(struct super_block *sb);
extern int minix_getattr(struct mnt_idmap *, const struct path *, int minix_getattr(struct mnt_idmap *, const struct path *,
struct kstat *, u32, unsigned int); struct kstat *, u32, unsigned int);
extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len); int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);
extern void V1_minix_truncate(struct inode *); extern void V1_minix_truncate(struct inode *);
extern void V2_minix_truncate(struct inode *); extern void V2_minix_truncate(struct inode *);
@ -64,15 +64,15 @@ extern int V2_minix_get_block(struct inode *, long, struct buffer_head *, int);
extern unsigned V1_minix_blocks(loff_t, struct super_block *); extern unsigned V1_minix_blocks(loff_t, struct super_block *);
extern unsigned V2_minix_blocks(loff_t, struct super_block *); extern unsigned V2_minix_blocks(loff_t, struct super_block *);
extern struct minix_dir_entry *minix_find_entry(struct dentry*, struct page**); struct minix_dir_entry *minix_find_entry(struct dentry *, struct folio **);
extern int minix_add_link(struct dentry*, struct inode*); int minix_add_link(struct dentry*, struct inode*);
extern int minix_delete_entry(struct minix_dir_entry*, struct page*); int minix_delete_entry(struct minix_dir_entry *, struct folio *);
extern int minix_make_empty(struct inode*, struct inode*); int minix_make_empty(struct inode*, struct inode*);
extern int minix_empty_dir(struct inode*); int minix_empty_dir(struct inode*);
int minix_set_link(struct minix_dir_entry *de, struct page *page, int minix_set_link(struct minix_dir_entry *de, struct folio *folio,
struct inode *inode); struct inode *inode);
extern struct minix_dir_entry *minix_dotdot(struct inode*, struct page**); struct minix_dir_entry *minix_dotdot(struct inode*, struct folio **);
extern ino_t minix_inode_by_name(struct dentry*); ino_t minix_inode_by_name(struct dentry*);
extern const struct inode_operations minix_file_inode_operations; extern const struct inode_operations minix_file_inode_operations;
extern const struct inode_operations minix_dir_inode_operations; extern const struct inode_operations minix_dir_inode_operations;

View File

@ -141,15 +141,15 @@ out_fail:
static int minix_unlink(struct inode * dir, struct dentry *dentry) static int minix_unlink(struct inode * dir, struct dentry *dentry)
{ {
struct inode * inode = d_inode(dentry); struct inode * inode = d_inode(dentry);
struct page * page; struct folio *folio;
struct minix_dir_entry * de; struct minix_dir_entry * de;
int err; int err;
de = minix_find_entry(dentry, &page); de = minix_find_entry(dentry, &folio);
if (!de) if (!de)
return -ENOENT; return -ENOENT;
err = minix_delete_entry(de, page); err = minix_delete_entry(de, folio);
unmap_and_put_page(page, de); folio_release_kmap(folio, de);
if (err) if (err)
return err; return err;
@ -180,28 +180,28 @@ static int minix_rename(struct mnt_idmap *idmap,
{ {
struct inode * old_inode = d_inode(old_dentry); struct inode * old_inode = d_inode(old_dentry);
struct inode * new_inode = d_inode(new_dentry); struct inode * new_inode = d_inode(new_dentry);
struct page * dir_page = NULL; struct folio * dir_folio = NULL;
struct minix_dir_entry * dir_de = NULL; struct minix_dir_entry * dir_de = NULL;
struct page * old_page; struct folio *old_folio;
struct minix_dir_entry * old_de; struct minix_dir_entry * old_de;
int err = -ENOENT; int err = -ENOENT;
if (flags & ~RENAME_NOREPLACE) if (flags & ~RENAME_NOREPLACE)
return -EINVAL; return -EINVAL;
old_de = minix_find_entry(old_dentry, &old_page); old_de = minix_find_entry(old_dentry, &old_folio);
if (!old_de) if (!old_de)
goto out; goto out;
if (S_ISDIR(old_inode->i_mode)) { if (S_ISDIR(old_inode->i_mode)) {
err = -EIO; err = -EIO;
dir_de = minix_dotdot(old_inode, &dir_page); dir_de = minix_dotdot(old_inode, &dir_folio);
if (!dir_de) if (!dir_de)
goto out_old; goto out_old;
} }
if (new_inode) { if (new_inode) {
struct page * new_page; struct folio *new_folio;
struct minix_dir_entry * new_de; struct minix_dir_entry * new_de;
err = -ENOTEMPTY; err = -ENOTEMPTY;
@ -209,11 +209,11 @@ static int minix_rename(struct mnt_idmap *idmap,
goto out_dir; goto out_dir;
err = -ENOENT; err = -ENOENT;
new_de = minix_find_entry(new_dentry, &new_page); new_de = minix_find_entry(new_dentry, &new_folio);
if (!new_de) if (!new_de)
goto out_dir; goto out_dir;
err = minix_set_link(new_de, new_page, old_inode); err = minix_set_link(new_de, new_folio, old_inode);
unmap_and_put_page(new_page, new_de); folio_release_kmap(new_folio, new_de);
if (err) if (err)
goto out_dir; goto out_dir;
inode_set_ctime_current(new_inode); inode_set_ctime_current(new_inode);
@ -228,22 +228,22 @@ static int minix_rename(struct mnt_idmap *idmap,
inode_inc_link_count(new_dir); inode_inc_link_count(new_dir);
} }
err = minix_delete_entry(old_de, old_page); err = minix_delete_entry(old_de, old_folio);
if (err) if (err)
goto out_dir; goto out_dir;
mark_inode_dirty(old_inode); mark_inode_dirty(old_inode);
if (dir_de) { if (dir_de) {
err = minix_set_link(dir_de, dir_page, new_dir); err = minix_set_link(dir_de, dir_folio, new_dir);
if (!err) if (!err)
inode_dec_link_count(old_dir); inode_dec_link_count(old_dir);
} }
out_dir: out_dir:
if (dir_de) if (dir_de)
unmap_and_put_page(dir_page, dir_de); folio_release_kmap(dir_folio, dir_de);
out_old: out_old:
unmap_and_put_page(old_page, old_de); folio_release_kmap(old_folio, old_de);
out: out:
return err; return err;
} }

View File

@ -5351,7 +5351,7 @@ int page_symlink(struct inode *inode, const char *symname, int len)
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops; const struct address_space_operations *aops = mapping->a_ops;
bool nofs = !mapping_gfp_constraint(mapping, __GFP_FS); bool nofs = !mapping_gfp_constraint(mapping, __GFP_FS);
struct page *page; struct folio *folio;
void *fsdata = NULL; void *fsdata = NULL;
int err; int err;
unsigned int flags; unsigned int flags;
@ -5359,16 +5359,16 @@ int page_symlink(struct inode *inode, const char *symname, int len)
retry: retry:
if (nofs) if (nofs)
flags = memalloc_nofs_save(); flags = memalloc_nofs_save();
err = aops->write_begin(NULL, mapping, 0, len-1, &page, &fsdata); err = aops->write_begin(NULL, mapping, 0, len-1, &folio, &fsdata);
if (nofs) if (nofs)
memalloc_nofs_restore(flags); memalloc_nofs_restore(flags);
if (err) if (err)
goto fail; goto fail;
memcpy(page_address(page), symname, len-1); memcpy(folio_address(folio), symname, len - 1);
err = aops->write_end(NULL, mapping, 0, len-1, len-1, err = aops->write_end(NULL, mapping, 0, len - 1, len - 1,
page, fsdata); folio, fsdata);
if (err < 0) if (err < 0)
goto fail; goto fail;
if (err < len-1) if (err < len-1)

View File

@ -336,7 +336,7 @@ static bool nfs_want_read_modify_write(struct file *file, struct folio *folio,
* increment the page use counts until he is done with the page. * increment the page use counts until he is done with the page.
*/ */
static int nfs_write_begin(struct file *file, struct address_space *mapping, static int nfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep, loff_t pos, unsigned len, struct folio **foliop,
void **fsdata) void **fsdata)
{ {
fgf_t fgp = FGP_WRITEBEGIN; fgf_t fgp = FGP_WRITEBEGIN;
@ -353,7 +353,7 @@ start:
mapping_gfp_mask(mapping)); mapping_gfp_mask(mapping));
if (IS_ERR(folio)) if (IS_ERR(folio))
return PTR_ERR(folio); return PTR_ERR(folio);
*pagep = &folio->page; *foliop = folio;
ret = nfs_flush_incompatible(file, folio); ret = nfs_flush_incompatible(file, folio);
if (ret) { if (ret) {
@ -372,10 +372,9 @@ start:
static int nfs_write_end(struct file *file, struct address_space *mapping, static int nfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct nfs_open_context *ctx = nfs_file_open_context(file); struct nfs_open_context *ctx = nfs_file_open_context(file);
struct folio *folio = page_folio(page);
unsigned offset = offset_in_folio(folio, pos); unsigned offset = offset_in_folio(folio, pos);
int status; int status;

View File

@ -83,7 +83,7 @@ static int nilfs_prepare_chunk(struct folio *folio, unsigned int from,
{ {
loff_t pos = folio_pos(folio) + from; loff_t pos = folio_pos(folio) + from;
return __block_write_begin(&folio->page, pos, to - from, nilfs_get_block); return __block_write_begin(folio, pos, to - from, nilfs_get_block);
} }
static void nilfs_commit_chunk(struct folio *folio, static void nilfs_commit_chunk(struct folio *folio,
@ -96,7 +96,7 @@ static void nilfs_commit_chunk(struct folio *folio,
int err; int err;
nr_dirty = nilfs_page_count_clean_buffers(&folio->page, from, to); nr_dirty = nilfs_page_count_clean_buffers(&folio->page, from, to);
copied = block_write_end(NULL, mapping, pos, len, len, &folio->page, NULL); copied = block_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (pos + copied > dir->i_size) if (pos + copied > dir->i_size)
i_size_write(dir, pos + copied); i_size_write(dir, pos + copied);
if (IS_DIRSYNC(dir)) if (IS_DIRSYNC(dir))

View File

@ -250,7 +250,7 @@ void nilfs_write_failed(struct address_space *mapping, loff_t to)
static int nilfs_write_begin(struct file *file, struct address_space *mapping, static int nilfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
@ -259,7 +259,7 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping,
if (unlikely(err)) if (unlikely(err))
return err; return err;
err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block); err = block_write_begin(mapping, pos, len, foliop, nilfs_get_block);
if (unlikely(err)) { if (unlikely(err)) {
nilfs_write_failed(mapping, pos + len); nilfs_write_failed(mapping, pos + len);
nilfs_transaction_abort(inode->i_sb); nilfs_transaction_abort(inode->i_sb);
@ -269,16 +269,16 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping,
static int nilfs_write_end(struct file *file, struct address_space *mapping, static int nilfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
unsigned int start = pos & (PAGE_SIZE - 1); unsigned int start = pos & (PAGE_SIZE - 1);
unsigned int nr_dirty; unsigned int nr_dirty;
int err; int err;
nr_dirty = nilfs_page_count_clean_buffers(page, start, nr_dirty = nilfs_page_count_clean_buffers(&folio->page, start,
start + copied); start + copied);
copied = generic_write_end(file, mapping, pos, len, copied, page, copied = generic_write_end(file, mapping, pos, len, copied, folio,
fsdata); fsdata);
nilfs_set_file_dirty(inode, nr_dirty); nilfs_set_file_dirty(inode, nr_dirty);
err = nilfs_transaction_commit(inode->i_sb); err = nilfs_transaction_commit(inode->i_sb);

View File

@ -498,7 +498,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
struct inode *inode; struct inode *inode;
struct nilfs_recovery_block *rb, *n; struct nilfs_recovery_block *rb, *n;
unsigned int blocksize = nilfs->ns_blocksize; unsigned int blocksize = nilfs->ns_blocksize;
struct page *page; struct folio *folio;
loff_t pos; loff_t pos;
int err = 0, err2 = 0; int err = 0, err2 = 0;
@ -512,7 +512,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
pos = rb->blkoff << inode->i_blkbits; pos = rb->blkoff << inode->i_blkbits;
err = block_write_begin(inode->i_mapping, pos, blocksize, err = block_write_begin(inode->i_mapping, pos, blocksize,
&page, nilfs_get_block); &folio, nilfs_get_block);
if (unlikely(err)) { if (unlikely(err)) {
loff_t isize = inode->i_size; loff_t isize = inode->i_size;
@ -522,7 +522,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
goto failed_inode; goto failed_inode;
} }
err = nilfs_recovery_copy_block(nilfs, rb, pos, page); err = nilfs_recovery_copy_block(nilfs, rb, pos, &folio->page);
if (unlikely(err)) if (unlikely(err))
goto failed_page; goto failed_page;
@ -531,17 +531,17 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
goto failed_page; goto failed_page;
block_write_end(NULL, inode->i_mapping, pos, blocksize, block_write_end(NULL, inode->i_mapping, pos, blocksize,
blocksize, page, NULL); blocksize, folio, NULL);
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
(*nr_salvaged_blocks)++; (*nr_salvaged_blocks)++;
goto next; goto next;
failed_page: failed_page:
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
failed_inode: failed_inode:
nilfs_warn(sb, nilfs_warn(sb,

View File

@ -182,7 +182,7 @@ static int ntfs_extend_initialized_size(struct file *file,
for (;;) { for (;;) {
u32 zerofrom, len; u32 zerofrom, len;
struct page *page; struct folio *folio;
u8 bits; u8 bits;
CLST vcn, lcn, clen; CLST vcn, lcn, clen;
@ -208,14 +208,13 @@ static int ntfs_extend_initialized_size(struct file *file,
if (pos + len > new_valid) if (pos + len > new_valid)
len = new_valid - pos; len = new_valid - pos;
err = ntfs_write_begin(file, mapping, pos, len, &page, NULL); err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL);
if (err) if (err)
goto out; goto out;
zero_user_segment(page, zerofrom, PAGE_SIZE); folio_zero_range(folio, zerofrom, folio_size(folio));
/* This function in any case puts page. */ err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
err = ntfs_write_end(file, mapping, pos, len, len, page, NULL);
if (err < 0) if (err < 0)
goto out; goto out;
pos += len; pos += len;

View File

@ -901,7 +901,7 @@ static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
} }
int ntfs_write_begin(struct file *file, struct address_space *mapping, int ntfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, u32 len, struct page **pagep, void **fsdata) loff_t pos, u32 len, struct folio **foliop, void **fsdata)
{ {
int err; int err;
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
@ -910,7 +910,6 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
return -EIO; return -EIO;
*pagep = NULL;
if (is_resident(ni)) { if (is_resident(ni)) {
struct folio *folio = __filemap_get_folio( struct folio *folio = __filemap_get_folio(
mapping, pos >> PAGE_SHIFT, FGP_WRITEBEGIN, mapping, pos >> PAGE_SHIFT, FGP_WRITEBEGIN,
@ -926,7 +925,7 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
ni_unlock(ni); ni_unlock(ni);
if (!err) { if (!err) {
*pagep = &folio->page; *foliop = folio;
goto out; goto out;
} }
folio_unlock(folio); folio_unlock(folio);
@ -936,7 +935,7 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
goto out; goto out;
} }
err = block_write_begin(mapping, pos, len, pagep, err = block_write_begin(mapping, pos, len, foliop,
ntfs_get_block_write_begin); ntfs_get_block_write_begin);
out: out:
@ -947,9 +946,8 @@ out:
* ntfs_write_end - Address_space_operations::write_end. * ntfs_write_end - Address_space_operations::write_end.
*/ */
int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
u32 len, u32 copied, struct page *page, void *fsdata) u32 len, u32 copied, struct folio *folio, void *fsdata)
{ {
struct folio *folio = page_folio(page);
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode); struct ntfs_inode *ni = ntfs_i(inode);
u64 valid = ni->i_valid; u64 valid = ni->i_valid;
@ -979,7 +977,7 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
folio_unlock(folio); folio_unlock(folio);
folio_put(folio); folio_put(folio);
} else { } else {
err = generic_write_end(file, mapping, pos, len, copied, page, err = generic_write_end(file, mapping, pos, len, copied, folio,
fsdata); fsdata);
} }
@ -1008,45 +1006,6 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
return err; return err;
} }
/*
 * Overwrite the log file's contents with 0xff bytes, one page-cache
 * chunk at a time, then mark the inode dirty for writeback.
 *
 * Updated for the folio conversion: block_write_begin()/block_write_end()
 * now take a struct folio (see the other converted call sites in this
 * change), the deprecated kmap_atomic() pair is replaced with
 * kmap_local_folio(), and the folio returned locked and referenced by
 * block_write_begin() is explicitly unlocked and released after the
 * chunk is committed, matching the nilfs recovery code's use of the
 * same API (block_write_end() itself does neither).
 */
int reset_log_file(struct inode *inode)
{
	int err;
	loff_t pos = 0;
	u32 log_size = inode->i_size;
	struct address_space *mapping = inode->i_mapping;

	for (;;) {
		u32 len;
		void *kaddr;
		struct folio *folio;

		len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;

		err = block_write_begin(mapping, pos, len, &folio,
					ntfs_get_block_write_begin);
		if (err)
			goto out;

		/* memset(-1) fills the chunk with 0xff. */
		kaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
		memset(kaddr, -1, len);
		kunmap_local(kaddr);
		flush_dcache_folio(folio);

		err = block_write_end(NULL, mapping, pos, len, len, folio, NULL);
		folio_unlock(folio);
		folio_put(folio);
		if (err < 0)
			goto out;
		pos += len;

		if (pos >= log_size)
			break;

		balance_dirty_pages_ratelimited(mapping);
	}
out:
	mark_inode_dirty_sync(inode);
	return err;
}
int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc) int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
{ {
return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL); return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);

View File

@ -708,13 +708,12 @@ int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi,
struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref, struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
const struct cpu_str *name); const struct cpu_str *name);
int ntfs_set_size(struct inode *inode, u64 new_size); int ntfs_set_size(struct inode *inode, u64 new_size);
int reset_log_file(struct inode *inode);
int ntfs_get_block(struct inode *inode, sector_t vbn, int ntfs_get_block(struct inode *inode, sector_t vbn,
struct buffer_head *bh_result, int create); struct buffer_head *bh_result, int create);
int ntfs_write_begin(struct file *file, struct address_space *mapping, int ntfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, u32 len, struct page **pagep, void **fsdata); loff_t pos, u32 len, struct folio **foliop, void **fsdata);
int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
u32 len, u32 copied, struct page *page, void *fsdata); u32 len, u32 copied, struct folio *folio, void *fsdata);
int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc); int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
int ntfs_sync_inode(struct inode *inode); int ntfs_sync_inode(struct inode *inode);
int ntfs_flush_inodes(struct super_block *sb, struct inode *i1, int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,

View File

@ -1643,7 +1643,7 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
int ocfs2_write_begin_nolock(struct address_space *mapping, int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, ocfs2_write_type_t type, loff_t pos, unsigned len, ocfs2_write_type_t type,
struct page **pagep, void **fsdata, struct folio **foliop, void **fsdata,
struct buffer_head *di_bh, struct page *mmap_page) struct buffer_head *di_bh, struct page *mmap_page)
{ {
int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
@ -1826,8 +1826,8 @@ try_again:
ocfs2_free_alloc_context(meta_ac); ocfs2_free_alloc_context(meta_ac);
success: success:
if (pagep) if (foliop)
*pagep = wc->w_target_page; *foliop = page_folio(wc->w_target_page);
*fsdata = wc; *fsdata = wc;
return 0; return 0;
out_quota: out_quota:
@ -1879,7 +1879,7 @@ out:
static int ocfs2_write_begin(struct file *file, struct address_space *mapping, static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
struct buffer_head *di_bh = NULL; struct buffer_head *di_bh = NULL;
@ -1901,7 +1901,7 @@ static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
down_write(&OCFS2_I(inode)->ip_alloc_sem); down_write(&OCFS2_I(inode)->ip_alloc_sem);
ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER, ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
pagep, fsdata, di_bh, NULL); foliop, fsdata, di_bh, NULL);
if (ret) { if (ret) {
mlog_errno(ret); mlog_errno(ret);
goto out_fail; goto out_fail;
@ -2076,7 +2076,7 @@ out:
static int ocfs2_write_end(struct file *file, struct address_space *mapping, static int ocfs2_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
int ret; int ret;
struct inode *inode = mapping->host; struct inode *inode = mapping->host;

View File

@ -38,7 +38,7 @@ typedef enum {
int ocfs2_write_begin_nolock(struct address_space *mapping, int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, ocfs2_write_type_t type, loff_t pos, unsigned len, ocfs2_write_type_t type,
struct page **pagep, void **fsdata, struct folio **foliop, void **fsdata,
struct buffer_head *di_bh, struct page *mmap_page); struct buffer_head *di_bh, struct page *mmap_page);
int ocfs2_read_inline_data(struct inode *inode, struct page *page, int ocfs2_read_inline_data(struct inode *inode, struct page *page,

View File

@ -755,7 +755,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
u64 abs_to, struct buffer_head *di_bh) u64 abs_to, struct buffer_head *di_bh)
{ {
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
struct page *page; struct folio *folio;
unsigned long index = abs_from >> PAGE_SHIFT; unsigned long index = abs_from >> PAGE_SHIFT;
handle_t *handle; handle_t *handle;
int ret = 0; int ret = 0;
@ -774,9 +774,10 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
goto out; goto out;
} }
page = find_or_create_page(mapping, index, GFP_NOFS); folio = __filemap_get_folio(mapping, index,
if (!page) { FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
ret = -ENOMEM; if (IS_ERR(folio)) {
ret = PTR_ERR(folio);
mlog_errno(ret); mlog_errno(ret);
goto out_commit_trans; goto out_commit_trans;
} }
@ -803,7 +804,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
* __block_write_begin and block_commit_write to zero the * __block_write_begin and block_commit_write to zero the
* whole block. * whole block.
*/ */
ret = __block_write_begin(page, block_start + 1, 0, ret = __block_write_begin(folio, block_start + 1, 0,
ocfs2_get_block); ocfs2_get_block);
if (ret < 0) { if (ret < 0) {
mlog_errno(ret); mlog_errno(ret);
@ -812,7 +813,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
/* must not update i_size! */ /* must not update i_size! */
block_commit_write(page, block_start + 1, block_start + 1); block_commit_write(&folio->page, block_start + 1, block_start + 1);
} }
/* /*
@ -833,8 +834,8 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
} }
out_unlock: out_unlock:
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
out_commit_trans: out_commit_trans:
if (handle) if (handle)
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);

View File

@ -53,7 +53,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
loff_t pos = page_offset(page); loff_t pos = page_offset(page);
unsigned int len = PAGE_SIZE; unsigned int len = PAGE_SIZE;
pgoff_t last_index; pgoff_t last_index;
struct page *locked_page = NULL; struct folio *locked_folio = NULL;
void *fsdata; void *fsdata;
loff_t size = i_size_read(inode); loff_t size = i_size_read(inode);
@ -91,7 +91,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
len = ((size - 1) & ~PAGE_MASK) + 1; len = ((size - 1) & ~PAGE_MASK) + 1;
err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP, err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
&locked_page, &fsdata, di_bh, page); &locked_folio, &fsdata, di_bh, page);
if (err) { if (err) {
if (err != -ENOSPC) if (err != -ENOSPC)
mlog_errno(err); mlog_errno(err);
@ -99,7 +99,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
goto out; goto out;
} }
if (!locked_page) { if (!locked_folio) {
ret = VM_FAULT_NOPAGE; ret = VM_FAULT_NOPAGE;
goto out; goto out;
} }

View File

@ -312,11 +312,11 @@ static void omfs_write_failed(struct address_space *mapping, loff_t to)
static int omfs_write_begin(struct file *file, struct address_space *mapping, static int omfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
ret = block_write_begin(mapping, pos, len, pagep, omfs_get_block); ret = block_write_begin(mapping, pos, len, foliop, omfs_get_block);
if (unlikely(ret)) if (unlikely(ret))
omfs_write_failed(mapping, pos + len); omfs_write_failed(mapping, pos + len);

View File

@ -309,22 +309,18 @@ static int orangefs_read_folio(struct file *file, struct folio *folio)
static int orangefs_write_begin(struct file *file, static int orangefs_write_begin(struct file *file,
struct address_space *mapping, loff_t pos, unsigned len, struct address_space *mapping, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
struct orangefs_write_range *wr; struct orangefs_write_range *wr;
struct folio *folio; struct folio *folio;
struct page *page;
pgoff_t index;
int ret; int ret;
index = pos >> PAGE_SHIFT; folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
return PTR_ERR(folio);
page = grab_cache_page_write_begin(mapping, index); *foliop = folio;
if (!page)
return -ENOMEM;
*pagep = page;
folio = page_folio(page);
if (folio_test_dirty(folio) && !folio_test_private(folio)) { if (folio_test_dirty(folio) && !folio_test_private(folio)) {
/* /*
@ -365,9 +361,10 @@ okay:
} }
static int orangefs_write_end(struct file *file, struct address_space *mapping, static int orangefs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) loff_t pos, unsigned len, unsigned copied, struct folio *folio,
void *fsdata)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = folio->mapping->host;
loff_t last_pos = pos + copied; loff_t last_pos = pos + copied;
/* /*
@ -377,23 +374,23 @@ static int orangefs_write_end(struct file *file, struct address_space *mapping,
if (last_pos > inode->i_size) if (last_pos > inode->i_size)
i_size_write(inode, last_pos); i_size_write(inode, last_pos);
/* zero the stale part of the page if we did a short copy */ /* zero the stale part of the folio if we did a short copy */
if (!PageUptodate(page)) { if (!folio_test_uptodate(folio)) {
unsigned from = pos & (PAGE_SIZE - 1); unsigned from = pos & (PAGE_SIZE - 1);
if (copied < len) { if (copied < len) {
zero_user(page, from + copied, len - copied); folio_zero_range(folio, from + copied, len - copied);
} }
/* Set fully written pages uptodate. */ /* Set fully written pages uptodate. */
if (pos == page_offset(page) && if (pos == folio_pos(folio) &&
(len == PAGE_SIZE || pos + len == inode->i_size)) { (len == PAGE_SIZE || pos + len == inode->i_size)) {
zero_user_segment(page, from + copied, PAGE_SIZE); folio_zero_segment(folio, from + copied, PAGE_SIZE);
SetPageUptodate(page); folio_mark_uptodate(folio);
} }
} }
set_page_dirty(page); folio_mark_dirty(folio);
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
mark_inode_dirty_sync(file_inode(file)); mark_inode_dirty_sync(file_inode(file));
return copied; return copied;

View File

@ -24,13 +24,15 @@ static unsigned qnx6_lfile_checksum(char *name, unsigned size)
return crc; return crc;
} }
static struct page *qnx6_get_page(struct inode *dir, unsigned long n) static void *qnx6_get_folio(struct inode *dir, unsigned long n,
struct folio **foliop)
{ {
struct address_space *mapping = dir->i_mapping; struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);
struct page *page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) if (IS_ERR(folio))
kmap(page); return folio;
return page; *foliop = folio;
return kmap_local_folio(folio, 0);
} }
static unsigned last_entry(struct inode *inode, unsigned long page_nr) static unsigned last_entry(struct inode *inode, unsigned long page_nr)
@ -44,19 +46,20 @@ static unsigned last_entry(struct inode *inode, unsigned long page_nr)
static struct qnx6_long_filename *qnx6_longname(struct super_block *sb, static struct qnx6_long_filename *qnx6_longname(struct super_block *sb,
struct qnx6_long_dir_entry *de, struct qnx6_long_dir_entry *de,
struct page **p) struct folio **foliop)
{ {
struct qnx6_sb_info *sbi = QNX6_SB(sb); struct qnx6_sb_info *sbi = QNX6_SB(sb);
u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */ u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */
u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */ u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */
/* within page */ u32 offs;
u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK;
struct address_space *mapping = sbi->longfile->i_mapping; struct address_space *mapping = sbi->longfile->i_mapping;
struct page *page = read_mapping_page(mapping, n, NULL); struct folio *folio = read_mapping_folio(mapping, n, NULL);
if (IS_ERR(page))
return ERR_CAST(page); if (IS_ERR(folio))
kmap(*p = page); return ERR_CAST(folio);
return (struct qnx6_long_filename *)(page_address(page) + offs); offs = offset_in_folio(folio, s << sb->s_blocksize_bits);
*foliop = folio;
return kmap_local_folio(folio, offs);
} }
static int qnx6_dir_longfilename(struct inode *inode, static int qnx6_dir_longfilename(struct inode *inode,
@ -67,7 +70,7 @@ static int qnx6_dir_longfilename(struct inode *inode,
struct qnx6_long_filename *lf; struct qnx6_long_filename *lf;
struct super_block *s = inode->i_sb; struct super_block *s = inode->i_sb;
struct qnx6_sb_info *sbi = QNX6_SB(s); struct qnx6_sb_info *sbi = QNX6_SB(s);
struct page *page; struct folio *folio;
int lf_size; int lf_size;
if (de->de_size != 0xff) { if (de->de_size != 0xff) {
@ -76,7 +79,7 @@ static int qnx6_dir_longfilename(struct inode *inode,
pr_err("invalid direntry size (%i).\n", de->de_size); pr_err("invalid direntry size (%i).\n", de->de_size);
return 0; return 0;
} }
lf = qnx6_longname(s, de, &page); lf = qnx6_longname(s, de, &folio);
if (IS_ERR(lf)) { if (IS_ERR(lf)) {
pr_err("Error reading longname\n"); pr_err("Error reading longname\n");
return 0; return 0;
@ -87,7 +90,7 @@ static int qnx6_dir_longfilename(struct inode *inode,
if (lf_size > QNX6_LONG_NAME_MAX) { if (lf_size > QNX6_LONG_NAME_MAX) {
pr_debug("file %s\n", lf->lf_fname); pr_debug("file %s\n", lf->lf_fname);
pr_err("Filename too long (%i)\n", lf_size); pr_err("Filename too long (%i)\n", lf_size);
qnx6_put_page(page); folio_release_kmap(folio, lf);
return 0; return 0;
} }
@ -100,11 +103,11 @@ static int qnx6_dir_longfilename(struct inode *inode,
pr_debug("qnx6_readdir:%.*s inode:%u\n", pr_debug("qnx6_readdir:%.*s inode:%u\n",
lf_size, lf->lf_fname, de_inode); lf_size, lf->lf_fname, de_inode);
if (!dir_emit(ctx, lf->lf_fname, lf_size, de_inode, DT_UNKNOWN)) { if (!dir_emit(ctx, lf->lf_fname, lf_size, de_inode, DT_UNKNOWN)) {
qnx6_put_page(page); folio_release_kmap(folio, lf);
return 0; return 0;
} }
qnx6_put_page(page); folio_release_kmap(folio, lf);
/* success */ /* success */
return 1; return 1;
} }
@ -117,26 +120,27 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1); loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
unsigned long npages = dir_pages(inode); unsigned long npages = dir_pages(inode);
unsigned long n = pos >> PAGE_SHIFT; unsigned long n = pos >> PAGE_SHIFT;
unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE; unsigned offset = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE;
bool done = false; bool done = false;
ctx->pos = pos; ctx->pos = pos;
if (ctx->pos >= inode->i_size) if (ctx->pos >= inode->i_size)
return 0; return 0;
for ( ; !done && n < npages; n++, start = 0) { for ( ; !done && n < npages; n++, offset = 0) {
struct page *page = qnx6_get_page(inode, n);
int limit = last_entry(inode, n);
struct qnx6_dir_entry *de; struct qnx6_dir_entry *de;
int i = start; struct folio *folio;
char *kaddr = qnx6_get_folio(inode, n, &folio);
char *limit;
if (IS_ERR(page)) { if (IS_ERR(kaddr)) {
pr_err("%s(): read failed\n", __func__); pr_err("%s(): read failed\n", __func__);
ctx->pos = (n + 1) << PAGE_SHIFT; ctx->pos = (n + 1) << PAGE_SHIFT;
return PTR_ERR(page); return PTR_ERR(kaddr);
} }
de = ((struct qnx6_dir_entry *)page_address(page)) + start; de = (struct qnx6_dir_entry *)(kaddr + offset);
for (; i < limit; i++, de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) { limit = kaddr + last_entry(inode, n);
for (; (char *)de < limit; de++, ctx->pos += QNX6_DIR_ENTRY_SIZE) {
int size = de->de_size; int size = de->de_size;
u32 no_inode = fs32_to_cpu(sbi, de->de_inode); u32 no_inode = fs32_to_cpu(sbi, de->de_inode);
@ -164,7 +168,7 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx)
} }
} }
} }
qnx6_put_page(page); folio_release_kmap(folio, kaddr);
} }
return 0; return 0;
} }
@ -177,23 +181,23 @@ static unsigned qnx6_long_match(int len, const char *name,
{ {
struct super_block *s = dir->i_sb; struct super_block *s = dir->i_sb;
struct qnx6_sb_info *sbi = QNX6_SB(s); struct qnx6_sb_info *sbi = QNX6_SB(s);
struct page *page; struct folio *folio;
int thislen; int thislen;
struct qnx6_long_filename *lf = qnx6_longname(s, de, &page); struct qnx6_long_filename *lf = qnx6_longname(s, de, &folio);
if (IS_ERR(lf)) if (IS_ERR(lf))
return 0; return 0;
thislen = fs16_to_cpu(sbi, lf->lf_size); thislen = fs16_to_cpu(sbi, lf->lf_size);
if (len != thislen) { if (len != thislen) {
qnx6_put_page(page); folio_release_kmap(folio, lf);
return 0; return 0;
} }
if (memcmp(name, lf->lf_fname, len) == 0) { if (memcmp(name, lf->lf_fname, len) == 0) {
qnx6_put_page(page); folio_release_kmap(folio, lf);
return fs32_to_cpu(sbi, de->de_inode); return fs32_to_cpu(sbi, de->de_inode);
} }
qnx6_put_page(page); folio_release_kmap(folio, lf);
return 0; return 0;
} }
@ -210,20 +214,17 @@ static unsigned qnx6_match(struct super_block *s, int len, const char *name,
} }
unsigned qnx6_find_entry(int len, struct inode *dir, const char *name, unsigned qnx6_find_ino(int len, struct inode *dir, const char *name)
struct page **res_page)
{ {
struct super_block *s = dir->i_sb; struct super_block *s = dir->i_sb;
struct qnx6_inode_info *ei = QNX6_I(dir); struct qnx6_inode_info *ei = QNX6_I(dir);
struct page *page = NULL; struct folio *folio;
unsigned long start, n; unsigned long start, n;
unsigned long npages = dir_pages(dir); unsigned long npages = dir_pages(dir);
unsigned ino; unsigned ino;
struct qnx6_dir_entry *de; struct qnx6_dir_entry *de;
struct qnx6_long_dir_entry *lde; struct qnx6_long_dir_entry *lde;
*res_page = NULL;
if (npages == 0) if (npages == 0)
return 0; return 0;
start = ei->i_dir_start_lookup; start = ei->i_dir_start_lookup;
@ -232,12 +233,11 @@ unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
n = start; n = start;
do { do {
page = qnx6_get_page(dir, n); de = qnx6_get_folio(dir, n, &folio);
if (!IS_ERR(page)) { if (!IS_ERR(de)) {
int limit = last_entry(dir, n); int limit = last_entry(dir, n);
int i; int i;
de = (struct qnx6_dir_entry *)page_address(page);
for (i = 0; i < limit; i++, de++) { for (i = 0; i < limit; i++, de++) {
if (len <= QNX6_SHORT_NAME_MAX) { if (len <= QNX6_SHORT_NAME_MAX) {
/* short filename */ /* short filename */
@ -256,7 +256,7 @@ unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
} else } else
pr_err("undefined filename size in inode.\n"); pr_err("undefined filename size in inode.\n");
} }
qnx6_put_page(page); folio_release_kmap(folio, de - i);
} }
if (++n >= npages) if (++n >= npages)
@ -265,8 +265,8 @@ unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
return 0; return 0;
found: found:
*res_page = page;
ei->i_dir_start_lookup = n; ei->i_dir_start_lookup = n;
folio_release_kmap(folio, de);
return ino; return ino;
} }

View File

@ -184,17 +184,17 @@ static const char *qnx6_checkroot(struct super_block *s)
struct qnx6_dir_entry *dir_entry; struct qnx6_dir_entry *dir_entry;
struct inode *root = d_inode(s->s_root); struct inode *root = d_inode(s->s_root);
struct address_space *mapping = root->i_mapping; struct address_space *mapping = root->i_mapping;
struct page *page = read_mapping_page(mapping, 0, NULL); struct folio *folio = read_mapping_folio(mapping, 0, NULL);
if (IS_ERR(page))
if (IS_ERR(folio))
return "error reading root directory"; return "error reading root directory";
kmap(page); dir_entry = kmap_local_folio(folio, 0);
dir_entry = page_address(page);
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
/* maximum 3 bytes - due to match_root limitation */ /* maximum 3 bytes - due to match_root limitation */
if (strncmp(dir_entry[i].de_fname, match_root[i], 3)) if (strncmp(dir_entry[i].de_fname, match_root[i], 3))
error = 1; error = 1;
} }
qnx6_put_page(page); folio_release_kmap(folio, dir_entry);
if (error) if (error)
return "error reading root directory."; return "error reading root directory.";
return NULL; return NULL;
@ -518,7 +518,7 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
struct inode *inode; struct inode *inode;
struct qnx6_inode_info *ei; struct qnx6_inode_info *ei;
struct address_space *mapping; struct address_space *mapping;
struct page *page; struct folio *folio;
u32 n, offs; u32 n, offs;
inode = iget_locked(sb, ino); inode = iget_locked(sb, ino);
@ -538,17 +538,16 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
return ERR_PTR(-EIO); return ERR_PTR(-EIO);
} }
n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS); n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS);
offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS);
mapping = sbi->inodes->i_mapping; mapping = sbi->inodes->i_mapping;
page = read_mapping_page(mapping, n, NULL); folio = read_mapping_folio(mapping, n, NULL);
if (IS_ERR(page)) { if (IS_ERR(folio)) {
pr_err("major problem: unable to read inode from dev %s\n", pr_err("major problem: unable to read inode from dev %s\n",
sb->s_id); sb->s_id);
iget_failed(inode); iget_failed(inode);
return ERR_CAST(page); return ERR_CAST(folio);
} }
kmap(page); offs = offset_in_folio(folio, (ino - 1) << QNX6_INODE_SIZE_BITS);
raw_inode = ((struct qnx6_inode_entry *)page_address(page)) + offs; raw_inode = kmap_local_folio(folio, offs);
inode->i_mode = fs16_to_cpu(sbi, raw_inode->di_mode); inode->i_mode = fs16_to_cpu(sbi, raw_inode->di_mode);
i_uid_write(inode, (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid)); i_uid_write(inode, (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid));
@ -578,7 +577,7 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
inode->i_mapping->a_ops = &qnx6_aops; inode->i_mapping->a_ops = &qnx6_aops;
} else } else
init_special_inode(inode, inode->i_mode, 0); init_special_inode(inode, inode->i_mode, 0);
qnx6_put_page(page); folio_release_kmap(folio, raw_inode);
unlock_new_inode(inode); unlock_new_inode(inode);
return inode; return inode;
} }

View File

@ -17,7 +17,6 @@ struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags) unsigned int flags)
{ {
unsigned ino; unsigned ino;
struct page *page;
struct inode *foundinode = NULL; struct inode *foundinode = NULL;
const char *name = dentry->d_name.name; const char *name = dentry->d_name.name;
int len = dentry->d_name.len; int len = dentry->d_name.len;
@ -25,10 +24,9 @@ struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry,
if (len > QNX6_LONG_NAME_MAX) if (len > QNX6_LONG_NAME_MAX)
return ERR_PTR(-ENAMETOOLONG); return ERR_PTR(-ENAMETOOLONG);
ino = qnx6_find_entry(len, dir, name, &page); ino = qnx6_find_ino(len, dir, name);
if (ino) { if (ino) {
foundinode = qnx6_iget(dir->i_sb, ino); foundinode = qnx6_iget(dir->i_sb, ino);
qnx6_put_page(page);
if (IS_ERR(foundinode)) if (IS_ERR(foundinode))
pr_debug("lookup->iget -> error %ld\n", pr_debug("lookup->iget -> error %ld\n",
PTR_ERR(foundinode)); PTR_ERR(foundinode));

View File

@ -126,11 +126,4 @@ static inline __fs16 cpu_to_fs16(struct qnx6_sb_info *sbi, __u16 n)
extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s, extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s,
int silent); int silent);
static inline void qnx6_put_page(struct page *page) unsigned qnx6_find_ino(int len, struct inode *dir, const char *name);
{
kunmap(page);
put_page(page);
}
extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
struct page **res_page);

View File

@ -2178,7 +2178,7 @@ static int grab_tail_page(struct inode *inode,
unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1); unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
struct buffer_head *bh; struct buffer_head *bh;
struct buffer_head *head; struct buffer_head *head;
struct page *page; struct folio *folio;
int error; int error;
/* /*
@ -2190,20 +2190,20 @@ static int grab_tail_page(struct inode *inode,
if ((offset & (blocksize - 1)) == 0) { if ((offset & (blocksize - 1)) == 0) {
return -ENOENT; return -ENOENT;
} }
page = grab_cache_page(inode->i_mapping, index); folio = __filemap_get_folio(inode->i_mapping, index,
error = -ENOMEM; FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
if (!page) { mapping_gfp_mask(inode->i_mapping));
goto out; if (IS_ERR(folio))
} return PTR_ERR(folio);
/* start within the page of the last block in the file */ /* start within the page of the last block in the file */
start = (offset / blocksize) * blocksize; start = (offset / blocksize) * blocksize;
error = __block_write_begin(page, start, offset - start, error = __block_write_begin(folio, start, offset - start,
reiserfs_get_block_create_0); reiserfs_get_block_create_0);
if (error) if (error)
goto unlock; goto unlock;
head = page_buffers(page); head = folio_buffers(folio);
bh = head; bh = head;
do { do {
if (pos >= start) { if (pos >= start) {
@ -2226,14 +2226,13 @@ static int grab_tail_page(struct inode *inode,
goto unlock; goto unlock;
} }
*bh_result = bh; *bh_result = bh;
*page_result = page; *page_result = &folio->page;
out:
return error; return error;
unlock: unlock:
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
return error; return error;
} }
@ -2736,23 +2735,24 @@ static void reiserfs_truncate_failed_write(struct inode *inode)
static int reiserfs_write_begin(struct file *file, static int reiserfs_write_begin(struct file *file,
struct address_space *mapping, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
struct inode *inode; struct inode *inode;
struct page *page; struct folio *folio;
pgoff_t index; pgoff_t index;
int ret; int ret;
int old_ref = 0; int old_ref = 0;
inode = mapping->host; inode = mapping->host;
index = pos >> PAGE_SHIFT; index = pos >> PAGE_SHIFT;
page = grab_cache_page_write_begin(mapping, index); folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
if (!page) mapping_gfp_mask(mapping));
return -ENOMEM; if (IS_ERR(folio))
*pagep = page; return PTR_ERR(folio);
*foliop = folio;
reiserfs_wait_on_write_block(inode->i_sb); reiserfs_wait_on_write_block(inode->i_sb);
fix_tail_page_for_writing(page); fix_tail_page_for_writing(&folio->page);
if (reiserfs_transaction_running(inode->i_sb)) { if (reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th; struct reiserfs_transaction_handle *th;
th = (struct reiserfs_transaction_handle *)current-> th = (struct reiserfs_transaction_handle *)current->
@ -2762,7 +2762,7 @@ static int reiserfs_write_begin(struct file *file,
old_ref = th->t_refcount; old_ref = th->t_refcount;
th->t_refcount++; th->t_refcount++;
} }
ret = __block_write_begin(page, pos, len, reiserfs_get_block); ret = __block_write_begin(folio, pos, len, reiserfs_get_block);
if (ret && reiserfs_transaction_running(inode->i_sb)) { if (ret && reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th = current->journal_info; struct reiserfs_transaction_handle *th = current->journal_info;
/* /*
@ -2792,8 +2792,8 @@ static int reiserfs_write_begin(struct file *file,
} }
} }
if (ret) { if (ret) {
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
/* Truncate allocated blocks */ /* Truncate allocated blocks */
reiserfs_truncate_failed_write(inode); reiserfs_truncate_failed_write(inode);
} }
@ -2822,7 +2822,7 @@ int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
th->t_refcount++; th->t_refcount++;
} }
ret = __block_write_begin(page, from, len, reiserfs_get_block); ret = __block_write_begin(page_folio(page), from, len, reiserfs_get_block);
if (ret && reiserfs_transaction_running(inode->i_sb)) { if (ret && reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th = current->journal_info; struct reiserfs_transaction_handle *th = current->journal_info;
/* /*
@ -2862,10 +2862,9 @@ static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block)
static int reiserfs_write_end(struct file *file, struct address_space *mapping, static int reiserfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct folio *folio = page_folio(page); struct inode *inode = folio->mapping->host;
struct inode *inode = page->mapping->host;
int ret = 0; int ret = 0;
int update_sd = 0; int update_sd = 0;
struct reiserfs_transaction_handle *th; struct reiserfs_transaction_handle *th;
@ -2887,7 +2886,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
} }
flush_dcache_folio(folio); flush_dcache_folio(folio);
reiserfs_commit_page(inode, page, start, start + copied); reiserfs_commit_page(inode, &folio->page, start, start + copied);
/* /*
* generic_commit_write does this for us, but does not update the * generic_commit_write does this for us, but does not update the
@ -2942,8 +2941,8 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
out: out:
if (locked) if (locked)
reiserfs_write_unlock(inode->i_sb); reiserfs_write_unlock(inode->i_sb);
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
if (pos + len > inode->i_size) if (pos + len > inode->i_size)
reiserfs_truncate_failed_write(inode); reiserfs_truncate_failed_write(inode);

View File

@ -494,39 +494,73 @@ out:
} }
static int squashfs_readahead_fragment(struct page **page, static int squashfs_readahead_fragment(struct page **page,
unsigned int pages, unsigned int expected) unsigned int pages, unsigned int expected, loff_t start)
{ {
struct inode *inode = page[0]->mapping->host; struct inode *inode = page[0]->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size); squashfs_i(inode)->fragment_size);
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; int i, bytes, copied;
int error = buffer->error; struct squashfs_page_actor *actor;
unsigned int offset;
void *addr;
struct page *last_page;
if (error) if (buffer->error)
goto out; goto out;
expected += squashfs_i(inode)->fragment_offset; actor = squashfs_page_actor_init_special(msblk, page, pages,
expected, start);
if (!actor)
goto out;
for (n = 0; n < pages; n++) { squashfs_actor_nobuff(actor);
unsigned int base = (page[n]->index & mask) << PAGE_SHIFT; addr = squashfs_first_page(actor);
unsigned int offset = base + squashfs_i(inode)->fragment_offset;
if (expected > offset) { for (copied = offset = 0; offset < expected; offset += PAGE_SIZE) {
unsigned int avail = min_t(unsigned int, expected - int avail = min_t(int, expected - offset, PAGE_SIZE);
offset, PAGE_SIZE);
squashfs_fill_page(page[n], buffer, offset, avail); if (!IS_ERR(addr)) {
bytes = squashfs_copy_data(addr, buffer, offset +
squashfs_i(inode)->fragment_offset, avail);
if (bytes != avail)
goto failed;
} }
unlock_page(page[n]); copied += avail;
put_page(page[n]); addr = squashfs_next_page(actor);
} }
last_page = squashfs_page_actor_free(actor);
if (copied == expected && !IS_ERR(last_page)) {
/* Last page (if present) may have trailing bytes not filled */
bytes = copied % PAGE_SIZE;
if (bytes && last_page)
memzero_page(last_page, bytes, PAGE_SIZE - bytes);
for (i = 0; i < pages; i++) {
flush_dcache_page(page[i]);
SetPageUptodate(page[i]);
}
}
for (i = 0; i < pages; i++) {
unlock_page(page[i]);
put_page(page[i]);
}
squashfs_cache_put(buffer);
return 0;
failed:
squashfs_page_actor_free(actor);
out: out:
squashfs_cache_put(buffer); squashfs_cache_put(buffer);
return error; return 1;
} }
static void squashfs_readahead(struct readahead_control *ractl) static void squashfs_readahead(struct readahead_control *ractl)
@ -551,7 +585,6 @@ static void squashfs_readahead(struct readahead_control *ractl)
return; return;
for (;;) { for (;;) {
pgoff_t index;
int res, bsize; int res, bsize;
u64 block = 0; u64 block = 0;
unsigned int expected; unsigned int expected;
@ -570,26 +603,21 @@ static void squashfs_readahead(struct readahead_control *ractl)
if (readahead_pos(ractl) >= i_size_read(inode)) if (readahead_pos(ractl) >= i_size_read(inode))
goto skip_pages; goto skip_pages;
index = pages[0]->index >> shift; if (start >> msblk->block_log == file_end &&
squashfs_i(inode)->fragment_block != SQUASHFS_INVALID_BLK) {
if ((pages[nr_pages - 1]->index >> shift) != index)
goto skip_pages;
if (index == file_end && squashfs_i(inode)->fragment_block !=
SQUASHFS_INVALID_BLK) {
res = squashfs_readahead_fragment(pages, nr_pages, res = squashfs_readahead_fragment(pages, nr_pages,
expected); expected, start);
if (res) if (res)
goto skip_pages; goto skip_pages;
continue; continue;
} }
bsize = read_blocklist(inode, index, &block); bsize = read_blocklist(inode, start >> msblk->block_log, &block);
if (bsize == 0) if (bsize == 0)
goto skip_pages; goto skip_pages;
actor = squashfs_page_actor_init_special(msblk, pages, nr_pages, actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
expected); expected, start);
if (!actor) if (!actor)
goto skip_pages; goto skip_pages;
@ -597,12 +625,12 @@ static void squashfs_readahead(struct readahead_control *ractl)
last_page = squashfs_page_actor_free(actor); last_page = squashfs_page_actor_free(actor);
if (res == expected) { if (res == expected && !IS_ERR(last_page)) {
int bytes; int bytes;
/* Last page (if present) may have trailing bytes not filled */ /* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE; bytes = res % PAGE_SIZE;
if (index == file_end && bytes && last_page) if (start >> msblk->block_log == file_end && bytes && last_page)
memzero_page(last_page, bytes, memzero_page(last_page, bytes,
PAGE_SIZE - bytes); PAGE_SIZE - bytes);
@ -616,6 +644,8 @@ static void squashfs_readahead(struct readahead_control *ractl)
unlock_page(pages[i]); unlock_page(pages[i]);
put_page(pages[i]); put_page(pages[i]);
} }
start += readahead_batch_length(ractl);
} }
kfree(pages); kfree(pages);

View File

@ -23,15 +23,15 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
int expected) int expected)
{ {
struct folio *folio = page_folio(target_page);
struct inode *inode = target_page->mapping->host; struct inode *inode = target_page->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT; loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
loff_t start_index = target_page->index & ~mask; loff_t start_index = folio->index & ~mask;
loff_t end_index = start_index | mask; loff_t end_index = start_index | mask;
int i, n, pages, bytes, res = -ENOMEM; int i, n, pages, bytes, res = -ENOMEM;
struct page **page; struct page **page, *last_page;
struct squashfs_page_actor *actor; struct squashfs_page_actor *actor;
void *pageaddr; void *pageaddr;
@ -46,7 +46,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
/* Try to grab all the pages covered by the Squashfs block */ /* Try to grab all the pages covered by the Squashfs block */
for (i = 0, n = start_index; n <= end_index; n++) { for (i = 0, n = start_index; n <= end_index; n++) {
page[i] = (n == target_page->index) ? target_page : page[i] = (n == folio->index) ? target_page :
grab_cache_page_nowait(target_page->mapping, n); grab_cache_page_nowait(target_page->mapping, n);
if (page[i] == NULL) if (page[i] == NULL)
@ -67,27 +67,28 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
* Create a "page actor" which will kmap and kunmap the * Create a "page actor" which will kmap and kunmap the
* page cache pages appropriately within the decompressor * page cache pages appropriately within the decompressor
*/ */
actor = squashfs_page_actor_init_special(msblk, page, pages, expected); actor = squashfs_page_actor_init_special(msblk, page, pages, expected,
start_index << PAGE_SHIFT);
if (actor == NULL) if (actor == NULL)
goto out; goto out;
/* Decompress directly into the page cache buffers */ /* Decompress directly into the page cache buffers */
res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor); res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
squashfs_page_actor_free(actor); last_page = squashfs_page_actor_free(actor);
if (res < 0) if (res < 0)
goto mark_errored; goto mark_errored;
if (res != expected) { if (res != expected || IS_ERR(last_page)) {
res = -EIO; res = -EIO;
goto mark_errored; goto mark_errored;
} }
/* Last page (if present) may have trailing bytes not filled */ /* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE; bytes = res % PAGE_SIZE;
if (page[pages - 1]->index == end_index && bytes) { if (end_index == file_end && last_page && bytes) {
pageaddr = kmap_local_page(page[pages - 1]); pageaddr = kmap_local_page(last_page);
memset(pageaddr + bytes, 0, PAGE_SIZE - bytes); memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
kunmap_local(pageaddr); kunmap_local(pageaddr);
} }

View File

@ -60,6 +60,11 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
} }
/* Implementation of page_actor for decompressing directly into page cache. */ /* Implementation of page_actor for decompressing directly into page cache. */
static loff_t page_next_index(struct squashfs_page_actor *actor)
{
return page_folio(actor->page[actor->next_page])->index;
}
static void *handle_next_page(struct squashfs_page_actor *actor) static void *handle_next_page(struct squashfs_page_actor *actor)
{ {
int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT; int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
@ -68,7 +73,7 @@ static void *handle_next_page(struct squashfs_page_actor *actor)
return NULL; return NULL;
if ((actor->next_page == actor->pages) || if ((actor->next_page == actor->pages) ||
(actor->next_index != actor->page[actor->next_page]->index)) { (actor->next_index != page_next_index(actor))) {
actor->next_index++; actor->next_index++;
actor->returned_pages++; actor->returned_pages++;
actor->last_page = NULL; actor->last_page = NULL;
@ -103,7 +108,7 @@ static void direct_finish_page(struct squashfs_page_actor *actor)
} }
struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk, struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
struct page **page, int pages, int length) struct page **page, int pages, int length, loff_t start_index)
{ {
struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL); struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
@ -125,7 +130,7 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_
actor->pages = pages; actor->pages = pages;
actor->next_page = 0; actor->next_page = 0;
actor->returned_pages = 0; actor->returned_pages = 0;
actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1); actor->next_index = start_index >> PAGE_SHIFT;
actor->pageaddr = NULL; actor->pageaddr = NULL;
actor->last_page = NULL; actor->last_page = NULL;
actor->alloc_buffer = msblk->decompressor->alloc_buffer; actor->alloc_buffer = msblk->decompressor->alloc_buffer;

View File

@ -29,13 +29,15 @@ extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
int pages, int length); int pages, int length);
extern struct squashfs_page_actor *squashfs_page_actor_init_special( extern struct squashfs_page_actor *squashfs_page_actor_init_special(
struct squashfs_sb_info *msblk, struct squashfs_sb_info *msblk,
struct page **page, int pages, int length); struct page **page, int pages, int length,
loff_t start_index);
static inline struct page *squashfs_page_actor_free(struct squashfs_page_actor *actor) static inline struct page *squashfs_page_actor_free(struct squashfs_page_actor *actor)
{ {
struct page *last_page = actor->last_page; struct page *last_page = actor->next_page == actor->pages ? actor->last_page : ERR_PTR(-EIO);
kfree(actor->tmp_buffer); kfree(actor->tmp_buffer);
kfree(actor); kfree(actor);
return last_page; return last_page;
} }
static inline void *squashfs_first_page(struct squashfs_page_actor *actor) static inline void *squashfs_first_page(struct squashfs_page_actor *actor)

View File

@ -28,17 +28,17 @@ const struct file_operations sysv_dir_operations = {
.fsync = generic_file_fsync, .fsync = generic_file_fsync,
}; };
static void dir_commit_chunk(struct page *page, loff_t pos, unsigned len) static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{ {
struct address_space *mapping = page->mapping; struct address_space *mapping = folio->mapping;
struct inode *dir = mapping->host; struct inode *dir = mapping->host;
block_write_end(NULL, mapping, pos, len, len, page, NULL); block_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (pos+len > dir->i_size) { if (pos+len > dir->i_size) {
i_size_write(dir, pos+len); i_size_write(dir, pos+len);
mark_inode_dirty(dir); mark_inode_dirty(dir);
} }
unlock_page(page); folio_unlock(folio);
} }
static int sysv_handle_dirsync(struct inode *dir) static int sysv_handle_dirsync(struct inode *dir)
@ -52,20 +52,21 @@ static int sysv_handle_dirsync(struct inode *dir)
} }
/* /*
* Calls to dir_get_page()/unmap_and_put_page() must be nested according to the * Calls to dir_get_folio()/folio_release_kmap() must be nested according to the
* rules documented in mm/highmem.rst. * rules documented in mm/highmem.rst.
* *
* NOTE: sysv_find_entry() and sysv_dotdot() act as calls to dir_get_page() * NOTE: sysv_find_entry() and sysv_dotdot() act as calls to dir_get_folio()
* and must be treated accordingly for nesting purposes. * and must be treated accordingly for nesting purposes.
*/ */
static void *dir_get_page(struct inode *dir, unsigned long n, struct page **p) static void *dir_get_folio(struct inode *dir, unsigned long n,
struct folio **foliop)
{ {
struct address_space *mapping = dir->i_mapping; struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);
struct page *page = read_mapping_page(mapping, n, NULL);
if (IS_ERR(page)) if (IS_ERR(folio))
return ERR_CAST(page); return ERR_CAST(folio);
*p = page; *foliop = folio;
return kmap_local_page(page); return kmap_local_folio(folio, 0);
} }
static int sysv_readdir(struct file *file, struct dir_context *ctx) static int sysv_readdir(struct file *file, struct dir_context *ctx)
@ -87,9 +88,9 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
for ( ; n < npages; n++, offset = 0) { for ( ; n < npages; n++, offset = 0) {
char *kaddr, *limit; char *kaddr, *limit;
struct sysv_dir_entry *de; struct sysv_dir_entry *de;
struct page *page; struct folio *folio;
kaddr = dir_get_page(inode, n, &page); kaddr = dir_get_folio(inode, n, &folio);
if (IS_ERR(kaddr)) if (IS_ERR(kaddr))
continue; continue;
de = (struct sysv_dir_entry *)(kaddr+offset); de = (struct sysv_dir_entry *)(kaddr+offset);
@ -103,11 +104,11 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx)
if (!dir_emit(ctx, name, strnlen(name,SYSV_NAMELEN), if (!dir_emit(ctx, name, strnlen(name,SYSV_NAMELEN),
fs16_to_cpu(SYSV_SB(sb), de->inode), fs16_to_cpu(SYSV_SB(sb), de->inode),
DT_UNKNOWN)) { DT_UNKNOWN)) {
unmap_and_put_page(page, kaddr); folio_release_kmap(folio, kaddr);
return 0; return 0;
} }
} }
unmap_and_put_page(page, kaddr); folio_release_kmap(folio, kaddr);
} }
return 0; return 0;
} }
@ -126,39 +127,35 @@ static inline int namecompare(int len, int maxlen,
/* /*
* sysv_find_entry() * sysv_find_entry()
* *
* finds an entry in the specified directory with the wanted name. It * finds an entry in the specified directory with the wanted name.
* returns the cache buffer in which the entry was found, and the entry * It does NOT read the inode of the
* itself (as a parameter - res_dir). It does NOT read the inode of the
* entry - you'll have to do that yourself if you want to. * entry - you'll have to do that yourself if you want to.
* *
* On Success unmap_and_put_page() should be called on *res_page. * On Success folio_release_kmap() should be called on *foliop.
* *
* sysv_find_entry() acts as a call to dir_get_page() and must be treated * sysv_find_entry() acts as a call to dir_get_folio() and must be treated
* accordingly for nesting purposes. * accordingly for nesting purposes.
*/ */
struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_page) struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct folio **foliop)
{ {
const char * name = dentry->d_name.name; const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len; int namelen = dentry->d_name.len;
struct inode * dir = d_inode(dentry->d_parent); struct inode * dir = d_inode(dentry->d_parent);
unsigned long start, n; unsigned long start, n;
unsigned long npages = dir_pages(dir); unsigned long npages = dir_pages(dir);
struct page *page = NULL;
struct sysv_dir_entry *de; struct sysv_dir_entry *de;
*res_page = NULL;
start = SYSV_I(dir)->i_dir_start_lookup; start = SYSV_I(dir)->i_dir_start_lookup;
if (start >= npages) if (start >= npages)
start = 0; start = 0;
n = start; n = start;
do { do {
char *kaddr = dir_get_page(dir, n, &page); char *kaddr = dir_get_folio(dir, n, foliop);
if (!IS_ERR(kaddr)) { if (!IS_ERR(kaddr)) {
de = (struct sysv_dir_entry *)kaddr; de = (struct sysv_dir_entry *)kaddr;
kaddr += PAGE_SIZE - SYSV_DIRSIZE; kaddr += folio_size(*foliop) - SYSV_DIRSIZE;
for ( ; (char *) de <= kaddr ; de++) { for ( ; (char *) de <= kaddr ; de++) {
if (!de->inode) if (!de->inode)
continue; continue;
@ -166,7 +163,7 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_
name, de->name)) name, de->name))
goto found; goto found;
} }
unmap_and_put_page(page, kaddr); folio_release_kmap(*foliop, kaddr);
} }
if (++n >= npages) if (++n >= npages)
@ -177,7 +174,6 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_
found: found:
SYSV_I(dir)->i_dir_start_lookup = n; SYSV_I(dir)->i_dir_start_lookup = n;
*res_page = page;
return de; return de;
} }
@ -186,7 +182,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
struct inode *dir = d_inode(dentry->d_parent); struct inode *dir = d_inode(dentry->d_parent);
const char * name = dentry->d_name.name; const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len; int namelen = dentry->d_name.len;
struct page *page = NULL; struct folio *folio = NULL;
struct sysv_dir_entry * de; struct sysv_dir_entry * de;
unsigned long npages = dir_pages(dir); unsigned long npages = dir_pages(dir);
unsigned long n; unsigned long n;
@ -196,7 +192,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
/* We take care of directory expansion in the same loop */ /* We take care of directory expansion in the same loop */
for (n = 0; n <= npages; n++) { for (n = 0; n <= npages; n++) {
kaddr = dir_get_page(dir, n, &page); kaddr = dir_get_folio(dir, n, &folio);
if (IS_ERR(kaddr)) if (IS_ERR(kaddr))
return PTR_ERR(kaddr); return PTR_ERR(kaddr);
de = (struct sysv_dir_entry *)kaddr; de = (struct sysv_dir_entry *)kaddr;
@ -206,49 +202,49 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode)
goto got_it; goto got_it;
err = -EEXIST; err = -EEXIST;
if (namecompare(namelen, SYSV_NAMELEN, name, de->name)) if (namecompare(namelen, SYSV_NAMELEN, name, de->name))
goto out_page; goto out_folio;
de++; de++;
} }
unmap_and_put_page(page, kaddr); folio_release_kmap(folio, kaddr);
} }
BUG(); BUG();
return -EINVAL; return -EINVAL;
got_it: got_it:
pos = page_offset(page) + offset_in_page(de); pos = folio_pos(folio) + offset_in_folio(folio, de);
lock_page(page); folio_lock(folio);
err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE); err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE);
if (err) if (err)
goto out_unlock; goto out_unlock;
memcpy (de->name, name, namelen); memcpy (de->name, name, namelen);
memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2); memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2);
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
dir_commit_chunk(page, pos, SYSV_DIRSIZE); dir_commit_chunk(folio, pos, SYSV_DIRSIZE);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir); mark_inode_dirty(dir);
err = sysv_handle_dirsync(dir); err = sysv_handle_dirsync(dir);
out_page: out_folio:
unmap_and_put_page(page, kaddr); folio_release_kmap(folio, kaddr);
return err; return err;
out_unlock: out_unlock:
unlock_page(page); folio_unlock(folio);
goto out_page; goto out_folio;
} }
int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page) int sysv_delete_entry(struct sysv_dir_entry *de, struct folio *folio)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = folio->mapping->host;
loff_t pos = page_offset(page) + offset_in_page(de); loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
int err; int err;
lock_page(page); folio_lock(folio);
err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE); err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE);
if (err) { if (err) {
unlock_page(page); folio_unlock(folio);
return err; return err;
} }
de->inode = 0; de->inode = 0;
dir_commit_chunk(page, pos, SYSV_DIRSIZE); dir_commit_chunk(folio, pos, SYSV_DIRSIZE);
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode); mark_inode_dirty(inode);
return sysv_handle_dirsync(inode); return sysv_handle_dirsync(inode);
@ -256,33 +252,33 @@ int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
int sysv_make_empty(struct inode *inode, struct inode *dir) int sysv_make_empty(struct inode *inode, struct inode *dir)
{ {
struct page *page = grab_cache_page(inode->i_mapping, 0); struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
struct sysv_dir_entry * de; struct sysv_dir_entry * de;
char *base; char *kaddr;
int err; int err;
if (!page) if (IS_ERR(folio))
return -ENOMEM; return PTR_ERR(folio);
err = sysv_prepare_chunk(page, 0, 2 * SYSV_DIRSIZE); err = sysv_prepare_chunk(folio, 0, 2 * SYSV_DIRSIZE);
if (err) { if (err) {
unlock_page(page); folio_unlock(folio);
goto fail; goto fail;
} }
base = kmap_local_page(page); kaddr = kmap_local_folio(folio, 0);
memset(base, 0, PAGE_SIZE); memset(kaddr, 0, folio_size(folio));
de = (struct sysv_dir_entry *) base; de = (struct sysv_dir_entry *)kaddr;
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
strcpy(de->name,"."); strcpy(de->name,".");
de++; de++;
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), dir->i_ino); de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), dir->i_ino);
strcpy(de->name,".."); strcpy(de->name,"..");
kunmap_local(base); kunmap_local(kaddr);
dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE); dir_commit_chunk(folio, 0, 2 * SYSV_DIRSIZE);
err = sysv_handle_dirsync(inode); err = sysv_handle_dirsync(inode);
fail: fail:
put_page(page); folio_put(folio);
return err; return err;
} }
@ -292,19 +288,19 @@ fail:
int sysv_empty_dir(struct inode * inode) int sysv_empty_dir(struct inode * inode)
{ {
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
struct page *page = NULL; struct folio *folio = NULL;
unsigned long i, npages = dir_pages(inode); unsigned long i, npages = dir_pages(inode);
char *kaddr; char *kaddr;
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
struct sysv_dir_entry *de; struct sysv_dir_entry *de;
kaddr = dir_get_page(inode, i, &page); kaddr = dir_get_folio(inode, i, &folio);
if (IS_ERR(kaddr)) if (IS_ERR(kaddr))
continue; continue;
de = (struct sysv_dir_entry *)kaddr; de = (struct sysv_dir_entry *)kaddr;
kaddr += PAGE_SIZE-SYSV_DIRSIZE; kaddr += folio_size(folio) - SYSV_DIRSIZE;
for ( ;(char *)de <= kaddr; de++) { for ( ;(char *)de <= kaddr; de++) {
if (!de->inode) if (!de->inode)
@ -321,46 +317,46 @@ int sysv_empty_dir(struct inode * inode)
if (de->name[1] != '.' || de->name[2]) if (de->name[1] != '.' || de->name[2])
goto not_empty; goto not_empty;
} }
unmap_and_put_page(page, kaddr); folio_release_kmap(folio, kaddr);
} }
return 1; return 1;
not_empty: not_empty:
unmap_and_put_page(page, kaddr); folio_release_kmap(folio, kaddr);
return 0; return 0;
} }
/* Releases the page */ /* Releases the page */
int sysv_set_link(struct sysv_dir_entry *de, struct page *page, int sysv_set_link(struct sysv_dir_entry *de, struct folio *folio,
struct inode *inode) struct inode *inode)
{ {
struct inode *dir = page->mapping->host; struct inode *dir = folio->mapping->host;
loff_t pos = page_offset(page) + offset_in_page(de); loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
int err; int err;
lock_page(page); folio_lock(folio);
err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE); err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE);
if (err) { if (err) {
unlock_page(page); folio_unlock(folio);
return err; return err;
} }
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
dir_commit_chunk(page, pos, SYSV_DIRSIZE); dir_commit_chunk(folio, pos, SYSV_DIRSIZE);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir); mark_inode_dirty(dir);
return sysv_handle_dirsync(inode); return sysv_handle_dirsync(inode);
} }
/* /*
* Calls to dir_get_page()/unmap_and_put_page() must be nested according to the * Calls to dir_get_folio()/folio_release_kmap() must be nested according to the
* rules documented in mm/highmem.rst. * rules documented in mm/highmem.rst.
* *
* sysv_dotdot() acts as a call to dir_get_page() and must be treated * sysv_dotdot() acts as a call to dir_get_folio() and must be treated
* accordingly for nesting purposes. * accordingly for nesting purposes.
*/ */
struct sysv_dir_entry *sysv_dotdot(struct inode *dir, struct page **p) struct sysv_dir_entry *sysv_dotdot(struct inode *dir, struct folio **foliop)
{ {
struct sysv_dir_entry *de = dir_get_page(dir, 0, p); struct sysv_dir_entry *de = dir_get_folio(dir, 0, foliop);
if (IS_ERR(de)) if (IS_ERR(de))
return NULL; return NULL;
@ -370,13 +366,13 @@ struct sysv_dir_entry *sysv_dotdot(struct inode *dir, struct page **p)
ino_t sysv_inode_by_name(struct dentry *dentry) ino_t sysv_inode_by_name(struct dentry *dentry)
{ {
struct page *page; struct folio *folio;
struct sysv_dir_entry *de = sysv_find_entry (dentry, &page); struct sysv_dir_entry *de = sysv_find_entry (dentry, &folio);
ino_t res = 0; ino_t res = 0;
if (de) { if (de) {
res = fs16_to_cpu(SYSV_SB(dentry->d_sb), de->inode); res = fs16_to_cpu(SYSV_SB(dentry->d_sb), de->inode);
unmap_and_put_page(page, de); folio_release_kmap(folio, de);
} }
return res; return res;
} }

View File

@ -466,9 +466,9 @@ static int sysv_read_folio(struct file *file, struct folio *folio)
return block_read_full_folio(folio, get_block); return block_read_full_folio(folio, get_block);
} }
int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len) int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{ {
return __block_write_begin(page, pos, len, get_block); return __block_write_begin(folio, pos, len, get_block);
} }
static void sysv_write_failed(struct address_space *mapping, loff_t to) static void sysv_write_failed(struct address_space *mapping, loff_t to)
@ -483,11 +483,11 @@ static void sysv_write_failed(struct address_space *mapping, loff_t to)
static int sysv_write_begin(struct file *file, struct address_space *mapping, static int sysv_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
ret = block_write_begin(mapping, pos, len, pagep, get_block); ret = block_write_begin(mapping, pos, len, foliop, get_block);
if (unlikely(ret)) if (unlikely(ret))
sysv_write_failed(mapping, pos + len); sysv_write_failed(mapping, pos + len);

View File

@ -151,20 +151,20 @@ out_dir:
static int sysv_unlink(struct inode * dir, struct dentry * dentry) static int sysv_unlink(struct inode * dir, struct dentry * dentry)
{ {
struct inode * inode = d_inode(dentry); struct inode * inode = d_inode(dentry);
struct page * page; struct folio *folio;
struct sysv_dir_entry * de; struct sysv_dir_entry * de;
int err; int err;
de = sysv_find_entry(dentry, &page); de = sysv_find_entry(dentry, &folio);
if (!de) if (!de)
return -ENOENT; return -ENOENT;
err = sysv_delete_entry(de, page); err = sysv_delete_entry(de, folio);
if (!err) { if (!err) {
inode_set_ctime_to_ts(inode, inode_get_ctime(dir)); inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode); inode_dec_link_count(inode);
} }
unmap_and_put_page(page, de); folio_release_kmap(folio, de);
return err; return err;
} }
@ -194,28 +194,28 @@ static int sysv_rename(struct mnt_idmap *idmap, struct inode *old_dir,
{ {
struct inode * old_inode = d_inode(old_dentry); struct inode * old_inode = d_inode(old_dentry);
struct inode * new_inode = d_inode(new_dentry); struct inode * new_inode = d_inode(new_dentry);
struct page * dir_page = NULL; struct folio *dir_folio;
struct sysv_dir_entry * dir_de = NULL; struct sysv_dir_entry * dir_de = NULL;
struct page * old_page; struct folio *old_folio;
struct sysv_dir_entry * old_de; struct sysv_dir_entry * old_de;
int err = -ENOENT; int err = -ENOENT;
if (flags & ~RENAME_NOREPLACE) if (flags & ~RENAME_NOREPLACE)
return -EINVAL; return -EINVAL;
old_de = sysv_find_entry(old_dentry, &old_page); old_de = sysv_find_entry(old_dentry, &old_folio);
if (!old_de) if (!old_de)
goto out; goto out;
if (S_ISDIR(old_inode->i_mode)) { if (S_ISDIR(old_inode->i_mode)) {
err = -EIO; err = -EIO;
dir_de = sysv_dotdot(old_inode, &dir_page); dir_de = sysv_dotdot(old_inode, &dir_folio);
if (!dir_de) if (!dir_de)
goto out_old; goto out_old;
} }
if (new_inode) { if (new_inode) {
struct page * new_page; struct folio *new_folio;
struct sysv_dir_entry * new_de; struct sysv_dir_entry * new_de;
err = -ENOTEMPTY; err = -ENOTEMPTY;
@ -223,11 +223,11 @@ static int sysv_rename(struct mnt_idmap *idmap, struct inode *old_dir,
goto out_dir; goto out_dir;
err = -ENOENT; err = -ENOENT;
new_de = sysv_find_entry(new_dentry, &new_page); new_de = sysv_find_entry(new_dentry, &new_folio);
if (!new_de) if (!new_de)
goto out_dir; goto out_dir;
err = sysv_set_link(new_de, new_page, old_inode); err = sysv_set_link(new_de, new_folio, old_inode);
unmap_and_put_page(new_page, new_de); folio_release_kmap(new_folio, new_de);
if (err) if (err)
goto out_dir; goto out_dir;
inode_set_ctime_current(new_inode); inode_set_ctime_current(new_inode);
@ -242,23 +242,23 @@ static int sysv_rename(struct mnt_idmap *idmap, struct inode *old_dir,
inode_inc_link_count(new_dir); inode_inc_link_count(new_dir);
} }
err = sysv_delete_entry(old_de, old_page); err = sysv_delete_entry(old_de, old_folio);
if (err) if (err)
goto out_dir; goto out_dir;
mark_inode_dirty(old_inode); mark_inode_dirty(old_inode);
if (dir_de) { if (dir_de) {
err = sysv_set_link(dir_de, dir_page, new_dir); err = sysv_set_link(dir_de, dir_folio, new_dir);
if (!err) if (!err)
inode_dec_link_count(old_dir); inode_dec_link_count(old_dir);
} }
out_dir: out_dir:
if (dir_de) if (dir_de)
unmap_and_put_page(dir_page, dir_de); folio_release_kmap(dir_folio, dir_de);
out_old: out_old:
unmap_and_put_page(old_page, old_de); folio_release_kmap(old_folio, old_de);
out: out:
return err; return err;
} }

View File

@ -133,8 +133,8 @@ extern void sysv_free_block(struct super_block *, sysv_zone_t);
extern unsigned long sysv_count_free_blocks(struct super_block *); extern unsigned long sysv_count_free_blocks(struct super_block *);
/* itree.c */ /* itree.c */
extern void sysv_truncate(struct inode *); void sysv_truncate(struct inode *);
extern int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len); int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);
/* inode.c */ /* inode.c */
extern struct inode *sysv_iget(struct super_block *, unsigned int); extern struct inode *sysv_iget(struct super_block *, unsigned int);
@ -148,15 +148,15 @@ extern void sysv_destroy_icache(void);
/* dir.c */ /* dir.c */
extern struct sysv_dir_entry *sysv_find_entry(struct dentry *, struct page **); struct sysv_dir_entry *sysv_find_entry(struct dentry *, struct folio **);
extern int sysv_add_link(struct dentry *, struct inode *); int sysv_add_link(struct dentry *, struct inode *);
extern int sysv_delete_entry(struct sysv_dir_entry *, struct page *); int sysv_delete_entry(struct sysv_dir_entry *, struct folio *);
extern int sysv_make_empty(struct inode *, struct inode *); int sysv_make_empty(struct inode *, struct inode *);
extern int sysv_empty_dir(struct inode *); int sysv_empty_dir(struct inode *);
extern int sysv_set_link(struct sysv_dir_entry *, struct page *, int sysv_set_link(struct sysv_dir_entry *, struct folio *,
struct inode *); struct inode *);
extern struct sysv_dir_entry *sysv_dotdot(struct inode *, struct page **); struct sysv_dir_entry *sysv_dotdot(struct inode *, struct folio **);
extern ino_t sysv_inode_by_name(struct dentry *); ino_t sysv_inode_by_name(struct dentry *);
extern const struct inode_operations sysv_file_inode_operations; extern const struct inode_operations sysv_file_inode_operations;

View File

@ -211,7 +211,7 @@ static void release_existing_page_budget(struct ubifs_info *c)
} }
static int write_begin_slow(struct address_space *mapping, static int write_begin_slow(struct address_space *mapping,
loff_t pos, unsigned len, struct page **pagep) loff_t pos, unsigned len, struct folio **foliop)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info; struct ubifs_info *c = inode->i_sb->s_fs_info;
@ -298,7 +298,7 @@ static int write_begin_slow(struct address_space *mapping,
ubifs_release_dirty_inode_budget(c, ui); ubifs_release_dirty_inode_budget(c, ui);
} }
*pagep = &folio->page; *foliop = folio;
return 0; return 0;
} }
@ -414,7 +414,7 @@ static int allocate_budget(struct ubifs_info *c, struct folio *folio,
*/ */
static int ubifs_write_begin(struct file *file, struct address_space *mapping, static int ubifs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info; struct ubifs_info *c = inode->i_sb->s_fs_info;
@ -483,7 +483,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
folio_unlock(folio); folio_unlock(folio);
folio_put(folio); folio_put(folio);
return write_begin_slow(mapping, pos, len, pagep); return write_begin_slow(mapping, pos, len, foliop);
} }
/* /*
@ -492,7 +492,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
* with @ui->ui_mutex locked if we are appending pages, and unlocked * with @ui->ui_mutex locked if we are appending pages, and unlocked
* otherwise. This is an optimization (slightly hacky though). * otherwise. This is an optimization (slightly hacky though).
*/ */
*pagep = &folio->page; *foliop = folio;
return 0; return 0;
} }
@ -524,9 +524,8 @@ static void cancel_budget(struct ubifs_info *c, struct folio *folio,
static int ubifs_write_end(struct file *file, struct address_space *mapping, static int ubifs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct folio *folio = page_folio(page);
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct ubifs_inode *ui = ubifs_inode(inode); struct ubifs_inode *ui = ubifs_inode(inode);
struct ubifs_info *c = inode->i_sb->s_fs_info; struct ubifs_info *c = inode->i_sb->s_fs_info;

View File

@ -62,7 +62,7 @@ static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
end = size & ~PAGE_MASK; end = size & ~PAGE_MASK;
else else
end = PAGE_SIZE; end = PAGE_SIZE;
err = __block_write_begin(&folio->page, 0, end, udf_get_block); err = __block_write_begin(folio, 0, end, udf_get_block);
if (err) { if (err) {
folio_unlock(folio); folio_unlock(folio);
ret = vmf_fs_error(err); ret = vmf_fs_error(err);

View File

@ -246,14 +246,14 @@ static void udf_readahead(struct readahead_control *rac)
static int udf_write_begin(struct file *file, struct address_space *mapping, static int udf_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
struct udf_inode_info *iinfo = UDF_I(file_inode(file)); struct udf_inode_info *iinfo = UDF_I(file_inode(file));
struct folio *folio; struct folio *folio;
int ret; int ret;
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
ret = block_write_begin(mapping, pos, len, pagep, ret = block_write_begin(mapping, pos, len, foliop,
udf_get_block); udf_get_block);
if (unlikely(ret)) if (unlikely(ret))
udf_write_failed(mapping, pos + len); udf_write_failed(mapping, pos + len);
@ -265,7 +265,7 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
mapping_gfp_mask(mapping)); mapping_gfp_mask(mapping));
if (IS_ERR(folio)) if (IS_ERR(folio))
return PTR_ERR(folio); return PTR_ERR(folio);
*pagep = &folio->page; *foliop = folio;
if (!folio_test_uptodate(folio)) if (!folio_test_uptodate(folio))
udf_adinicb_read_folio(folio); udf_adinicb_read_folio(folio);
return 0; return 0;
@ -273,16 +273,14 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
static int udf_write_end(struct file *file, struct address_space *mapping, static int udf_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct inode *inode = file_inode(file); struct inode *inode = file_inode(file);
struct folio *folio;
loff_t last_pos; loff_t last_pos;
if (UDF_I(inode)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) if (UDF_I(inode)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB)
return generic_write_end(file, mapping, pos, len, copied, page, return generic_write_end(file, mapping, pos, len, copied, folio,
fsdata); fsdata);
folio = page_folio(page);
last_pos = pos + copied; last_pos = pos + copied;
if (last_pos > inode->i_size) if (last_pos > inode->i_size)
i_size_write(inode, last_pos); i_size_write(inode, last_pos);

View File

@ -42,18 +42,18 @@ static inline int ufs_match(struct super_block *sb, int len,
return !memcmp(name, de->d_name, len); return !memcmp(name, de->d_name, len);
} }
static void ufs_commit_chunk(struct page *page, loff_t pos, unsigned len) static void ufs_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{ {
struct address_space *mapping = page->mapping; struct address_space *mapping = folio->mapping;
struct inode *dir = mapping->host; struct inode *dir = mapping->host;
inode_inc_iversion(dir); inode_inc_iversion(dir);
block_write_end(NULL, mapping, pos, len, len, page, NULL); block_write_end(NULL, mapping, pos, len, len, folio, NULL);
if (pos+len > dir->i_size) { if (pos+len > dir->i_size) {
i_size_write(dir, pos+len); i_size_write(dir, pos+len);
mark_inode_dirty(dir); mark_inode_dirty(dir);
} }
unlock_page(page); folio_unlock(folio);
} }
static int ufs_handle_dirsync(struct inode *dir) static int ufs_handle_dirsync(struct inode *dir)
@ -66,22 +66,16 @@ static int ufs_handle_dirsync(struct inode *dir)
return err; return err;
} }
static inline void ufs_put_page(struct page *page)
{
kunmap(page);
put_page(page);
}
ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr) ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{ {
ino_t res = 0; ino_t res = 0;
struct ufs_dir_entry *de; struct ufs_dir_entry *de;
struct page *page; struct folio *folio;
de = ufs_find_entry(dir, qstr, &page); de = ufs_find_entry(dir, qstr, &folio);
if (de) { if (de) {
res = fs32_to_cpu(dir->i_sb, de->d_ino); res = fs32_to_cpu(dir->i_sb, de->d_ino);
ufs_put_page(page); folio_release_kmap(folio, de);
} }
return res; return res;
} }
@ -89,43 +83,40 @@ ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
/* Releases the page */ /* Releases the page */
void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
struct page *page, struct inode *inode, struct folio *folio, struct inode *inode,
bool update_times) bool update_times)
{ {
loff_t pos = page_offset(page) + loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
(char *) de - (char *) page_address(page);
unsigned len = fs16_to_cpu(dir->i_sb, de->d_reclen); unsigned len = fs16_to_cpu(dir->i_sb, de->d_reclen);
int err; int err;
lock_page(page); folio_lock(folio);
err = ufs_prepare_chunk(page, pos, len); err = ufs_prepare_chunk(folio, pos, len);
BUG_ON(err); BUG_ON(err);
de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino); de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
ufs_set_de_type(dir->i_sb, de, inode->i_mode); ufs_set_de_type(dir->i_sb, de, inode->i_mode);
ufs_commit_chunk(page, pos, len); ufs_commit_chunk(folio, pos, len);
ufs_put_page(page); folio_release_kmap(folio, de);
if (update_times) if (update_times)
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir); mark_inode_dirty(dir);
ufs_handle_dirsync(dir); ufs_handle_dirsync(dir);
} }
static bool ufs_check_folio(struct folio *folio, char *kaddr)
static bool ufs_check_page(struct page *page)
{ {
struct inode *dir = page->mapping->host; struct inode *dir = folio->mapping->host;
struct super_block *sb = dir->i_sb; struct super_block *sb = dir->i_sb;
char *kaddr = page_address(page);
unsigned offs, rec_len; unsigned offs, rec_len;
unsigned limit = PAGE_SIZE; unsigned limit = folio_size(folio);
const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1; const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
struct ufs_dir_entry *p; struct ufs_dir_entry *p;
char *error; char *error;
if ((dir->i_size >> PAGE_SHIFT) == page->index) { if (dir->i_size < folio_pos(folio) + limit) {
limit = dir->i_size & ~PAGE_MASK; limit = offset_in_folio(folio, dir->i_size);
if (limit & chunk_mask) if (limit & chunk_mask)
goto Ebadsize; goto Ebadsize;
if (!limit) if (!limit)
@ -150,13 +141,13 @@ static bool ufs_check_page(struct page *page)
if (offs != limit) if (offs != limit)
goto Eend; goto Eend;
out: out:
SetPageChecked(page); folio_set_checked(folio);
return true; return true;
/* Too bad, we had an error */ /* Too bad, we had an error */
Ebadsize: Ebadsize:
ufs_error(sb, "ufs_check_page", ufs_error(sb, __func__,
"size of directory #%lu is not a multiple of chunk size", "size of directory #%lu is not a multiple of chunk size",
dir->i_ino dir->i_ino
); );
@ -176,36 +167,40 @@ Espan:
Einumber: Einumber:
error = "inode out of bounds"; error = "inode out of bounds";
bad_entry: bad_entry:
ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - " ufs_error(sb, __func__, "bad entry in directory #%lu: %s - "
"offset=%lu, rec_len=%d, name_len=%d", "offset=%llu, rec_len=%d, name_len=%d",
dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, dir->i_ino, error, folio_pos(folio) + offs,
rec_len, ufs_get_de_namlen(sb, p)); rec_len, ufs_get_de_namlen(sb, p));
goto fail; goto fail;
Eend: Eend:
p = (struct ufs_dir_entry *)(kaddr + offs); p = (struct ufs_dir_entry *)(kaddr + offs);
ufs_error(sb, __func__, ufs_error(sb, __func__,
"entry in directory #%lu spans the page boundary" "entry in directory #%lu spans the page boundary"
"offset=%lu", "offset=%llu",
dir->i_ino, (page->index<<PAGE_SHIFT)+offs); dir->i_ino, folio_pos(folio) + offs);
fail: fail:
return false; return false;
} }
static struct page *ufs_get_page(struct inode *dir, unsigned long n) static void *ufs_get_folio(struct inode *dir, unsigned long n,
struct folio **foliop)
{ {
struct address_space *mapping = dir->i_mapping; struct address_space *mapping = dir->i_mapping;
struct page *page = read_mapping_page(mapping, n, NULL); struct folio *folio = read_mapping_folio(mapping, n, NULL);
if (!IS_ERR(page)) { void *kaddr;
kmap(page);
if (unlikely(!PageChecked(page))) { if (IS_ERR(folio))
if (!ufs_check_page(page)) return ERR_CAST(folio);
goto fail; kaddr = kmap_local_folio(folio, 0);
} if (unlikely(!folio_test_checked(folio))) {
if (!ufs_check_folio(folio, kaddr))
goto fail;
} }
return page; *foliop = folio;
return kaddr;
fail: fail:
ufs_put_page(page); folio_release_kmap(folio, kaddr);
return ERR_PTR(-EIO); return ERR_PTR(-EIO);
} }
@ -231,17 +226,14 @@ ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
fs16_to_cpu(sb, p->d_reclen)); fs16_to_cpu(sb, p->d_reclen));
} }
struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p) struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct folio **foliop)
{ {
struct page *page = ufs_get_page(dir, 0); struct ufs_dir_entry *de = ufs_get_folio(dir, 0, foliop);
struct ufs_dir_entry *de = NULL;
if (!IS_ERR(page)) { if (!IS_ERR(de))
de = ufs_next_entry(dir->i_sb, return ufs_next_entry(dir->i_sb, de);
(struct ufs_dir_entry *)page_address(page));
*p = page; return NULL;
}
return de;
} }
/* /*
@ -253,7 +245,7 @@ struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
* Entry is guaranteed to be valid. * Entry is guaranteed to be valid.
*/ */
struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr, struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
struct page **res_page) struct folio **foliop)
{ {
struct super_block *sb = dir->i_sb; struct super_block *sb = dir->i_sb;
const unsigned char *name = qstr->name; const unsigned char *name = qstr->name;
@ -261,7 +253,6 @@ struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
unsigned reclen = UFS_DIR_REC_LEN(namelen); unsigned reclen = UFS_DIR_REC_LEN(namelen);
unsigned long start, n; unsigned long start, n;
unsigned long npages = dir_pages(dir); unsigned long npages = dir_pages(dir);
struct page *page = NULL;
struct ufs_inode_info *ui = UFS_I(dir); struct ufs_inode_info *ui = UFS_I(dir);
struct ufs_dir_entry *de; struct ufs_dir_entry *de;
@ -270,27 +261,23 @@ struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
if (npages == 0 || namelen > UFS_MAXNAMLEN) if (npages == 0 || namelen > UFS_MAXNAMLEN)
goto out; goto out;
/* OFFSET_CACHE */
*res_page = NULL;
start = ui->i_dir_start_lookup; start = ui->i_dir_start_lookup;
if (start >= npages) if (start >= npages)
start = 0; start = 0;
n = start; n = start;
do { do {
char *kaddr; char *kaddr = ufs_get_folio(dir, n, foliop);
page = ufs_get_page(dir, n);
if (!IS_ERR(page)) { if (!IS_ERR(kaddr)) {
kaddr = page_address(page); de = (struct ufs_dir_entry *)kaddr;
de = (struct ufs_dir_entry *) kaddr;
kaddr += ufs_last_byte(dir, n) - reclen; kaddr += ufs_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) { while ((char *) de <= kaddr) {
if (ufs_match(sb, namelen, name, de)) if (ufs_match(sb, namelen, name, de))
goto found; goto found;
de = ufs_next_entry(sb, de); de = ufs_next_entry(sb, de);
} }
ufs_put_page(page); folio_release_kmap(*foliop, kaddr);
} }
if (++n >= npages) if (++n >= npages)
n = 0; n = 0;
@ -299,7 +286,6 @@ out:
return NULL; return NULL;
found: found:
*res_page = page;
ui->i_dir_start_lookup = n; ui->i_dir_start_lookup = n;
return de; return de;
} }
@ -316,11 +302,10 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
unsigned reclen = UFS_DIR_REC_LEN(namelen); unsigned reclen = UFS_DIR_REC_LEN(namelen);
const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize; const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
unsigned short rec_len, name_len; unsigned short rec_len, name_len;
struct page *page = NULL; struct folio *folio = NULL;
struct ufs_dir_entry *de; struct ufs_dir_entry *de;
unsigned long npages = dir_pages(dir); unsigned long npages = dir_pages(dir);
unsigned long n; unsigned long n;
char *kaddr;
loff_t pos; loff_t pos;
int err; int err;
@ -328,21 +313,19 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
/* /*
* We take care of directory expansion in the same loop. * We take care of directory expansion in the same loop.
* This code plays outside i_size, so it locks the page * This code plays outside i_size, so it locks the folio
* to protect that region. * to protect that region.
*/ */
for (n = 0; n <= npages; n++) { for (n = 0; n <= npages; n++) {
char *kaddr = ufs_get_folio(dir, n, &folio);
char *dir_end; char *dir_end;
page = ufs_get_page(dir, n); if (IS_ERR(kaddr))
err = PTR_ERR(page); return PTR_ERR(kaddr);
if (IS_ERR(page)) folio_lock(folio);
goto out;
lock_page(page);
kaddr = page_address(page);
dir_end = kaddr + ufs_last_byte(dir, n); dir_end = kaddr + ufs_last_byte(dir, n);
de = (struct ufs_dir_entry *)kaddr; de = (struct ufs_dir_entry *)kaddr;
kaddr += PAGE_SIZE - reclen; kaddr += folio_size(folio) - reclen;
while ((char *)de <= kaddr) { while ((char *)de <= kaddr) {
if ((char *)de == dir_end) { if ((char *)de == dir_end) {
/* We hit i_size */ /* We hit i_size */
@ -369,16 +352,15 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
goto got_it; goto got_it;
de = (struct ufs_dir_entry *) ((char *) de + rec_len); de = (struct ufs_dir_entry *) ((char *) de + rec_len);
} }
unlock_page(page); folio_unlock(folio);
ufs_put_page(page); folio_release_kmap(folio, kaddr);
} }
BUG(); BUG();
return -EINVAL; return -EINVAL;
got_it: got_it:
pos = page_offset(page) + pos = folio_pos(folio) + offset_in_folio(folio, de);
(char*)de - (char*)page_address(page); err = ufs_prepare_chunk(folio, pos, rec_len);
err = ufs_prepare_chunk(page, pos, rec_len);
if (err) if (err)
goto out_unlock; goto out_unlock;
if (de->d_ino) { if (de->d_ino) {
@ -395,18 +377,17 @@ got_it:
de->d_ino = cpu_to_fs32(sb, inode->i_ino); de->d_ino = cpu_to_fs32(sb, inode->i_ino);
ufs_set_de_type(sb, de, inode->i_mode); ufs_set_de_type(sb, de, inode->i_mode);
ufs_commit_chunk(page, pos, rec_len); ufs_commit_chunk(folio, pos, rec_len);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir); mark_inode_dirty(dir);
err = ufs_handle_dirsync(dir); err = ufs_handle_dirsync(dir);
/* OFFSET_CACHE */ /* OFFSET_CACHE */
out_put: out_put:
ufs_put_page(page); folio_release_kmap(folio, de);
out:
return err; return err;
out_unlock: out_unlock:
unlock_page(page); folio_unlock(folio);
goto out_put; goto out_put;
} }
@ -444,19 +425,18 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
return 0; return 0;
for ( ; n < npages; n++, offset = 0) { for ( ; n < npages; n++, offset = 0) {
char *kaddr, *limit;
struct ufs_dir_entry *de; struct ufs_dir_entry *de;
struct folio *folio;
char *kaddr = ufs_get_folio(inode, n, &folio);
char *limit;
struct page *page = ufs_get_page(inode, n); if (IS_ERR(kaddr)) {
if (IS_ERR(page)) {
ufs_error(sb, __func__, ufs_error(sb, __func__,
"bad page in #%lu", "bad page in #%lu",
inode->i_ino); inode->i_ino);
ctx->pos += PAGE_SIZE - offset; ctx->pos += PAGE_SIZE - offset;
return -EIO; return PTR_ERR(kaddr);
} }
kaddr = page_address(page);
if (unlikely(need_revalidate)) { if (unlikely(need_revalidate)) {
if (offset) { if (offset) {
offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask); offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
@ -482,13 +462,13 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
ufs_get_de_namlen(sb, de), ufs_get_de_namlen(sb, de),
fs32_to_cpu(sb, de->d_ino), fs32_to_cpu(sb, de->d_ino),
d_type)) { d_type)) {
ufs_put_page(page); folio_release_kmap(folio, de);
return 0; return 0;
} }
} }
ctx->pos += fs16_to_cpu(sb, de->d_reclen); ctx->pos += fs16_to_cpu(sb, de->d_reclen);
} }
ufs_put_page(page); folio_release_kmap(folio, kaddr);
} }
return 0; return 0;
} }
@ -499,19 +479,23 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
* previous entry. * previous entry.
*/ */
int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir, int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
struct page * page) struct folio *folio)
{ {
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
char *kaddr = page_address(page); size_t from, to;
unsigned from = ((char*)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1); char *kaddr;
unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
loff_t pos; loff_t pos;
struct ufs_dir_entry *pde = NULL; struct ufs_dir_entry *de, *pde = NULL;
struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
int err; int err;
UFSD("ENTER\n"); UFSD("ENTER\n");
from = offset_in_folio(folio, dir);
to = from + fs16_to_cpu(sb, dir->d_reclen);
kaddr = (char *)dir - from;
from &= ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
de = (struct ufs_dir_entry *) (kaddr + from);
UFSD("ino %u, reclen %u, namlen %u, name %s\n", UFSD("ino %u, reclen %u, namlen %u, name %s\n",
fs32_to_cpu(sb, de->d_ino), fs32_to_cpu(sb, de->d_ino),
fs16_to_cpu(sb, de->d_reclen), fs16_to_cpu(sb, de->d_reclen),
@ -528,21 +512,20 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
de = ufs_next_entry(sb, de); de = ufs_next_entry(sb, de);
} }
if (pde) if (pde)
from = (char*)pde - (char*)page_address(page); from = offset_in_folio(folio, pde);
pos = folio_pos(folio) + from;
pos = page_offset(page) + from; folio_lock(folio);
lock_page(page); err = ufs_prepare_chunk(folio, pos, to - from);
err = ufs_prepare_chunk(page, pos, to - from);
BUG_ON(err); BUG_ON(err);
if (pde) if (pde)
pde->d_reclen = cpu_to_fs16(sb, to - from); pde->d_reclen = cpu_to_fs16(sb, to - from);
dir->d_ino = 0; dir->d_ino = 0;
ufs_commit_chunk(page, pos, to - from); ufs_commit_chunk(folio, pos, to - from);
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode); mark_inode_dirty(inode);
err = ufs_handle_dirsync(inode); err = ufs_handle_dirsync(inode);
out: out:
ufs_put_page(page); folio_release_kmap(folio, kaddr);
UFSD("EXIT\n"); UFSD("EXIT\n");
return err; return err;
} }
@ -551,26 +534,25 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
{ {
struct super_block * sb = dir->i_sb; struct super_block * sb = dir->i_sb;
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
struct page *page = grab_cache_page(mapping, 0); struct folio *folio = filemap_grab_folio(mapping, 0);
const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize; const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
struct ufs_dir_entry * de; struct ufs_dir_entry * de;
char *base;
int err; int err;
char *kaddr;
if (!page) if (IS_ERR(folio))
return -ENOMEM; return PTR_ERR(folio);
err = ufs_prepare_chunk(page, 0, chunk_size); err = ufs_prepare_chunk(folio, 0, chunk_size);
if (err) { if (err) {
unlock_page(page); folio_unlock(folio);
goto fail; goto fail;
} }
kmap(page); kaddr = kmap_local_folio(folio, 0);
base = (char*)page_address(page); memset(kaddr, 0, folio_size(folio));
memset(base, 0, PAGE_SIZE);
de = (struct ufs_dir_entry *) base; de = (struct ufs_dir_entry *)kaddr;
de->d_ino = cpu_to_fs32(sb, inode->i_ino); de->d_ino = cpu_to_fs32(sb, inode->i_ino);
ufs_set_de_type(sb, de, inode->i_mode); ufs_set_de_type(sb, de, inode->i_mode);
@ -584,12 +566,12 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1)); de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
ufs_set_de_namlen(sb, de, 2); ufs_set_de_namlen(sb, de, 2);
strcpy (de->d_name, ".."); strcpy (de->d_name, "..");
kunmap(page); kunmap_local(kaddr);
ufs_commit_chunk(page, 0, chunk_size); ufs_commit_chunk(folio, 0, chunk_size);
err = ufs_handle_dirsync(inode); err = ufs_handle_dirsync(inode);
fail: fail:
put_page(page); folio_put(folio);
return err; return err;
} }
@ -599,18 +581,17 @@ fail:
int ufs_empty_dir(struct inode * inode) int ufs_empty_dir(struct inode * inode)
{ {
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
struct page *page = NULL; struct folio *folio;
char *kaddr;
unsigned long i, npages = dir_pages(inode); unsigned long i, npages = dir_pages(inode);
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
char *kaddr;
struct ufs_dir_entry *de; struct ufs_dir_entry *de;
page = ufs_get_page(inode, i);
if (IS_ERR(page)) kaddr = ufs_get_folio(inode, i, &folio);
if (IS_ERR(kaddr))
continue; continue;
kaddr = page_address(page);
de = (struct ufs_dir_entry *)kaddr; de = (struct ufs_dir_entry *)kaddr;
kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1); kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);
@ -637,12 +618,12 @@ int ufs_empty_dir(struct inode * inode)
} }
de = ufs_next_entry(sb, de); de = ufs_next_entry(sb, de);
} }
ufs_put_page(page); folio_release_kmap(folio, kaddr);
} }
return 1; return 1;
not_empty: not_empty:
ufs_put_page(page); folio_release_kmap(folio, kaddr);
return 0; return 0;
} }

View File

@ -479,9 +479,9 @@ static int ufs_read_folio(struct file *file, struct folio *folio)
return block_read_full_folio(folio, ufs_getfrag_block); return block_read_full_folio(folio, ufs_getfrag_block);
} }
int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len) int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{ {
return __block_write_begin(page, pos, len, ufs_getfrag_block); return __block_write_begin(folio, pos, len, ufs_getfrag_block);
} }
static void ufs_truncate_blocks(struct inode *); static void ufs_truncate_blocks(struct inode *);
@ -498,11 +498,11 @@ static void ufs_write_failed(struct address_space *mapping, loff_t to)
static int ufs_write_begin(struct file *file, struct address_space *mapping, static int ufs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
int ret; int ret;
ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block); ret = block_write_begin(mapping, pos, len, foliop, ufs_getfrag_block);
if (unlikely(ret)) if (unlikely(ret))
ufs_write_failed(mapping, pos + len); ufs_write_failed(mapping, pos + len);
@ -511,11 +511,11 @@ static int ufs_write_begin(struct file *file, struct address_space *mapping,
static int ufs_write_end(struct file *file, struct address_space *mapping, static int ufs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
int ret; int ret;
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
if (ret < len) if (ret < len)
ufs_write_failed(mapping, pos + len); ufs_write_failed(mapping, pos + len);
return ret; return ret;

View File

@ -209,14 +209,14 @@ static int ufs_unlink(struct inode *dir, struct dentry *dentry)
{ {
struct inode * inode = d_inode(dentry); struct inode * inode = d_inode(dentry);
struct ufs_dir_entry *de; struct ufs_dir_entry *de;
struct page *page; struct folio *folio;
int err = -ENOENT; int err = -ENOENT;
de = ufs_find_entry(dir, &dentry->d_name, &page); de = ufs_find_entry(dir, &dentry->d_name, &folio);
if (!de) if (!de)
goto out; goto out;
err = ufs_delete_entry(dir, de, page); err = ufs_delete_entry(dir, de, folio);
if (err) if (err)
goto out; goto out;
@ -249,28 +249,28 @@ static int ufs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
{ {
struct inode *old_inode = d_inode(old_dentry); struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry); struct inode *new_inode = d_inode(new_dentry);
struct page *dir_page = NULL; struct folio *dir_folio = NULL;
struct ufs_dir_entry * dir_de = NULL; struct ufs_dir_entry * dir_de = NULL;
struct page *old_page; struct folio *old_folio;
struct ufs_dir_entry *old_de; struct ufs_dir_entry *old_de;
int err = -ENOENT; int err = -ENOENT;
if (flags & ~RENAME_NOREPLACE) if (flags & ~RENAME_NOREPLACE)
return -EINVAL; return -EINVAL;
old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page); old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_folio);
if (!old_de) if (!old_de)
goto out; goto out;
if (S_ISDIR(old_inode->i_mode)) { if (S_ISDIR(old_inode->i_mode)) {
err = -EIO; err = -EIO;
dir_de = ufs_dotdot(old_inode, &dir_page); dir_de = ufs_dotdot(old_inode, &dir_folio);
if (!dir_de) if (!dir_de)
goto out_old; goto out_old;
} }
if (new_inode) { if (new_inode) {
struct page *new_page; struct folio *new_folio;
struct ufs_dir_entry *new_de; struct ufs_dir_entry *new_de;
err = -ENOTEMPTY; err = -ENOTEMPTY;
@ -278,10 +278,10 @@ static int ufs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
goto out_dir; goto out_dir;
err = -ENOENT; err = -ENOENT;
new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_folio);
if (!new_de) if (!new_de)
goto out_dir; goto out_dir;
ufs_set_link(new_dir, new_de, new_page, old_inode, 1); ufs_set_link(new_dir, new_de, new_folio, old_inode, 1);
inode_set_ctime_current(new_inode); inode_set_ctime_current(new_inode);
if (dir_de) if (dir_de)
drop_nlink(new_inode); drop_nlink(new_inode);
@ -300,29 +300,24 @@ static int ufs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
*/ */
inode_set_ctime_current(old_inode); inode_set_ctime_current(old_inode);
ufs_delete_entry(old_dir, old_de, old_page); ufs_delete_entry(old_dir, old_de, old_folio);
mark_inode_dirty(old_inode); mark_inode_dirty(old_inode);
if (dir_de) { if (dir_de) {
if (old_dir != new_dir) if (old_dir != new_dir)
ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0); ufs_set_link(old_inode, dir_de, dir_folio, new_dir, 0);
else { else
kunmap(dir_page); folio_release_kmap(dir_folio, new_dir);
put_page(dir_page);
}
inode_dec_link_count(old_dir); inode_dec_link_count(old_dir);
} }
return 0; return 0;
out_dir: out_dir:
if (dir_de) { if (dir_de)
kunmap(dir_page); folio_release_kmap(dir_folio, dir_de);
put_page(dir_page);
}
out_old: out_old:
kunmap(old_page); folio_release_kmap(old_folio, old_de);
put_page(old_page);
out: out:
return err; return err;
} }

View File

@ -99,15 +99,17 @@ extern void ufs_put_cylinder (struct super_block *, unsigned);
/* dir.c */ /* dir.c */
extern const struct inode_operations ufs_dir_inode_operations; extern const struct inode_operations ufs_dir_inode_operations;
extern int ufs_add_link (struct dentry *, struct inode *);
extern ino_t ufs_inode_by_name(struct inode *, const struct qstr *); int ufs_add_link(struct dentry *, struct inode *);
extern int ufs_make_empty(struct inode *, struct inode *); ino_t ufs_inode_by_name(struct inode *, const struct qstr *);
extern struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *, struct page **); int ufs_make_empty(struct inode *, struct inode *);
extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *); struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *,
extern int ufs_empty_dir (struct inode *); struct folio **);
extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **); int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct folio *);
extern void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, int ufs_empty_dir(struct inode *);
struct page *page, struct inode *inode, bool update_times); struct ufs_dir_entry *ufs_dotdot(struct inode *, struct folio **);
void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
struct folio *folio, struct inode *inode, bool update_times);
/* file.c */ /* file.c */
extern const struct inode_operations ufs_file_inode_operations; extern const struct inode_operations ufs_file_inode_operations;

View File

@ -250,9 +250,9 @@ ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
} }
} }
extern dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *); dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
extern void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t); void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
extern int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len); int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);
/* /*
* These functions manipulate ufs buffers * These functions manipulate ufs buffers

View File

@ -300,23 +300,23 @@ static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
static int vboxsf_write_end(struct file *file, struct address_space *mapping, static int vboxsf_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int copied, loff_t pos, unsigned int len, unsigned int copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct vboxsf_handle *sf_handle = file->private_data; struct vboxsf_handle *sf_handle = file->private_data;
unsigned int from = pos & ~PAGE_MASK; size_t from = offset_in_folio(folio, pos);
u32 nwritten = len; u32 nwritten = len;
u8 *buf; u8 *buf;
int err; int err;
/* zero the stale part of the page if we did a short copy */ /* zero the stale part of the folio if we did a short copy */
if (!PageUptodate(page) && copied < len) if (!folio_test_uptodate(folio) && copied < len)
zero_user(page, from + copied, len - copied); folio_zero_range(folio, from + copied, len - copied);
buf = kmap(page); buf = kmap(&folio->page);
err = vboxsf_write(sf_handle->root, sf_handle->handle, err = vboxsf_write(sf_handle->root, sf_handle->handle,
pos, &nwritten, buf + from); pos, &nwritten, buf + from);
kunmap(page); kunmap(&folio->page);
if (err) { if (err) {
nwritten = 0; nwritten = 0;
@ -326,16 +326,16 @@ static int vboxsf_write_end(struct file *file, struct address_space *mapping,
/* mtime changed */ /* mtime changed */
VBOXSF_I(inode)->force_restat = 1; VBOXSF_I(inode)->force_restat = 1;
if (!PageUptodate(page) && nwritten == PAGE_SIZE) if (!folio_test_uptodate(folio) && nwritten == folio_size(folio))
SetPageUptodate(page); folio_mark_uptodate(folio);
pos += nwritten; pos += nwritten;
if (pos > inode->i_size) if (pos > inode->i_size)
i_size_write(inode, pos); i_size_write(inode, pos);
out: out:
unlock_page(page); folio_unlock(folio);
put_page(page); folio_put(folio);
return nwritten; return nwritten;
} }
@ -343,7 +343,7 @@ out:
/* /*
* Note simple_write_begin does not read the page from disk on partial writes * Note simple_write_begin does not read the page from disk on partial writes
* this is ok since vboxsf_write_end only writes the written parts of the * this is ok since vboxsf_write_end only writes the written parts of the
* page and it does not call SetPageUptodate for partial writes. * page and it does not call folio_mark_uptodate for partial writes.
*/ */
const struct address_space_operations vboxsf_reg_aops = { const struct address_space_operations vboxsf_reg_aops = {
.read_folio = vboxsf_read_folio, .read_folio = vboxsf_read_folio,

View File

@ -257,18 +257,18 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
int block_read_full_folio(struct folio *, get_block_t *); int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count); bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
struct page **pagep, get_block_t *get_block); struct folio **foliop, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len, int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block); get_block_t *get_block);
int block_write_end(struct file *, struct address_space *, int block_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned, loff_t, unsigned len, unsigned copied,
struct page *, void *); struct folio *, void *);
int generic_write_end(struct file *, struct address_space *, int generic_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned, loff_t, unsigned len, unsigned copied,
struct page *, void *); struct folio *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to); void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
int cont_write_begin(struct file *, struct address_space *, loff_t, int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, struct page **, void **, unsigned, struct folio **, void **,
get_block_t *, loff_t *); get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size); int generic_cont_expand_simple(struct inode *inode, loff_t size);
void block_commit_write(struct page *page, unsigned int from, unsigned int to); void block_commit_write(struct page *page, unsigned int from, unsigned int to);

View File

@ -408,10 +408,10 @@ struct address_space_operations {
int (*write_begin)(struct file *, struct address_space *mapping, int (*write_begin)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata); struct folio **foliop, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping, int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata); struct folio *folio, void *fsdata);
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */ /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t); sector_t (*bmap)(struct address_space *, sector_t);
@ -3363,7 +3363,7 @@ extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *); extern int simple_empty(struct dentry *);
extern int simple_write_begin(struct file *file, struct address_space *mapping, extern int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata); struct folio **foliop, void **fsdata);
extern const struct address_space_operations ram_aops; extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *); extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *); extern struct inode *alloc_anon_inode(struct super_block *);

View File

@ -3987,7 +3987,6 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
ssize_t written = 0; ssize_t written = 0;
do { do {
struct page *page;
struct folio *folio; struct folio *folio;
size_t offset; /* Offset into folio */ size_t offset; /* Offset into folio */
size_t bytes; /* Bytes to write to folio */ size_t bytes; /* Bytes to write to folio */
@ -4017,11 +4016,10 @@ retry:
} }
status = a_ops->write_begin(file, mapping, pos, bytes, status = a_ops->write_begin(file, mapping, pos, bytes,
&page, &fsdata); &folio, &fsdata);
if (unlikely(status < 0)) if (unlikely(status < 0))
break; break;
folio = page_folio(page);
offset = offset_in_folio(folio, pos); offset = offset_in_folio(folio, pos);
if (bytes > folio_size(folio) - offset) if (bytes > folio_size(folio) - offset)
bytes = folio_size(folio) - offset; bytes = folio_size(folio) - offset;
@ -4033,7 +4031,7 @@ retry:
flush_dcache_folio(folio); flush_dcache_folio(folio);
status = a_ops->write_end(file, mapping, pos, bytes, copied, status = a_ops->write_end(file, mapping, pos, bytes, copied,
page, fsdata); folio, fsdata);
if (unlikely(status != copied)) { if (unlikely(status != copied)) {
iov_iter_revert(i, copied - max(status, 0L)); iov_iter_revert(i, copied - max(status, 0L));
if (unlikely(status < 0)) if (unlikely(status < 0))

View File

@ -2878,7 +2878,7 @@ static const struct inode_operations shmem_short_symlink_operations;
static int static int
shmem_write_begin(struct file *file, struct address_space *mapping, shmem_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, loff_t pos, unsigned len,
struct page **pagep, void **fsdata) struct folio **foliop, void **fsdata)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_inode_info *info = SHMEM_I(inode);
@ -2899,23 +2899,22 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
if (ret) if (ret)
return ret; return ret;
*pagep = folio_file_page(folio, index); if (folio_test_hwpoison(folio) ||
if (PageHWPoison(*pagep)) { (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
folio_unlock(folio); folio_unlock(folio);
folio_put(folio); folio_put(folio);
*pagep = NULL;
return -EIO; return -EIO;
} }
*foliop = folio;
return 0; return 0;
} }
static int static int
shmem_write_end(struct file *file, struct address_space *mapping, shmem_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata) struct folio *folio, void *fsdata)
{ {
struct folio *folio = page_folio(page);
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
if (pos + copied > inode->i_size) if (pos + copied > inode->i_size)