Mirror of https://github.com/torvalds/linux.git (synced 2024-11-23 20:51:44 +00:00)
buffer: Convert __block_write_begin() to take a folio
Almost all callers have a folio now, so change __block_write_begin() to take
a folio and remove a call to compound_head().

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit 9f04609f74
parent 7f90d7f1bc
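The caller-side pattern the commit describes, as a minimal sketch: foofs_prepare_chunk and foofs_get_block are made-up names standing in for any block-based filesystem helper; __block_write_begin() is the real interface changed by this patch.

#include <linux/buffer_head.h>  /* __block_write_begin(), get_block_t */

/* Stand-in for a filesystem's get_block_t callback, defined elsewhere. */
int foofs_get_block(struct inode *inode, sector_t iblock,
                    struct buffer_head *bh_result, int create);

/*
 * Before this patch the helper already held a folio but had to pass its
 * first page:
 *
 *      return __block_write_begin(&folio->page, pos, len, foofs_get_block);
 *
 * With the folio-taking prototype it passes the folio straight through:
 */
static int foofs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
        return __block_write_begin(folio, pos, len, foofs_get_block);
}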
@@ -2168,11 +2168,10 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
         return err;
 }
 
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
                 get_block_t *get_block)
 {
-        return __block_write_begin_int(page_folio(page), pos, len, get_block,
-                                       NULL);
+        return __block_write_begin_int(folio, pos, len, get_block, NULL);
 }
 EXPORT_SYMBOL(__block_write_begin);
 
@@ -434,7 +434,7 @@ int ext2_inode_by_name(struct inode *dir, const struct qstr *child, ino_t *ino)
 
 static int ext2_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
 {
-        return __block_write_begin(&folio->page, pos, len, ext2_get_block);
+        return __block_write_begin(folio, pos, len, ext2_get_block);
 }
 
 static int ext2_handle_dirsync(struct inode *dir)
@@ -601,10 +601,10 @@ retry:
                 goto out;
 
         if (ext4_should_dioread_nolock(inode)) {
-                ret = __block_write_begin(&folio->page, from, to,
+                ret = __block_write_begin(folio, from, to,
                                           ext4_get_block_unwritten);
         } else
-                ret = __block_write_begin(&folio->page, from, to, ext4_get_block);
+                ret = __block_write_begin(folio, from, to, ext4_get_block);
 
         if (!ret && ext4_should_journal_data(inode)) {
                 ret = ext4_walk_page_buffers(handle, inode,
@@ -856,7 +856,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
                 goto out;
         }
 
-        ret = __block_write_begin(&folio->page, 0, inline_size,
+        ret = __block_write_begin(folio, 0, inline_size,
                                   ext4_da_get_block_prep);
         if (ret) {
                 up_read(&EXT4_I(inode)->xattr_sem);
@@ -1224,10 +1224,10 @@ retry_journal:
         ret = ext4_block_write_begin(folio, pos, len, ext4_get_block);
 #else
         if (ext4_should_dioread_nolock(inode))
-                ret = __block_write_begin(&folio->page, pos, len,
+                ret = __block_write_begin(folio, pos, len,
                                           ext4_get_block_unwritten);
         else
-                ret = __block_write_begin(&folio->page, pos, len, ext4_get_block);
+                ret = __block_write_begin(folio, pos, len, ext4_get_block);
 #endif
         if (!ret && ext4_should_journal_data(inode)) {
                 ret = ext4_walk_page_buffers(handle, inode,
@@ -2962,7 +2962,7 @@ retry:
 #ifdef CONFIG_FS_ENCRYPTION
         ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
 #else
-        ret = __block_write_begin(&folio->page, pos, len, ext4_da_get_block_prep);
+        ret = __block_write_begin(folio, pos, len, ext4_da_get_block_prep);
 #endif
         if (ret < 0) {
                 folio_unlock(folio);
@@ -6216,7 +6216,7 @@ retry_alloc:
         if (folio_pos(folio) + len > size)
                 len = size - folio_pos(folio);
 
-        err = __block_write_begin(&folio->page, 0, len, ext4_get_block);
+        err = __block_write_begin(folio, 0, len, ext4_get_block);
         if (!err) {
                 ret = VM_FAULT_SIGBUS;
                 if (ext4_journal_folio_buffers(handle, folio, len))
@@ -429,7 +429,7 @@ static int minix_read_folio(struct file *file, struct folio *folio)
 
 int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
 {
-        return __block_write_begin(&folio->page, pos, len, minix_get_block);
+        return __block_write_begin(folio, pos, len, minix_get_block);
 }
 
 static void minix_write_failed(struct address_space *mapping, loff_t to)
@@ -83,7 +83,7 @@ static int nilfs_prepare_chunk(struct folio *folio, unsigned int from,
 {
         loff_t pos = folio_pos(folio) + from;
 
-        return __block_write_begin(&folio->page, pos, to - from, nilfs_get_block);
+        return __block_write_begin(folio, pos, to - from, nilfs_get_block);
 }
 
 static void nilfs_commit_chunk(struct folio *folio,
@@ -804,7 +804,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
          * __block_write_begin and block_commit_write to zero the
          * whole block.
          */
-        ret = __block_write_begin(&folio->page, block_start + 1, 0,
+        ret = __block_write_begin(folio, block_start + 1, 0,
                                   ocfs2_get_block);
         if (ret < 0) {
                 mlog_errno(ret);
@@ -2198,7 +2198,7 @@ static int grab_tail_page(struct inode *inode,
         /* start within the page of the last block in the file */
         start = (offset / blocksize) * blocksize;
 
-        error = __block_write_begin(&folio->page, start, offset - start,
+        error = __block_write_begin(folio, start, offset - start,
                                     reiserfs_get_block_create_0);
         if (error)
                 goto unlock;
@@ -2762,7 +2762,7 @@ static int reiserfs_write_begin(struct file *file,
                 old_ref = th->t_refcount;
                 th->t_refcount++;
         }
-        ret = __block_write_begin(&folio->page, pos, len, reiserfs_get_block);
+        ret = __block_write_begin(folio, pos, len, reiserfs_get_block);
         if (ret && reiserfs_transaction_running(inode->i_sb)) {
                 struct reiserfs_transaction_handle *th = current->journal_info;
                 /*
@@ -2822,7 +2822,7 @@ int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
                 th->t_refcount++;
         }
 
-        ret = __block_write_begin(page, from, len, reiserfs_get_block);
+        ret = __block_write_begin(page_folio(page), from, len, reiserfs_get_block);
         if (ret && reiserfs_transaction_running(inode->i_sb)) {
                 struct reiserfs_transaction_handle *th = current->journal_info;
                 /*
@@ -468,7 +468,7 @@ static int sysv_read_folio(struct file *file, struct folio *folio)
 
 int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
 {
-        return __block_write_begin(&folio->page, pos, len, get_block);
+        return __block_write_begin(folio, pos, len, get_block);
 }
 
 static void sysv_write_failed(struct address_space *mapping, loff_t to)
@@ -62,7 +62,7 @@ static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
                 end = size & ~PAGE_MASK;
         else
                 end = PAGE_SIZE;
-        err = __block_write_begin(&folio->page, 0, end, udf_get_block);
+        err = __block_write_begin(folio, 0, end, udf_get_block);
         if (err) {
                 folio_unlock(folio);
                 ret = vmf_fs_error(err);
@@ -481,7 +481,7 @@ static int ufs_read_folio(struct file *file, struct folio *folio)
 
 int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
 {
-        return __block_write_begin(&folio->page, pos, len, ufs_getfrag_block);
+        return __block_write_begin(folio, pos, len, ufs_getfrag_block);
 }
 
 static void ufs_truncate_blocks(struct inode *);
@@ -259,7 +259,7 @@ int block_read_full_folio(struct folio *, get_block_t *);
 bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
                 struct folio **foliop, get_block_t *get_block);
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
                 get_block_t *get_block);
 int block_write_end(struct file *, struct address_space *,
                 loff_t, unsigned len, unsigned copied,
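One caller, __reiserfs_write_begin(), still receives a struct page; the patch bridges it to the new prototype with page_folio(). A minimal sketch of that idiom under the same assumptions as above (the foofs_* names are hypothetical; page_folio() and __block_write_begin() are the real kernel interfaces):

#include <linux/buffer_head.h>
#include <linux/mm.h>           /* page_folio() */

int foofs_get_block(struct inode *inode, sector_t iblock,
                    struct buffer_head *bh_result, int create);

/*
 * A legacy entry point that still receives a struct page converts at the
 * call site, just as the __reiserfs_write_begin() hunk above does.
 */
int foofs_legacy_write_begin(struct page *page, unsigned from, unsigned len)
{
        return __block_write_begin(page_folio(page), from, len, foofs_get_block);
}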