Mirror of https://github.com/torvalds/linux.git, synced 2024-11-22 12:11:40 +00:00.
btrfs: Convert btrfs to read_folio
This is a "weak" conversion which converts straight back to using pages. A full conversion should be performed at some point, hopefully by someone familiar with the filesystem.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Commit: fb12489b0d (parent: a13fe6928a)
@@ -3269,7 +3269,7 @@ void btrfs_split_delalloc_extent(struct inode *inode,
 			       struct extent_state *orig, u64 split);
 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end);
 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
-int btrfs_readpage(struct file *file, struct page *page);
+int btrfs_read_folio(struct file *file, struct folio *folio);
 void btrfs_evict_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
@@ -1307,11 +1307,12 @@ static int prepare_uptodate_page(struct inode *inode,
 				 struct page *page, u64 pos,
 				 bool force_uptodate)
 {
+	struct folio *folio = page_folio(page);
 	int ret = 0;
 
 	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
 	    !PageUptodate(page)) {
-		ret = btrfs_readpage(NULL, page);
+		ret = btrfs_read_folio(NULL, folio);
 		if (ret)
 			return ret;
 		lock_page(page);
@@ -1321,7 +1322,7 @@ static int prepare_uptodate_page(struct inode *inode,
 		}
 
 		/*
-		 * Since btrfs_readpage() will unlock the page before it
+		 * Since btrfs_read_folio() will unlock the folio before it
 		 * returns, there is a window where btrfs_releasepage() can be
 		 * called to release the page. Here we check both inode
 		 * mapping and PagePrivate() to make sure the page was not
@@ -465,7 +465,7 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
 
 		io_ctl->pages[i] = page;
 		if (uptodate && !PageUptodate(page)) {
-			btrfs_readpage(NULL, page);
+			btrfs_read_folio(NULL, page_folio(page));
 			lock_page(page);
 			if (page->mapping != inode->i_mapping) {
 				btrfs_err(BTRFS_I(inode)->root->fs_info,
@@ -4714,7 +4714,7 @@ again:
 		goto out_unlock;
 
 	if (!PageUptodate(page)) {
-		ret = btrfs_readpage(NULL, page);
+		ret = btrfs_read_folio(NULL, page_folio(page));
 		lock_page(page);
 		if (page->mapping != mapping) {
 			unlock_page(page);
@@ -8113,8 +8113,9 @@ static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
 }
 
-int btrfs_readpage(struct file *file, struct page *page)
+int btrfs_read_folio(struct file *file, struct folio *folio)
 {
+	struct page *page = &folio->page;
 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
 	u64 start = page_offset(page);
 	u64 end = start + PAGE_SIZE - 1;
@@ -11357,7 +11358,7 @@ static const struct file_operations btrfs_dir_file_operations = {
  * For now we're avoiding this by dropping bmap.
  */
 static const struct address_space_operations btrfs_aops = {
-	.readpage	= btrfs_readpage,
+	.read_folio	= btrfs_read_folio,
 	.writepage	= btrfs_writepage,
 	.writepages	= btrfs_writepages,
 	.readahead	= btrfs_readahead,
@@ -1359,7 +1359,7 @@ again:
 	 * make it uptodate.
 	 */
 	if (!PageUptodate(page)) {
-		btrfs_readpage(NULL, page);
+		btrfs_read_folio(NULL, page_folio(page));
 		lock_page(page);
 		if (page->mapping != mapping || !PagePrivate(page)) {
 			unlock_page(page);
@@ -1101,7 +1101,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 			continue;
 
 		/*
-		 * if we are modifying block in fs tree, wait for readpage
+		 * if we are modifying block in fs tree, wait for read_folio
 		 * to complete and drop the extent cache
 		 */
 		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
@@ -1563,7 +1563,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
 			end = (u64)-1;
 		}
 
-		/* the lock_extent waits for readpage to complete */
+		/* the lock_extent waits for read_folio to complete */
 		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
 		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
 		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
@@ -2818,7 +2818,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
 	 * Subpage can't handle page with DIRTY but without UPTODATE
 	 * bit as it can lead to the following deadlock:
 	 *
-	 * btrfs_readpage()
+	 * btrfs_read_folio()
 	 * | Page already *locked*
 	 * |- btrfs_lock_and_flush_ordered_range()
 	 *    |- btrfs_start_ordered_extent()
@@ -2972,7 +2972,7 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 				   last_index + 1 - page_index);
 
 		if (!PageUptodate(page)) {
-			btrfs_readpage(NULL, page);
+			btrfs_read_folio(NULL, page_folio(page));
 			lock_page(page);
 			if (!PageUptodate(page)) {
 				ret = -EIO;
@@ -4991,7 +4991,7 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
 		}
 
 		if (!PageUptodate(page)) {
-			btrfs_readpage(NULL, page);
+			btrfs_read_folio(NULL, page_folio(page));
 			lock_page(page);
 			if (!PageUptodate(page)) {
 				unlock_page(page);
Loading…
Reference in New Issue
Block a user