mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 20:22:09 +00:00
ufs: add ufs_get_locked_folio and ufs_put_locked_folio
Convert the _page variants to call them. Saves a few hidden calls to compound_head(). Link: https://lkml.kernel.org/r/20231016201114.1928083-24-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Andreas Gruenbacher <agruenba@redhat.com> Cc: Pankaj Raghav <p.raghav@samsung.com> Cc: Ryusuke Konishi <konishi.ryusuke@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
44f6857526
commit
5fb7bd50b3
@@ -229,43 +229,50 @@ ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev
|
||||
ufsi->i_u1.i_data[0] = cpu_to_fs32(sb, fs32);
|
||||
}
|
||||
|
||||
/*
 * Compatibility wrapper around ufs_get_locked_folio(): callers that still
 * work in struct page terms get the precise page for @index out of the
 * locked folio, or NULL when no folio was obtained.
 *
 * NOTE(review): ufs_get_locked_folio() can also return an ERR_PTR on read
 * failure, which is truthy here and is passed to folio_file_page() —
 * confirm callers expect that, as the old page-based code had the same
 * ERR_PTR pass-through behaviour.
 */
struct page *ufs_get_locked_page(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = ufs_get_locked_folio(mapping, index);

	if (!folio)
		return NULL;
	return folio_file_page(folio, index);
}
|
||||
|
||||
/**
|
||||
* ufs_get_locked_page() - locate, pin and lock a pagecache page, if not exist
|
||||
* ufs_get_locked_folio() - locate, pin and lock a pagecache folio, if not exist
|
||||
* read it from disk.
|
||||
* @mapping: the address_space to search
|
||||
* @index: the page index
|
||||
*
|
||||
* Locates the desired pagecache page, if not exist we'll read it,
|
||||
* Locates the desired pagecache folio, if not exist we'll read it,
|
||||
* locks it, increments its reference
|
||||
* count and returns its address.
|
||||
*
|
||||
*/
|
||||
|
||||
struct page *ufs_get_locked_page(struct address_space *mapping,
|
||||
struct folio *ufs_get_locked_folio(struct address_space *mapping,
|
||||
pgoff_t index)
|
||||
{
|
||||
struct inode *inode = mapping->host;
|
||||
struct page *page = find_lock_page(mapping, index);
|
||||
if (!page) {
|
||||
page = read_mapping_page(mapping, index, NULL);
|
||||
struct folio *folio = filemap_lock_folio(mapping, index);
|
||||
if (!folio) {
|
||||
folio = read_mapping_folio(mapping, index, NULL);
|
||||
|
||||
if (IS_ERR(page)) {
|
||||
printk(KERN_ERR "ufs_change_blocknr: "
|
||||
"read_mapping_page error: ino %lu, index: %lu\n",
|
||||
if (IS_ERR(folio)) {
|
||||
printk(KERN_ERR "ufs_change_blocknr: read_mapping_folio error: ino %lu, index: %lu\n",
|
||||
mapping->host->i_ino, index);
|
||||
return page;
|
||||
return folio;
|
||||
}
|
||||
|
||||
lock_page(page);
|
||||
folio_lock(folio);
|
||||
|
||||
if (unlikely(page->mapping == NULL)) {
|
||||
if (unlikely(folio->mapping == NULL)) {
|
||||
/* Truncate got there first */
|
||||
unlock_page(page);
|
||||
put_page(page);
|
||||
folio_unlock(folio);
|
||||
folio_put(folio);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
if (!page_has_buffers(page))
|
||||
create_empty_buffers(page, 1 << inode->i_blkbits, 0);
|
||||
return page;
|
||||
if (!folio_buffers(folio))
|
||||
folio_create_empty_buffers(folio, 1 << inode->i_blkbits, 0);
|
||||
return folio;
|
||||
}
|
||||
|
@@ -273,12 +273,17 @@ extern void _ubh_ubhcpymem_(struct ufs_sb_private_info *, unsigned char *, struc
|
||||
extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head *, unsigned char *, unsigned);
|
||||
|
||||
/* This functions works with cache pages*/
|
||||
extern struct page *ufs_get_locked_page(struct address_space *mapping,
|
||||
pgoff_t index);
|
||||
struct page *ufs_get_locked_page(struct address_space *mapping, pgoff_t index);
|
||||
struct folio *ufs_get_locked_folio(struct address_space *mapping, pgoff_t index);
|
||||
static inline void ufs_put_locked_folio(struct folio *folio)
|
||||
{
|
||||
folio_unlock(folio);
|
||||
folio_put(folio);
|
||||
}
|
||||
|
||||
static inline void ufs_put_locked_page(struct page *page)
|
||||
{
|
||||
unlock_page(page);
|
||||
put_page(page);
|
||||
ufs_put_locked_folio(page_folio(page));
|
||||
}
|
||||
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user