afs: Convert invalidatepage to invalidate_folio
We know the page is in the page cache, not the swap cache. If we ever
support folios larger than 2GB, afs_invalidate_dirty() will need to be
fixed, but that's a larger project.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
parent f6bc6fb88c
commit fcf227daed
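For context, here is a minimal sketch of how a filesystem wires up the folio-based ->invalidate_folio address-space operation that this series introduces to replace ->invalidatepage. The filesystem name "myfs" and its private-data handling are hypothetical and not part of this patch; the point is only the new signature, where offset and length are size_t byte counts within a folio that may span several pages.

/*
 * Illustrative sketch only (hypothetical "myfs", not part of this patch):
 * hooking the folio-based ->invalidate_folio operation that replaces
 * ->invalidatepage.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>

static void myfs_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	/*
	 * Only discard per-folio private state when the whole folio is
	 * invalidated; a partial invalidation would instead trim the
	 * filesystem's own dirty-region bookkeeping.
	 */
	if (offset == 0 && length == folio_size(folio) &&
	    folio_get_private(folio))
		folio_detach_private(folio);
}

static const struct address_space_operations myfs_aops = {
	.invalidate_folio	= myfs_invalidate_folio,
};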
@@ -21,8 +21,8 @@
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
 static int afs_readpage(struct file *file, struct page *page);
 static int afs_symlink_readpage(struct file *file, struct page *page);
-static void afs_invalidatepage(struct page *page, unsigned int offset,
-			       unsigned int length);
+static void afs_invalidate_folio(struct folio *folio, size_t offset,
+			       size_t length);
 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 
 static void afs_readahead(struct readahead_control *ractl);
@@ -57,7 +57,7 @@ const struct address_space_operations afs_file_aops = {
 	.set_page_dirty	= afs_set_page_dirty,
 	.launder_page	= afs_launder_page,
 	.releasepage	= afs_releasepage,
-	.invalidatepage	= afs_invalidatepage,
+	.invalidate_folio = afs_invalidate_folio,
 	.write_begin	= afs_write_begin,
 	.write_end	= afs_write_end,
 	.writepage	= afs_writepage,
@@ -67,7 +67,7 @@ const struct address_space_operations afs_file_aops = {
 const struct address_space_operations afs_symlink_aops = {
 	.readpage	= afs_symlink_readpage,
 	.releasepage	= afs_releasepage,
-	.invalidatepage	= afs_invalidatepage,
+	.invalidate_folio = afs_invalidate_folio,
 };
 
 static const struct vm_operations_struct afs_vm_ops = {
@@ -427,8 +427,8 @@ int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
  * Adjust the dirty region of the page on truncation or full invalidation,
  * getting rid of the markers altogether if the region is entirely invalidated.
  */
-static void afs_invalidate_dirty(struct folio *folio, unsigned int offset,
-				 unsigned int length)
+static void afs_invalidate_dirty(struct folio *folio, size_t offset,
+				 size_t length)
 {
 	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
 	unsigned long priv;
@@ -485,16 +485,14 @@ full_invalidate:
 * - release a page and clean up its private data if offset is 0 (indicating
 *   the entire page)
 */
-static void afs_invalidatepage(struct page *page, unsigned int offset,
-			       unsigned int length)
+static void afs_invalidate_folio(struct folio *folio, size_t offset,
+			       size_t length)
 {
-	struct folio *folio = page_folio(page);
-
-	_enter("{%lu},%u,%u", folio_index(folio), offset, length);
-
-	BUG_ON(!PageLocked(page));
+	_enter("{%lu},%zu,%zu", folio->index, offset, length);
 
-	if (PagePrivate(page))
+	BUG_ON(!folio_test_locked(folio));
+
+	if (folio_get_private(folio))
 		afs_invalidate_dirty(folio, offset, length);
 
 	folio_wait_fscache(folio);