mm: convert page_swap_flags to folio_swap_flags

The only caller already has a folio, so push the folio->page conversion
down a level.

Link: https://lkml.kernel.org/r/20220617175020.717127-21-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author:    Matthew Wilcox (Oracle) <willy@infradead.org>
Date:      2022-06-17 18:50:18 +01:00
Committer: akpm
Parent:    5375336c8c
Commit:    b98c359f1d
Changes:   2 files changed, 4 insertions(+), 4 deletions(-)
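
As context for the diff below, here is a small, self-contained user-space sketch of the pattern this patch applies: the structures, the example swap_info table and the SWP_FS_OPS_EXAMPLE flag value are invented stand-ins for illustration, not the kernel's definitions. It shows the &folio->page conversion moving from the caller into the helper.

/*
 * Self-contained user-space sketch of the conversion pattern; the
 * structures and the swap_info lookup below are invented stand-ins,
 * not the kernel's definitions.
 */
#include <stdio.h>

#define SWP_FS_OPS_EXAMPLE 0x1	/* invented flag bit, not the real SWP_FS_OPS */

struct page { int dummy; };
struct folio { struct page page; };	/* the folio embeds its first page */
struct swap_info { unsigned int flags; };

static struct swap_info example_si = { .flags = SWP_FS_OPS_EXAMPLE };

/* Stand-in for page_swap_info(): maps a page to its swap device info. */
static struct swap_info *page_swap_info(struct page *page)
{
	(void)page;
	return &example_si;
}

/* Old interface: the caller must hand over a page. */
static unsigned int page_swap_flags(struct page *page)
{
	return page_swap_info(page)->flags;
}

/* New interface: the caller passes a folio and the &folio->page
 * conversion is pushed down into the helper. */
static unsigned int folio_swap_flags(struct folio *folio)
{
	return page_swap_info(&folio->page)->flags;
}

int main(void)
{
	struct folio f = { { 0 } };

	/* Before the patch the caller wrote page_swap_flags(&folio->page). */
	printf("old helper: %#x\n", page_swap_flags(&f.page));

	/* After the patch the caller stays in folio terms. */
	printf("new helper: %#x\n", folio_swap_flags(&f));
	return 0;
}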

--- a/mm/swap.h
+++ b/mm/swap.h
@@ -61,9 +61,9 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
 struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
			      struct vm_fault *vmf);
 
-static inline unsigned int page_swap_flags(struct page *page)
+static inline unsigned int folio_swap_flags(struct folio *folio)
 {
-	return page_swap_info(page)->flags;
+	return page_swap_info(&folio->page)->flags;
 }
 #else /* CONFIG_SWAP */
 struct swap_iocb;
@@ -149,7 +149,7 @@ static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
 {
 }
 
-static inline unsigned int page_swap_flags(struct page *page)
+static inline unsigned int folio_swap_flags(struct folio *folio)
 {
 	return 0;
 }

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1572,7 +1572,7 @@ static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
-	return !data_race(page_swap_flags(&folio->page) & SWP_FS_OPS);
+	return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
 }
 
 /*