vmscan: convert dirty page handling to folios

Mostly this just eliminates calls to compound_head(), but
NR_VMSCAN_IMMEDIATE was being incremented by 1 instead of by nr_pages.
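
To illustrate the accounting change, here is a minimal user-space sketch (not kernel code; struct folio, nr_vmscan_immediate and the two helpers are illustrative stand-ins) contrasting the old per-folio increment with the new per-page update for a large folio:

/*
 * Illustrative user-space sketch, not kernel code: struct folio and
 * nr_vmscan_immediate are stand-ins for the real kernel objects.  It
 * shows why the stat update must scale with the folio size.
 */
#include <stdio.h>

struct folio {
	unsigned long nr_pages;		/* base pages covered by this folio */
};

static unsigned long nr_vmscan_immediate;

/* Old behaviour: one event per folio, regardless of its size. */
static void count_per_folio(const struct folio *folio)
{
	(void)folio;			/* size deliberately ignored */
	nr_vmscan_immediate += 1;
}

/* New behaviour: account every base page the folio covers. */
static void count_per_page(const struct folio *folio)
{
	nr_vmscan_immediate += folio->nr_pages;
}

int main(void)
{
	struct folio order0 = { .nr_pages = 1 };
	struct folio pmd_sized = { .nr_pages = 512 };

	nr_vmscan_immediate = 0;
	count_per_folio(&order0);
	count_per_folio(&pmd_sized);
	printf("per-folio accounting: %lu\n", nr_vmscan_immediate);	/* 2 */

	nr_vmscan_immediate = 0;
	count_per_page(&order0);
	count_per_page(&pmd_sized);
	printf("per-page accounting:  %lu\n", nr_vmscan_immediate);	/* 513 */

	return 0;
}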

Link: https://lkml.kernel.org/r/20220504182857.4013401-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author:    Matthew Wilcox (Oracle)
Date:      2022-05-12 20:23:02 -07:00
Committer: Andrew Morton
parent 09c02e5632
commit 49bd2bf967

mm/vmscan.c

@@ -1787,28 +1787,31 @@ retry:
 			}
 		}
 
-		if (PageDirty(page)) {
+		if (folio_test_dirty(folio)) {
 			/*
-			 * Only kswapd can writeback filesystem pages
+			 * Only kswapd can writeback filesystem folios
 			 * to avoid risk of stack overflow. But avoid
-			 * injecting inefficient single-page IO into
+			 * injecting inefficient single-folio I/O into
 			 * flusher writeback as much as possible: only
-			 * write pages when we've encountered many
-			 * dirty pages, and when we've already scanned
-			 * the rest of the LRU for clean pages and see
-			 * the same dirty pages again (PageReclaim).
+			 * write folios when we've encountered many
+			 * dirty folios, and when we've already scanned
+			 * the rest of the LRU for clean folios and see
+			 * the same dirty folios again (with the reclaim
+			 * flag set).
 			 */
-			if (page_is_file_lru(page) &&
-			    (!current_is_kswapd() || !PageReclaim(page) ||
+			if (folio_is_file_lru(folio) &&
+			    (!current_is_kswapd() ||
+			     !folio_test_reclaim(folio) ||
 			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
 				/*
 				 * Immediately reclaim when written back.
-				 * Similar in principal to deactivate_page()
-				 * except we already have the page isolated
+				 * Similar in principle to deactivate_page()
+				 * except we already have the folio isolated
 				 * and know it's dirty
 				 */
-				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
-				SetPageReclaim(page);
+				node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
+						nr_pages);
+				folio_set_reclaim(folio);
 
 				goto activate_locked;
 			}
@@ -1821,8 +1824,8 @@ retry:
 				goto keep_locked;
 
 			/*
-			 * Page is dirty. Flush the TLB if a writable entry
-			 * potentially exists to avoid CPU writes after IO
+			 * Folio is dirty. Flush the TLB if a writable entry
+			 * potentially exists to avoid CPU writes after I/O
 			 * starts and then write it out here.
 			 */
 			try_to_unmap_flush_dirty();
@@ -1834,23 +1837,24 @@ retry:
 			case PAGE_SUCCESS:
 				stat->nr_pageout += nr_pages;
 
-				if (PageWriteback(page))
+				if (folio_test_writeback(folio))
 					goto keep;
-				if (PageDirty(page))
+				if (folio_test_dirty(folio))
 					goto keep;
 
 				/*
 				 * A synchronous write - probably a ramdisk. Go
-				 * ahead and try to reclaim the page.
+				 * ahead and try to reclaim the folio.
 				 */
-				if (!trylock_page(page))
+				if (!folio_trylock(folio))
 					goto keep;
-				if (PageDirty(page) || PageWriteback(page))
+				if (folio_test_dirty(folio) ||
+				    folio_test_writeback(folio))
 					goto keep_locked;
-				mapping = page_mapping(page);
+				mapping = folio_mapping(folio);
 				fallthrough;
 			case PAGE_CLEAN:
-				; /* try to free the page below */
+				; /* try to free the folio below */
 			}
 		}
 
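
For readers following only this fragment, the PAGE_SUCCESS handling and its fall-through into PAGE_CLEAN can be condensed into a small decision function. The sketch below is an illustrative user-space rendering, not kernel code: the struct, the predicates and the action enum are stand-ins that mirror the branch structure of the hunk above, with KEEP and KEEP_LOCKED standing for the keep and keep_locked labels later in shrink_page_list().

/*
 * User-space sketch of the PAGE_SUCCESS / PAGE_CLEAN handling shown above.
 * All types and helpers here are illustrative stand-ins, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct folio {
	bool dirty;
	bool writeback;
	bool locked;
};

enum action { KEEP, KEEP_LOCKED, TRY_TO_FREE };

static bool folio_trylock(struct folio *folio)
{
	if (folio->locked)
		return false;
	folio->locked = true;
	return true;
}

/* Decide what to do with a folio after pageout() reported PAGE_SUCCESS. */
static enum action after_successful_pageout(struct folio *folio)
{
	/* Writeback still in flight, or the folio was redirtied: leave it. */
	if (folio->writeback || folio->dirty)
		return KEEP;

	/*
	 * A synchronous write (probably a ramdisk) already completed, so try
	 * to take the folio lock and reclaim the folio right away.
	 */
	if (!folio_trylock(folio))
		return KEEP;
	if (folio->dirty || folio->writeback)
		return KEEP_LOCKED;

	/* Equivalent to falling through to PAGE_CLEAN: try to free it. */
	return TRY_TO_FREE;
}

int main(void)
{
	struct folio ramdisk_folio = { .dirty = false, .writeback = false };
	struct folio async_folio = { .dirty = false, .writeback = true };

	printf("%d\n", after_successful_pageout(&ramdisk_folio));	/* TRY_TO_FREE */
	printf("%d\n", after_successful_pageout(&async_folio));	/* KEEP */
	return 0;
}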