fs/writeback: Convert inode_switch_wbs_work_fn to folios

This gets the statistics correct for large folios by modifying the
counters by the number of pages in the folio instead of by 1.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
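
The fix is easy to see in isolation: bumping a counter once per folio undercounts any folio larger than a single page, while bumping it by folio_nr_pages() keeps the per-page statistics exact. Below is a minimal standalone sketch of that arithmetic; it is illustrative userspace code, with hypothetical stand-ins for struct folio and folio_nr_pages(), not the kernel's definitions.

/*
 * Illustrative userspace model of the accounting change (not kernel
 * code): "struct folio" and folio_nr_pages() here are stand-ins,
 * reduced to the arithmetic this patch fixes.
 */
#include <assert.h>
#include <stdio.h>

struct folio { long nr_pages; };

static long folio_nr_pages(const struct folio *folio)
{
	return folio->nr_pages;
}

int main(void)
{
	/* One order-0, one order-2 and one order-4 folio: 21 pages total. */
	struct folio folios[] = { { 1 }, { 4 }, { 16 } };
	long per_folio = 0, per_page = 0, pages = 0;

	for (int i = 0; i < 3; i++) {
		per_folio += 1;				/* old: inc_wb_stat() once */
		per_page += folio_nr_pages(&folios[i]);	/* new: wb_stat_mod(..., nr) */
		pages += folios[i].nr_pages;
	}

	printf("pages=%ld old count=%ld new count=%ld\n",
	       pages, per_folio, per_page);
	assert(per_page == pages);	/* 21: exact even for large folios */
	assert(per_folio == 3);		/* the old scheme missed 18 pages */
	return 0;
}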

@@ -372,7 +372,7 @@ static bool inode_do_switch_wbs(struct inode *inode,
 {
 	struct address_space *mapping = inode->i_mapping;
 	XA_STATE(xas, &mapping->i_pages, 0);
-	struct page *page;
+	struct folio *folio;
 	bool switched = false;
 
 	spin_lock(&inode->i_lock);
@@ -389,21 +389,23 @@ static bool inode_do_switch_wbs(struct inode *inode,
 	/*
 	 * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
-	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
-	 * pages actually under writeback.
+	 * to possibly dirty folios while PAGECACHE_TAG_WRITEBACK points to
+	 * folios actually under writeback.
 	 */
-	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
-		if (PageDirty(page)) {
-			dec_wb_stat(old_wb, WB_RECLAIMABLE);
-			inc_wb_stat(new_wb, WB_RECLAIMABLE);
+	xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
+		if (folio_test_dirty(folio)) {
+			long nr = folio_nr_pages(folio);
+			wb_stat_mod(old_wb, WB_RECLAIMABLE, -nr);
+			wb_stat_mod(new_wb, WB_RECLAIMABLE, nr);
 		}
 	}
 
 	xas_set(&xas, 0);
-	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
-		WARN_ON_ONCE(!PageWriteback(page));
-		dec_wb_stat(old_wb, WB_WRITEBACK);
-		inc_wb_stat(new_wb, WB_WRITEBACK);
+	xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
+		long nr = folio_nr_pages(folio);
+		WARN_ON_ONCE(!folio_test_writeback(folio));
+		wb_stat_mod(old_wb, WB_WRITEBACK, -nr);
+		wb_stat_mod(new_wb, WB_WRITEBACK, nr);
 	}
 
 	if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
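
For reference, wb_stat_mod() is the signed-amount generalization introduced earlier in this series; inc_wb_stat() and dec_wb_stat() are its +1/-1 special cases, which is why a folio-sized amount slots in so cleanly here. A rough sketch of the relationship, assuming the helpers in include/linux/backing-dev.h look approximately like this:

/* Approximate sketch of the helpers in include/linux/backing-dev.h. */
static inline void wb_stat_mod(struct bdi_writeback *wb,
			       enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, 1);	/* old loops: one call per folio, hence the undercount */
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, -1);
}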