mm: zswap: modify zswap_stored_pages to be atomic_long_t

For zswap_store() to support large folios, we need to be able to do a
batch update of zswap_stored_pages upon successful store of all pages
in the folio.  For this, we need to add the value of folio_nr_pages(),
which returns a long, to zswap_stored_pages, so the counter is changed
from atomic_t to atomic_long_t.
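
As an illustrative sketch only (not part of this patch, and assuming
the later large-folio zswap_store() changes in this series), the wider
counter allows a single batched update per folio:

	long nr_pages = folio_nr_pages(folio);	/* returns long */

	/* one batched update for the whole folio, not one inc per page */
	atomic_long_add(nr_pages, &zswap_stored_pages);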

Link: https://lkml.kernel.org/r/20241001053222.6944-6-kanchana.p.sridhar@intel.com
Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
Acked-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Wajdi Feghali <wajdi.k.feghali@intel.com>
Cc: "Zou, Nanhai" <nanhai.zou@intel.com>
Cc: Barry Song <21cnbao@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 fs/proc/meminfo.c     |  2 +-
 include/linux/zswap.h |  2 +-
 mm/zswap.c            | 19 +++++++++++++------
 3 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -91,7 +91,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 #ifdef CONFIG_ZSWAP
 	show_val_kb(m, "Zswap: ", zswap_total_pages());
 	seq_printf(m, "Zswapped: %8lu kB\n",
-		   (unsigned long)atomic_read(&zswap_stored_pages) <<
+		   (unsigned long)atomic_long_read(&zswap_stored_pages) <<
 		   (PAGE_SHIFT - 10));
 #endif
 	show_val_kb(m, "Dirty: ",
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -7,7 +7,7 @@
 
 struct lruvec;
 
-extern atomic_t zswap_stored_pages;
+extern atomic_long_t zswap_stored_pages;
 
 #ifdef CONFIG_ZSWAP
 
diff --git a/mm/zswap.c b/mm/zswap.c
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -43,7 +43,7 @@
 * statistics
 **********************************/
 /* The number of compressed pages currently stored in zswap */
-atomic_t zswap_stored_pages = ATOMIC_INIT(0);
+atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0);
 
 /*
  * The statistics below are not protected from concurrent access for
@@ -802,7 +802,7 @@ static void zswap_entry_free(struct zswap_entry *entry)
 		obj_cgroup_put(entry->objcg);
 	}
 	zswap_entry_cache_free(entry);
-	atomic_dec(&zswap_stored_pages);
+	atomic_long_dec(&zswap_stored_pages);
 }
 
 /*********************************
@@ -1233,7 +1233,7 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
 		nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
 	} else {
 		nr_backing = zswap_total_pages();
-		nr_stored = atomic_read(&zswap_stored_pages);
+		nr_stored = atomic_long_read(&zswap_stored_pages);
 	}
 
 	if (!nr_stored)
@@ -1502,7 +1502,7 @@ bool zswap_store(struct folio *folio)
 	}
 
 	/* update stats */
-	atomic_inc(&zswap_stored_pages);
+	atomic_long_inc(&zswap_stored_pages);
 	count_vm_event(ZSWPOUT);
 
 	return true;
@@ -1654,6 +1654,13 @@ static int debugfs_get_total_size(void *data, u64 *val)
 }
 DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");
 
+static int debugfs_get_stored_pages(void *data, u64 *val)
+{
+	*val = atomic_long_read(&zswap_stored_pages);
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(stored_pages_fops, debugfs_get_stored_pages, NULL, "%llu\n");
+
 static int zswap_debugfs_init(void)
 {
 	if (!debugfs_initialized())
@@ -1677,8 +1684,8 @@ static int zswap_debugfs_init(void)
 			   zswap_debugfs_root, &zswap_written_back_pages);
 	debugfs_create_file("pool_total_size", 0444,
 			    zswap_debugfs_root, NULL, &total_size_fops);
-	debugfs_create_atomic_t("stored_pages", 0444,
-				zswap_debugfs_root, &zswap_stored_pages);
+	debugfs_create_file("stored_pages", 0444,
+			    zswap_debugfs_root, NULL, &stored_pages_fops);
 
 	return 0;
 }
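
Usage note (not part of the patch): switching from debugfs_create_atomic_t()
to a DEFINE_DEBUGFS_ATTRIBUTE() getter keeps the counter readable from
userspace as a plain decimal value.  A minimal read sketch, assuming debugfs
is mounted at the conventional /sys/kernel/debug:

	/* Userspace sketch: read the zswap stored_pages debugfs counter.
	 * Assumes debugfs is mounted at /sys/kernel/debug.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long stored_pages;
		FILE *f = fopen("/sys/kernel/debug/zswap/stored_pages", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fscanf(f, "%llu", &stored_pages) == 1)
			printf("zswap stored pages: %llu\n", stored_pages);
		fclose(f);
		return 0;
	}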