mm/memcg: Convert mem_cgroup_swapout() to take a folio

This removes an assumption that THPs are the only kind of compound
pages and removes a couple of hidden calls to compound_head.  It
also documents that you can't pass a tail page to mem_cgroup_swapout().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
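
For context (illustrative, not part of the commit): every page-based flag test
such as PageLRU() first resolves a possible tail page via compound_head(),
while the folio variants know they already hold a head page. A simplified
sketch of the two patterns, using hypothetical my_*() helpers in place of the
real definitions in include/linux/page-flags.h:

#include <linux/mm_types.h>
#include <linux/page-flags.h>

/* Page-based test: pays for a hidden compound_head() call on every use. */
static inline bool my_page_lru(struct page *page)
{
	return test_bit(PG_lru, &compound_head(page)->flags);
}

/* Folio-based test: a folio is never a tail page, so no lookup is needed. */
static inline bool my_folio_test_lru(struct folio *folio)
{
	return test_bit(PG_lru, &folio->flags);
}

This is why taking a folio removes the hidden compound_head() calls mentioned
above, and why passing a tail page becomes impossible by construction.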
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date:   2021-12-27 21:11:34 -05:00
Commit: 3ecb0087ec (parent: 8927f6473e)
3 changed files with 14 additions and 14 deletions

--- a/include/linux/swap.h
+++ b/include/linux/swap.h

@@ -741,7 +741,7 @@ static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
 #endif
 
 #ifdef CONFIG_MEMCG_SWAP
-extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
+void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
 extern int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
 static inline int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
 {
@@ -761,7 +761,7 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
 extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
 extern bool mem_cgroup_swap_full(struct page *page);
 #else
-static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 {
 }
 

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c

@@ -7116,19 +7116,19 @@ static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
 
 /**
  * mem_cgroup_swapout - transfer a memsw charge to swap
- * @page: page whose memsw charge to transfer
+ * @folio: folio whose memsw charge to transfer
  * @entry: swap entry to move the charge to
  *
- * Transfer the memsw charge of @page to @entry.
+ * Transfer the memsw charge of @folio to @entry.
  */
-void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 {
 	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned int nr_entries;
 	unsigned short oldid;
 
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-	VM_BUG_ON_PAGE(page_count(page), page);
+	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
 
 	if (mem_cgroup_disabled())
 		return;
@@ -7136,9 +7136,9 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 
 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
 		return;
 
-	memcg = page_memcg(page);
+	memcg = folio_memcg(folio);
 
-	VM_WARN_ON_ONCE_PAGE(!memcg, page);
+	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
 	if (!memcg)
 		return;
@@ -7148,16 +7148,16 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	 * ancestor for the swap instead and transfer the memory+swap charge.
 	 */
 	swap_memcg = mem_cgroup_id_get_online(memcg);
-	nr_entries = thp_nr_pages(page);
+	nr_entries = folio_nr_pages(folio);
 	/* Get references for the tail pages, too */
 	if (nr_entries > 1)
 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
 				   nr_entries);
-	VM_BUG_ON_PAGE(oldid, page);
+	VM_BUG_ON_FOLIO(oldid, folio);
 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
 
-	page->memcg_data = 0;
+	folio->memcg_data = 0;
 
 	if (!mem_cgroup_is_root(memcg))
 		page_counter_uncharge(&memcg->memory, nr_entries);
@@ -7176,7 +7176,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	 */
 	VM_BUG_ON(!irqs_disabled());
 	mem_cgroup_charge_statistics(memcg, -nr_entries);
-	memcg_check_events(memcg, page_to_nid(page));
+	memcg_check_events(memcg, folio_nid(folio));
 
 	css_put(&memcg->css);
 }

--- a/mm/vmscan.c
+++ b/mm/vmscan.c

@@ -1288,7 +1288,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	if (PageSwapCache(page)) {
 		swp_entry_t swap = { .val = page_private(page) };
-		mem_cgroup_swapout(page, swap);
+		mem_cgroup_swapout(folio, swap);
 		if (reclaimed && !mapping_exiting(mapping))
 			shadow = workingset_eviction(folio, target_memcg);
 		__delete_from_swap_cache(page, swap, shadow);
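
A caller-side note (illustrative, not from this commit): __remove_mapping()
already has the folio in hand from earlier conversions, but a caller holding
only a struct page can derive the folio cheaply before calling the new
interface:

/* Sketch of a call site that still holds a struct page. */
struct folio *folio = page_folio(page);	/* resolves tail pages to the head */
mem_cgroup_swapout(folio, swap);

page_folio() performs the compound_head() lookup once, up front, instead of
repeating it inside every helper the function calls.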