forked from Minki/linux
mm/memcg: Convert uncharge_page() to uncharge_folio()
Use a folio rather than a page to ensure that we're only operating on base or head pages, and not tail pages. Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: David Howells <dhowells@redhat.com> Acked-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
parent
8f425e4ed0
commit
c4ed6ebfcb
@@ -6794,24 +6794,23 @@ static void uncharge_batch(const struct uncharge_gather *ug)
|
||||
memcg_check_events(ug->memcg, ug->nid);
|
||||
local_irq_restore(flags);
|
||||
|
||||
/* drop reference from uncharge_page */
|
||||
/* drop reference from uncharge_folio */
|
||||
css_put(&ug->memcg->css);
|
||||
}
|
||||
|
||||
static void uncharge_page(struct page *page, struct uncharge_gather *ug)
|
||||
static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
|
||||
{
|
||||
struct folio *folio = page_folio(page);
|
||||
unsigned long nr_pages;
|
||||
long nr_pages;
|
||||
struct mem_cgroup *memcg;
|
||||
struct obj_cgroup *objcg;
|
||||
bool use_objcg = PageMemcgKmem(page);
|
||||
bool use_objcg = folio_memcg_kmem(folio);
|
||||
|
||||
VM_BUG_ON_PAGE(PageLRU(page), page);
|
||||
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
|
||||
|
||||
/*
|
||||
* Nobody should be changing or seriously looking at
|
||||
* page memcg or objcg at this point, we have fully
|
||||
* exclusive access to the page.
|
||||
* folio memcg or objcg at this point, we have fully
|
||||
* exclusive access to the folio.
|
||||
*/
|
||||
if (use_objcg) {
|
||||
objcg = __folio_objcg(folio);
|
||||
@@ -6833,19 +6832,19 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
|
||||
uncharge_gather_clear(ug);
|
||||
}
|
||||
ug->memcg = memcg;
|
||||
ug->nid = page_to_nid(page);
|
||||
ug->nid = folio_nid(folio);
|
||||
|
||||
/* pairs with css_put in uncharge_batch */
|
||||
css_get(&memcg->css);
|
||||
}
|
||||
|
||||
nr_pages = compound_nr(page);
|
||||
nr_pages = folio_nr_pages(folio);
|
||||
|
||||
if (use_objcg) {
|
||||
ug->nr_memory += nr_pages;
|
||||
ug->nr_kmem += nr_pages;
|
||||
|
||||
page->memcg_data = 0;
|
||||
folio->memcg_data = 0;
|
||||
obj_cgroup_put(objcg);
|
||||
} else {
|
||||
/* LRU pages aren't accounted at the root level */
|
||||
@@ -6853,7 +6852,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
|
||||
ug->nr_memory += nr_pages;
|
||||
ug->pgpgout++;
|
||||
|
||||
page->memcg_data = 0;
|
||||
folio->memcg_data = 0;
|
||||
}
|
||||
|
||||
css_put(&memcg->css);
|
||||
@@ -6874,7 +6873,7 @@ void __mem_cgroup_uncharge(struct page *page)
|
||||
return;
|
||||
|
||||
uncharge_gather_clear(&ug);
|
||||
uncharge_page(page, &ug);
|
||||
uncharge_folio(page_folio(page), &ug);
|
||||
uncharge_batch(&ug);
|
||||
}
|
||||
|
||||
@@ -6888,11 +6887,11 @@ void __mem_cgroup_uncharge(struct page *page)
|
||||
void __mem_cgroup_uncharge_list(struct list_head *page_list)
|
||||
{
|
||||
struct uncharge_gather ug;
|
||||
struct page *page;
|
||||
struct folio *folio;
|
||||
|
||||
uncharge_gather_clear(&ug);
|
||||
list_for_each_entry(page, page_list, lru)
|
||||
uncharge_page(page, &ug);
|
||||
list_for_each_entry(folio, page_list, lru)
|
||||
uncharge_folio(folio, &ug);
|
||||
if (ug.memcg)
|
||||
uncharge_batch(&ug);
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user