mm: use free_unref_folios() in put_pages_list()
Break up the list of folios into batches here so that the folios are more
likely to be cache hot when doing the rest of the processing.

Link: https://lkml.kernel.org/r/20240227174254.710559-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 7c33b8c422
commit 24835f899c
 mm/swap.c | 17
@@ -138,22 +138,25 @@ EXPORT_SYMBOL(__folio_put);
  */
 void put_pages_list(struct list_head *pages)
 {
-	struct folio *folio, *next;
+	struct folio_batch fbatch;
+	struct folio *folio;
 
-	list_for_each_entry_safe(folio, next, pages, lru) {
-		if (!folio_put_testzero(folio)) {
-			list_del(&folio->lru);
+	folio_batch_init(&fbatch);
+	list_for_each_entry(folio, pages, lru) {
+		if (!folio_put_testzero(folio))
 			continue;
-		}
 		if (folio_test_large(folio)) {
-			list_del(&folio->lru);
 			__folio_put_large(folio);
 			continue;
 		}
 		/* LRU flag must be clear because it's passed using the lru */
+		if (folio_batch_add(&fbatch, folio) > 0)
+			continue;
+		free_unref_folios(&fbatch);
 	}
 
-	free_unref_page_list(pages);
+	if (fbatch.nr)
+		free_unref_folios(&fbatch);
 	INIT_LIST_HEAD(pages);
 }
 EXPORT_SYMBOL(put_pages_list);
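For illustration only, here is a self-contained userspace sketch of the
batch-and-flush pattern this patch applies: entries are accumulated in a
small fixed-size batch and processed together while they are still cache
hot, with a final flush for a partially filled batch. All names here
(struct batch, batch_add, batch_process, BATCH_SIZE) are hypothetical
stand-ins; the kernel code uses struct folio_batch, folio_batch_add() and
free_unref_folios() exactly as shown in the diff above.

#include <stdio.h>

#define BATCH_SIZE 15	/* arbitrary small batch size for this sketch */

struct batch {
	unsigned int nr;
	int items[BATCH_SIZE];
};

/* Add an entry; returns the space left in the batch (0 when full). */
static unsigned int batch_add(struct batch *b, int item)
{
	b->items[b->nr++] = item;
	return BATCH_SIZE - b->nr;
}

/* Process and empty the batch while its entries are still cache hot. */
static void batch_process(struct batch *b)
{
	for (unsigned int i = 0; i < b->nr; i++)
		printf("freeing item %d\n", b->items[i]);
	b->nr = 0;
}

int main(void)
{
	struct batch b = { .nr = 0 };

	for (int i = 0; i < 40; i++) {
		if (batch_add(&b, i) > 0)
			continue;	/* batch not full yet */
		batch_process(&b);	/* drain a full batch */
	}
	if (b.nr)			/* flush the partial batch */
		batch_process(&b);
	return 0;
}

As with folio_batch_add(), batch_add() returns the space remaining, so the
loop drains the batch exactly when it fills, and the trailing check handles
whatever is left once the input list runs out.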