khugepaged: use a folio throughout hpage_collapse_scan_file()

Replace the use of pages with folios.  Saves a few calls to
compound_head() and removes some uses of obsolete functions.

Link: https://lkml.kernel.org/r/20240403171838.1445826-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 43849758fd
parent 8d1e24c0b8
author Matthew Wilcox (Oracle) <willy@infradead.org> 2024-04-03 18:18:36 +01:00
committer Andrew Morton <akpm@linux-foundation.org>

 2 files changed, 19 insertions(+), 20 deletions(-)
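As a reader aid, a minimal illustrative sketch (not part of the patch) of the saving the commit message refers to: page-based helpers such as PageLRU() and page_count() resolve compound_head() internally on every call, whereas the folio helpers adopted below act on the head page directly. The wrapper function here is hypothetical; the folio calls are the real APIs the diff swaps in.

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/page-flags.h>

/*
 * Hypothetical helper, for illustration only: the page-based form in
 * each comment repeats the head-page lookup, while the folio form
 * does it once, in page_folio().
 */
static bool is_pmd_sized_folio(struct page *page, pgoff_t start)
{
	struct folio *folio = page_folio(page);	/* one head lookup, up front */

	/* was: PageTransCompound(page) */
	if (!folio_test_large(folio))
		return false;

	/* was: compound_order(compound_head(page)) == HPAGE_PMD_ORDER &&
	 *      compound_head(page)->index == start
	 */
	return folio_order(folio) == HPAGE_PMD_ORDER && folio->index == start;
}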

diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -174,10 +174,10 @@ TRACE_EVENT(mm_collapse_huge_page_swapin,
 
 TRACE_EVENT(mm_khugepaged_scan_file,
 
-	TP_PROTO(struct mm_struct *mm, struct page *page, struct file *file,
+	TP_PROTO(struct mm_struct *mm, struct folio *folio, struct file *file,
 		 int present, int swap, int result),
 
-	TP_ARGS(mm, page, file, present, swap, result),
+	TP_ARGS(mm, folio, file, present, swap, result),
 
 	TP_STRUCT__entry(
 		__field(struct mm_struct *, mm)
@@ -190,7 +190,7 @@ TRACE_EVENT(mm_khugepaged_scan_file,
 
 	TP_fast_assign(
 		__entry->mm = mm;
-		__entry->pfn = page ? page_to_pfn(page) : -1;
+		__entry->pfn = folio ? folio_pfn(folio) : -1;
 		__assign_str(filename, file->f_path.dentry->d_iname);
 		__entry->present = present;
 		__entry->swap = swap;

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2203,7 +2203,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 				    struct file *file, pgoff_t start,
 				    struct collapse_control *cc)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	struct address_space *mapping = file->f_mapping;
 	XA_STATE(xas, &mapping->i_pages, start);
 	int present, swap;
@@ -2215,11 +2215,11 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 	memset(cc->node_load, 0, sizeof(cc->node_load));
 	nodes_clear(cc->alloc_nmask);
 	rcu_read_lock();
-	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
-		if (xas_retry(&xas, page))
+	xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) {
+		if (xas_retry(&xas, folio))
 			continue;
 
-		if (xa_is_value(page)) {
+		if (xa_is_value(folio)) {
 			++swap;
 			if (cc->is_khugepaged &&
 			    swap > khugepaged_max_ptes_swap) {
@@ -2234,11 +2234,9 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 		 * TODO: khugepaged should compact smaller compound pages
 		 * into a PMD sized page
 		 */
-		if (PageTransCompound(page)) {
-			struct page *head = compound_head(page);
-
-			result = compound_order(head) == HPAGE_PMD_ORDER &&
-					head->index == start
+		if (folio_test_large(folio)) {
+			result = folio_order(folio) == HPAGE_PMD_ORDER &&
+					folio->index == start
 				/* Maybe PMD-mapped */
 				? SCAN_PTE_MAPPED_HUGEPAGE
 				: SCAN_PAGE_COMPOUND;
@@ -2251,28 +2249,29 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 			break;
 		}
 
-		node = page_to_nid(page);
+		node = folio_nid(folio);
 		if (hpage_collapse_scan_abort(node, cc)) {
 			result = SCAN_SCAN_ABORT;
 			break;
 		}
 		cc->node_load[node]++;
 
-		if (!PageLRU(page)) {
+		if (!folio_test_lru(folio)) {
 			result = SCAN_PAGE_LRU;
 			break;
 		}
 
-		if (page_count(page) !=
-		    1 + page_mapcount(page) + page_has_private(page)) {
+		if (folio_ref_count(folio) !=
+		    1 + folio_mapcount(folio) + folio_test_private(folio)) {
 			result = SCAN_PAGE_COUNT;
 			break;
 		}
 
 		/*
-		 * We probably should check if the page is referenced here, but
-		 * nobody would transfer pte_young() to PageReferenced() for us.
-		 * And rmap walk here is just too costly...
+		 * We probably should check if the folio is referenced
+		 * here, but nobody would transfer pte_young() to
+		 * folio_test_referenced() for us. And rmap walk here
+		 * is just too costly...
 		 */
 
 		present++;
@@ -2294,7 +2293,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 		}
 	}
 
-	trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
+	trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
 	return result;
 }
 #else