mirror of
https://github.com/torvalds/linux.git
synced 2024-11-24 21:21:41 +00:00
mm/numa: no task_numa_fault() call if PTE is changed
When handling a numa page fault, task_numa_fault() should be called by a process that restores the page table of the faulted folio to avoid duplicated stats counting. Commit b99a342d4f
("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") restructured do_numa_page() and did not avoid the task_numa_fault() call in the second page table check after a numa migration failure. Fix it by making all !pte_same() checks return immediately. This issue can cause task_numa_fault() to be called more often than necessary and lead to unexpected numa balancing results (it is hard to tell whether the issue will cause a positive or negative performance impact due to the duplicated numa fault counting). Link: https://lkml.kernel.org/r/20240809145906.1513458-2-ziy@nvidia.com Fixes: b99a342d4f
("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault") Signed-off-by: Zi Yan <ziy@nvidia.com> Reported-by: "Huang, Ying" <ying.huang@intel.com> Closes: https://lore.kernel.org/linux-mm/87zfqfw0yw.fsf@yhuang6-desk2.ccr.corp.intel.com/ Acked-by: David Hildenbrand <david@redhat.com> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Kefeng Wang <wangkefeng.wang@huawei.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Yang Shi <shy828301@gmail.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
61ebe5a747
commit
40b760cfd4
33
mm/memory.c
33
mm/memory.c
@ -5295,7 +5295,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
|
||||
|
||||
if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
|
||||
pte_unmap_unlock(vmf->pte, vmf->ptl);
|
||||
goto out;
|
||||
return 0;
|
||||
}
|
||||
|
||||
pte = pte_modify(old_pte, vma->vm_page_prot);
|
||||
@ -5358,23 +5358,19 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
|
||||
if (!migrate_misplaced_folio(folio, vma, target_nid)) {
|
||||
nid = target_nid;
|
||||
flags |= TNF_MIGRATED;
|
||||
} else {
|
||||
flags |= TNF_MIGRATE_FAIL;
|
||||
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
|
||||
vmf->address, &vmf->ptl);
|
||||
if (unlikely(!vmf->pte))
|
||||
goto out;
|
||||
if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
|
||||
pte_unmap_unlock(vmf->pte, vmf->ptl);
|
||||
goto out;
|
||||
}
|
||||
goto out_map;
|
||||
task_numa_fault(last_cpupid, nid, nr_pages, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
out:
|
||||
if (nid != NUMA_NO_NODE)
|
||||
task_numa_fault(last_cpupid, nid, nr_pages, flags);
|
||||
return 0;
|
||||
flags |= TNF_MIGRATE_FAIL;
|
||||
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
|
||||
vmf->address, &vmf->ptl);
|
||||
if (unlikely(!vmf->pte))
|
||||
return 0;
|
||||
if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
|
||||
pte_unmap_unlock(vmf->pte, vmf->ptl);
|
||||
return 0;
|
||||
}
|
||||
out_map:
|
||||
/*
|
||||
* Make it present again, depending on how arch implements
|
||||
@ -5387,7 +5383,10 @@ out_map:
|
||||
numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
|
||||
writable);
|
||||
pte_unmap_unlock(vmf->pte, vmf->ptl);
|
||||
goto out;
|
||||
|
||||
if (nid != NUMA_NO_NODE)
|
||||
task_numa_fault(last_cpupid, nid, nr_pages, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
|
||||
|
Loading…
Reference in New Issue
Block a user