mm/pagewalkers: ACTION_AGAIN if pte_offset_map_lock() fails

Simple walk_page_range() users should set ACTION_AGAIN to retry when
pte_offset_map_lock() fails.
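
In a caller, the pattern looks like this (an illustrative sketch only,
mirroring the conversions below; example_pte_range is a hypothetical
.pmd_entry callback, not code from this patch):

static int example_pte_range(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	spinlock_t *ptl;
	pte_t *pte;

	ptl = pmd_trans_huge_lock(pmd, walk->vma);
	if (ptl) {
		/* handle the pmd-mapped THP here, as the real callers do */
		spin_unlock(ptl);
		return 0;
	}

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		/* *pmd changed racily underneath us: have the walker retry */
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		/* examine *pte here */
	}
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}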

No need to check pmd_trans_unstable(): that was precisely to avoid the
possibility of calling pte_offset_map() on a racily removed or inserted THP
entry, but such cases are now safely handled inside it.  Likewise there is
no need to check pmd_none() or pmd_bad() before calling it.
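
Since pte_offset_map_lock() itself now rechecks *pmd and fails on any
none, bad, or huge value it sees, a hypothetical user of the sketch
above needs only the mmap_lock which walk_page_range() already requires
(again illustrative, not code from this patch):

static const struct mm_walk_ops example_walk_ops = {
	.pmd_entry = example_pte_range,
};

static void example_walk(struct mm_struct *mm,
			 unsigned long start, unsigned long end)
{
	mmap_read_lock(mm);	/* walk_page_range() asserts mmap_lock held */
	walk_page_range(mm, start, end, &example_walk_ops, NULL);
	mmap_read_unlock(mm);
}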

Link: https://lkml.kernel.org/r/c77d9d10-3aad-e3ce-4896-99e91c7947f3@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: SeongJae Park <sj@kernel.org> for mm/damon part
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 7780d04046 (parent 2798bbe75b)
Hugh Dickins, 2023-06-08 18:17:26 -07:00; committed by Andrew Morton
5 changed files with 36 additions and 28 deletions

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -631,14 +631,11 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		goto out;
 	}
 
-	if (pmd_trans_unstable(pmd))
-		goto out;
-	/*
-	 * The mmap_lock held all the way back in m_start() is what
-	 * keeps khugepaged out of here and from collapsing things
-	 * in here.
-	 */
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	if (!pte) {
+		walk->action = ACTION_AGAIN;
+		return 0;
+	}
 	for (; addr != end; pte++, addr += PAGE_SIZE)
 		smaps_pte_entry(pte, addr, walk);
 	pte_unmap_unlock(pte - 1, ptl);
@@ -1191,10 +1188,11 @@ out:
 		return 0;
 	}
 
-	if (pmd_trans_unstable(pmd))
-		return 0;
-
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	if (!pte) {
+		walk->action = ACTION_AGAIN;
+		return 0;
+	}
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
 
@@ -1538,9 +1536,6 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 		spin_unlock(ptl);
 		return err;
 	}
-
-	if (pmd_trans_unstable(pmdp))
-		return 0;
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 	/*
@@ -1548,6 +1543,10 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 	 * goes beyond vma->vm_end.
 	 */
 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
+	if (!pte) {
+		walk->action = ACTION_AGAIN;
+		return err;
+	}
 	for (; addr < end; pte++, addr += PAGE_SIZE) {
 		pagemap_entry_t pme;
 
@@ -1887,11 +1886,12 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 		spin_unlock(ptl);
 		return 0;
 	}
-
-	if (pmd_trans_unstable(pmd))
-		return 0;
 #endif
 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	if (!pte) {
+		walk->action = ACTION_AGAIN;
+		return 0;
+	}
 	do {
 		struct page *page = can_gather_numa_stats(*pte, vma, addr);
 		if (!page)

diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -318,9 +318,11 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
 		spin_unlock(ptl);
 	}
 
-	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		return 0;
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	if (!pte) {
+		walk->action = ACTION_AGAIN;
+		return 0;
+	}
 	if (!pte_present(*pte))
 		goto out;
 	damon_ptep_mkold(pte, walk->vma, addr);
@@ -464,9 +466,11 @@ huge_out:
 regular_page:
 #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
 
-	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		return -EINVAL;
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	if (!pte) {
+		walk->action = ACTION_AGAIN;
+		return 0;
+	}
 	if (!pte_present(*pte))
 		goto out;
 	folio = damon_get_folio(pte_pfn(*pte));

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -514,10 +514,11 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
 	if (ptl)
 		return queue_folios_pmd(pmd, ptl, addr, end, walk);
 
-	if (pmd_trans_unstable(pmd))
-		return 0;
-
 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	if (!pte) {
+		walk->action = ACTION_AGAIN;
+		return 0;
+	}
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		if (!pte_present(*pte))
 			continue;

diff --git a/mm/mincore.c b/mm/mincore.c
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -113,12 +113,11 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		goto out;
 	}
 
-	if (pmd_trans_unstable(pmd)) {
-		__mincore_unmapped_range(addr, end, vma, vec);
-		goto out;
-	}
-
 	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	if (!ptep) {
+		walk->action = ACTION_AGAIN;
+		return 0;
+	}
 	for (; addr != end; ptep++, addr += PAGE_SIZE) {
 		pte_t pte = *ptep;

diff --git a/mm/mlock.c b/mm/mlock.c
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -329,6 +329,10 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
 	}
 
 	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	if (!start_pte) {
+		walk->action = ACTION_AGAIN;
+		return 0;
+	}
 	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
 		if (!pte_present(*pte))
 			continue;