mm: renovate page_address_in_vma()

This function doesn't modify any of its arguments, so if we make a few
other functions take const pointers, we can make page_address_in_vma()
take const pointers too.  All of its callers have the containing folio
already, so pass that in as an argument instead of recalculating it.  Also
add kernel-doc.

Link: https://lkml.kernel.org/r/20241005200121.3231142-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 713da0b33b (parent 7d3e93eca3)
Author: Matthew Wilcox (Oracle)
Date:   2024-10-05 21:01:14 +01:00
Committer: Andrew Morton

7 changed files with 30 additions and 21 deletions
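The conversion is mechanical at each call site: a caller that already holds the folio passes it in, and page_address_in_vma() no longer recomputes it with page_folio(). A minimal sketch of the resulting pattern (the wrapper function here is illustrative, not part of the patch):

static unsigned long example_caller(struct page *page,
		struct vm_area_struct *vma)
{
	/* The folio lookup is done once, by the caller. */
	struct folio *folio = page_folio(page);

	return page_address_in_vma(folio, page, vma);
}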

include/linux/rmap.h

@@ -728,11 +728,8 @@ page_vma_mapped_walk_restart(struct page_vma_mapped_walk *pvmw)
 }
 
 bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
 
-/*
- * Used by swapoff to help locate where page is expected in vma.
- */
-unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
+unsigned long page_address_in_vma(const struct folio *folio,
+		const struct page *, const struct vm_area_struct *);
 
 /*
  * Cleans the PTEs of shared mappings.

mm/internal.h

@@ -841,7 +841,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
 }
 
 /* mm/util.c */
-struct anon_vma *folio_anon_vma(struct folio *folio);
+struct anon_vma *folio_anon_vma(const struct folio *folio);
 #ifdef CONFIG_MMU
 void unmap_mapping_folio(struct folio *folio);
@@ -959,7 +959,7 @@ extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
  * If any page in this range is mapped by this VMA, return the first address
  * where any of these pages appear. Otherwise, return -EFAULT.
  */
-static inline unsigned long vma_address(struct vm_area_struct *vma,
+static inline unsigned long vma_address(const struct vm_area_struct *vma,
 		pgoff_t pgoff, unsigned long nr_pages)
 {
 	unsigned long address;
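As the comment above says, vma_address() maps a file pgoff onto a user virtual address, or returns -EFAULT. A simplified sketch of the arithmetic for the common case (the real helper also handles an nr_pages range that begins before vma->vm_pgoff):

static unsigned long vma_address_sketch(const struct vm_area_struct *vma,
		pgoff_t pgoff)
{
	unsigned long address;

	if (pgoff < vma->vm_pgoff)
		return -EFAULT;
	/* Each page offset past vm_pgoff adds one page to vm_start. */
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (address >= vma->vm_end)
		return -EFAULT;	/* pgoff lies beyond the end of this VMA */
	return address;
}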

mm/ksm.c

@@ -1256,7 +1256,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
 	if (WARN_ON_ONCE(folio_test_large(folio)))
 		return err;
 
-	pvmw.address = page_address_in_vma(&folio->page, vma);
+	pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma);
 	if (pvmw.address == -EFAULT)
 		goto out;
@@ -1340,7 +1340,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 {
 	struct folio *kfolio = page_folio(kpage);
 	struct mm_struct *mm = vma->vm_mm;
-	struct folio *folio;
+	struct folio *folio = page_folio(page);
 	pmd_t *pmd;
 	pmd_t pmde;
 	pte_t *ptep;
@@ -1350,7 +1350,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	int err = -EFAULT;
 	struct mmu_notifier_range range;
 
-	addr = page_address_in_vma(page, vma);
+	addr = page_address_in_vma(folio, page, vma);
 	if (addr == -EFAULT)
 		goto out;
@@ -1416,7 +1416,6 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	ptep_clear_flush(vma, addr, ptep);
 	set_pte_at(mm, addr, ptep, newpte);
 
-	folio = page_folio(page);
 	folio_remove_rmap_pte(folio, page, vma);
 	if (!folio_mapped(folio))
 		folio_free_swap(folio);

mm/memory-failure.c

@@ -671,7 +671,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 		 */
 		if (vma->vm_mm != t->mm)
 			continue;
-		addr = page_address_in_vma(page, vma);
+		addr = page_address_in_vma(folio, page, vma);
 		add_to_kill_anon_file(t, page, vma, to_kill, addr);
 	}
 }
}

mm/mempolicy.c

@@ -1367,7 +1367,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 		if (!list_entry_is_head(folio, &pagelist, lru)) {
 			vma_iter_init(&vmi, mm, start);
 			for_each_vma_range(vmi, vma, end) {
-				addr = page_address_in_vma(
+				addr = page_address_in_vma(folio,
 					folio_page(folio, 0), vma);
 				if (addr != -EFAULT)
 					break;

mm/rmap.c

@@ -767,14 +767,27 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 }
 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 
-/*
- * At what user virtual address is page expected in vma?
- * Caller should check the page is actually part of the vma.
+/**
+ * page_address_in_vma - The virtual address of a page in this VMA.
+ * @folio: The folio containing the page.
+ * @page: The page within the folio.
+ * @vma: The VMA we need to know the address in.
+ *
+ * Calculates the user virtual address of this page in the specified VMA.
+ * It is the caller's responsibility to check the page is actually
+ * within the VMA.  There may not currently be a PTE pointing at this
+ * page, but if a page fault occurs at this address, this is the page
+ * which will be accessed.
+ *
+ * Context: Caller should hold a reference to the folio.  Caller should
+ * hold a lock (eg the i_mmap_lock or the mmap_lock) which keeps the
+ * VMA from being altered.
+ *
+ * Return: The virtual address corresponding to this page in the VMA.
  */
-unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_address_in_vma(const struct folio *folio,
+		const struct page *page, const struct vm_area_struct *vma)
 {
-	struct folio *folio = page_folio(page);
-
 	if (folio_test_anon(folio)) {
 		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
 		/*
@@ -790,7 +803,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 			return -EFAULT;
 	}
 
-	/* The !page__anon_vma above handles KSM folios */
+	/* KSM folios don't reach here because of the !page__anon_vma check */
 	return vma_address(vma, page_pgoff(folio, page), 1);
 }
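The kernel-doc above fixes the calling convention; a hedged sketch of a conforming caller follows (the function is illustrative, and a real caller typically already holds the reference and the lock for other reasons):

static unsigned long lookup_addr(struct folio *folio, struct page *page,
		struct vm_area_struct *vma)
{
	unsigned long addr;

	folio_get(folio);		/* hold a folio reference */
	mmap_read_lock(vma->vm_mm);	/* keep the VMA from being altered */
	addr = page_address_in_vma(folio, page, vma);
	mmap_read_unlock(vma->vm_mm);
	folio_put(folio);

	return addr;	/* -EFAULT if the page is not mapped by this VMA */
}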

mm/util.c

@@ -820,7 +820,7 @@ void *vcalloc_noprof(size_t n, size_t size)
 }
 EXPORT_SYMBOL(vcalloc_noprof);
 
-struct anon_vma *folio_anon_vma(struct folio *folio)
+struct anon_vma *folio_anon_vma(const struct folio *folio)
 {
 	unsigned long mapping = (unsigned long)folio->mapping;
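For context: folio->mapping is cast to unsigned long because its low bits carry type tags (PAGE_MAPPING_ANON and friends from include/linux/page-flags.h). The rest of the function, untouched by this patch, resolves the tag roughly as follows (a sketch of the mm/util.c body; consult the tree for the authoritative version):

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;	/* file-backed or KSM folio: no anon_vma */
	return (void *)(mapping - PAGE_MAPPING_ANON);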