mirror of https://github.com/torvalds/linux.git
synced 2024-12-15 07:33:56 +00:00
c33c794828
Convert all instances of direct pte_t* dereferencing to instead use ptep_get() helper. This means that by default, the accesses change from a C dereference to a READ_ONCE(). This is technically the correct thing to do since where pgtables are modified by HW (for access/dirty) they are volatile and therefore we should always ensure READ_ONCE() semantics.

But more importantly, by always using the helper, it can be overridden by the architecture to fully encapsulate the contents of the pte. Arch code is deliberately not converted, as the arch code knows best. It is intended that arch code (arm64) will override the default with its own implementation that can (e.g.) hide certain bits from the core code, or determine young/dirty status by mixing in state from another source.

Conversion was done using Coccinelle:

----

// $ make coccicheck \
//          COCCI=ptepget.cocci \
//          SPFLAGS="--include-headers" \
//          MODE=patch

virtual patch

@ depends on patch @
pte_t *v;
@@

- *v
+ ptep_get(v)

----

Then reviewed and hand-edited to avoid multiple unnecessary calls to ptep_get(), instead opting to store the result of a single call in a variable, where it is correct to do so. This aims to negate any cost of READ_ONCE() and will benefit arch-overrides that may be more complex.

Included is a fix for an issue in an earlier version of this patch that was pointed out by kernel test robot. The issue arose because config MMU=n elides definition of the ptep helper functions, including ptep_get(). HUGETLB_PAGE=n configs still define a simple huge_ptep_clear_flush() for linking purposes, which dereferences the ptep. So when both configs are disabled, this caused a build error because ptep_get() is not defined. Fix by continuing to do a direct dereference when MMU=n. This is safe because for this config the arch code cannot be trying to virtualize the ptes because none of the ptep helpers are defined.

Link: https://lkml.kernel.org/r/20230612151545.3317766-4-ryan.roberts@arm.com
Reported-by: kernel test robot <lkp@intel.com>
Link: https://lore.kernel.org/oe-kbuild-all/202305120142.yXsNEo6H-lkp@intel.com/
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Dave Airlie <airlied@gmail.com>
Cc: Dimitri Sivanich <dimitri.sivanich@hpe.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: SeongJae Park <sj@kernel.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
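
For reference, the default ptep_get() that core code now calls is essentially a READ_ONCE() wrapper (a simplified sketch of the generic helper in include/linux/pgtable.h; an architecture can supply its own definition to override it), and the hand-editing described above amounts to caching one call in a local, as in the hypothetical helper below:

----

#ifndef ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	/* READ_ONCE(): ptes may be updated concurrently by HW (access/dirty) */
	return READ_ONCE(*ptep);
}
#endif

/*
 * Hypothetical helper showing the hand-edit pattern: a single ptep_get(),
 * with the result cached in a local instead of dereferencing ptep twice.
 */
static inline bool pte_present_and_young(pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);

	return pte_present(pte) && pte_young(pte);
}

----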
288 lines
7.0 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mm_inline.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stub'ed out just as for p4d/pud
 * above. pmd folding is special and typically pmd_* macros refer to upper
 * level even when folded
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache. This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(ptep_get(ptep), entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address, ptep);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
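/*
 * Clear the pte and, if the old pte may have been cached by hardware
 * (pte_accessible()), flush the TLB entry for this address. Returns
 * the old pte value.
 */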
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
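/*
 * Deposit a preallocated pte page table so it can be withdrawn later,
 * e.g. when a huge pmd must be split back to ptes. Deposited tables are
 * kept on a list hanging off the pmd page and taken back in FIFO order.
 */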
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
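/*
 * Variant of pmdp_invalidate() for callers that must not lose Access/Dirty
 * bit updates made concurrently by hardware. The generic version simply
 * defers to pmdp_invalidate(); architectures may override it.
 */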
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	return pmdp_invalidate(vma, address, pmdp);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * pmd and hugepage pte format are same. So we could
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
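
/*
 * __pte_offset_map() reads the pmd locklessly and validates that it points
 * to a page table (not none, not huge, not bad) before mapping the ptes.
 * The pmd value read is returned in *pmdvalp so callers can later recheck,
 * under the pte lock, that the pmd has not changed underneath them.
 */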
pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
{
	pmd_t pmdval;

	/* rcu_read_lock() to be added later */
	pmdval = pmdp_get_lockless(pmd);
	if (pmdvalp)
		*pmdvalp = pmdval;
	if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
		goto nomap;
	if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval)))
		goto nomap;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		goto nomap;
	}
	return __pte_map(&pmdval, addr);
nomap:
	/* rcu_read_unlock() to be added later */
	return NULL;
}

pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	pmd_t pmdval;
	pte_t *pte;

	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (likely(pte))
		*ptlp = pte_lockptr(mm, &pmdval);
	return pte;
}
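
/*
 * __pte_offset_map_lock() takes the pte lock, then rechecks under the lock
 * that the pmd still holds the value used to locate the page table; if a
 * concurrent change is detected, it unmaps, unlocks and retries.
 */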
pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	spinlock_t *ptl;
	pmd_t pmdval;
	pte_t *pte;
again:
	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (unlikely(!pte))
		return pte;
	ptl = pte_lockptr(mm, &pmdval);
	spin_lock(ptl);
	if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	goto again;
}
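
For context, callers of the helpers above follow a map/check/unmap pattern along these lines (a hypothetical sketch, not part of this file; pte_offset_map_lock() is the wrapper around __pte_offset_map_lock() defined in linux/mm.h):

----

static int walk_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *ptep;
	pte_t pte;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!ptep)
		return 0;	/* no pte table: pmd was none, huge or unstable */

	pte = ptep_get(ptep);	/* one READ_ONCE-backed read, result cached */
	/* ... inspect pte while the lock is held ... */
	pte_unmap_unlock(ptep, ptl);
	return pte_present(pte);
}

----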