// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#include "internal.h"

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

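/* Fill the caller's output pfn array for [addr, end) with one flags value. */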
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

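/*
 * Illustrative sketch of the two ways a caller can drive the flags logic
 * below (using the HMM_PFN_REQ_* request flags from include/linux/hmm.h):
 *
 *   1) One policy for the whole range, ignoring per-pfn input:
 *        range->default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *        range->pfn_flags_mask = 0;
 *
 *   2) Per-page decisions, taken from the pre-filled hmm_pfns[] array:
 *        range->default_flags = 0;
 *        range->pfn_flags_mask = ~0UL;
 */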
static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the range. The API can be used in two
	 * ways. In the first, the HMM user coalesces multiple page faults
	 * into one request and sets flags per pfn for those faults. In the
	 * second, the HMM user wants to pre-fault a range with specific
	 * flags. For the latter it would be a waste to have the user
	 * pre-fill the pfn array with a default flags value.
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Do we need to write-fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If the CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

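/*
 * pte_hole callback: called for ranges with no page table backing them, and
 * reused (with depth == -1) by the pmd/pud walkers for empty entries.
 */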
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

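/*
 * Handle a huge pmd mapping (transparent huge page or devmap): either fault
 * as requested or emit one pfn entry per PAGE_SIZE page covered by the pmd.
 */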
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		       unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

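/*
 * Resolve a single pte into an output hmm_pfn entry. Returns 0 with the
 * result stored in *hmm_pfn, -EBUSY after waiting on a migration entry, or a
 * fault/error code; on any non-zero return the pte has already been unmapped.
 */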
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = ptep_get(ptep);
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none_mostly(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Don't fault in device private pages owned by the caller,
		 * just report the PFN.
		 */
		if (is_device_private_entry(entry) &&
		    pfn_swap_entry_to_page(entry)->pgmap->owner ==
		    range->dev_private_owner) {
			cpu_flags = HMM_PFN_VALID;
			if (is_writable_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = swp_offset_pfn(entry) | cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_device_private_entry(entry))
			goto fault;

		if (is_device_exclusive_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Bypass devmap pte such as a DAX page when all requested pfn flags
	 * (pfn_req_flags) are fulfilled.
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (!vm_normal_page(walk->vma, addr, pte) &&
	    !pte_devmap(pte) &&
	    !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

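/*
 * pmd_entry callback: dispatch between holes, pmd migration entries, huge
 * (transparent or devmap) pmds and regular pte tables.
 */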
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = pmdp_get_lockless(pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here: even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value again, check that it is still a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmdp_get_lockless(pmdp);
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point it is either a
	 * valid pmd entry pointing to a pte directory or a bad pmd that will
	 * not recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	if (!ptep)
		goto again;
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

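/*
 * hugetlb_entry callback: one huge pte covers the whole [start, end) chunk,
 * so a single set of flags is computed and replicated per PAGE_SIZE page.
 */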
#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		int ret;

		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
		/*
		 * Avoid deadlock: drop the vma lock before calling
		 * hmm_vma_fault(), which will itself potentially take and
		 * drop the vma lock. This is also correct from a
		 * protection point of view, because there is no further
		 * use here of either pte or ptl after dropping the vma
		 * lock.
		 */
		ret = hmm_vma_fault(addr, end, required_fault, walk);
		hugetlb_vma_lock_read(vma);
		return ret;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. without causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If the range is no longer valid, force a retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
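/*
 * A minimal sketch of a typical hmm_range_fault() caller, following the
 * conventions described in Documentation/mm/hmm.rst ("driver_lock" is a
 * placeholder for whatever lock the driver uses to serialize against its
 * mmu interval notifier invalidate callback):
 *
 *	again:
 *		range.notifier_seq = mmu_interval_read_begin(range.notifier);
 *		mmap_read_lock(mm);
 *		ret = hmm_range_fault(&range);
 *		mmap_read_unlock(mm);
 *		if (ret) {
 *			if (ret == -EBUSY)
 *				goto again;
 *			return ret;
 *		}
 *		mutex_lock(&driver_lock);
 *		if (mmu_interval_read_retry(range.notifier,
 *					    range.notifier_seq)) {
 *			mutex_unlock(&driver_lock);
 *			goto again;
 *		}
 *		range.hmm_pfns[] is now valid; program the device page table
 *		here, then:
 *		mutex_unlock(&driver_lock);
 */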