// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>

#include <asm/mmu.h>

/*
 * kernel virtual address is required to implement vmalloc/pkmap/fixmap
 * Refer to asm/processor.h for System Memory Map
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
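
	/*
	 * Walk the task's page table and the kernel (swapper) page
	 * table in lockstep, bailing out at the first level where the
	 * kernel copy has no entry for this address.
	 */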
	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		goto bad_area;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;
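
	/*
	 * Decode the Exception Cause Register to classify the access:
	 * a store/exchange is a write fault, a ProtV instruction fetch
	 * is an exec fault, anything else is treated as a read.
	 */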
	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
		 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;
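
	/*
	 * Take mmap_lock shared for the VMA lookup and permission
	 * checks; handle_mm_fault() may drop it internally, in which
	 * case we come back here via the retry path.
	 */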
retry:
	mmap_read_lock(mm);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
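	/*
	 * Address below the closest VMA: valid only if that VMA is a
	 * stack (VM_GROWSDOWN) that can be expanded down to cover it.
	 */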
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

	/*
	 * Fault accounting is being consolidated into core
	 * handle_mm_fault(); until this arch is converted, pass a NULL
	 * pt_regs so the generic accounting stays off and the per-arch
	 * accounting below remains authoritative.
	 */
	fault = handle_mm_fault(vma, address, flags, NULL);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/*
	 * Fault retry nuances, mmap_lock already relinquished by core mm
	 */
	if (unlikely((fault & VM_FAULT_RETRY) &&
		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
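
	/*
	 * Success and failure paths converge here: drop mmap_lock and
	 * account the fault before working out how to return.
	 */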
bad_area:
	mmap_read_unlock(mm);

	/*
	 * Major/minor page fault accounting
	 * (in case of retry we only land here once)
	 */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}

		/* Normal return path: fault handled gracefully */
		return;
	}

	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}
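
	/*
	 * User-mode fault that could not be serviced: stash the fault
	 * address in the thread struct and deliver the signal.
	 */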
	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;
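
	/*
	 * Kernel-mode fault with no way to service it: attempt an
	 * exception table fixup (e.g. a faulting uaccess), else oops.
	 */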
no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}