// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>

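/* Per-CPU ASID bookkeeping for the Xtensa MMU context code, seeded with
 * the first user-space ASID.
 */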
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;

void bad_page_fault(struct pt_regs*, unsigned long, int);

/*
 * This routine handles page faults. It determines the faulting address
 * and the nature of the problem, and then passes the fault off to one
 * of the appropriate routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;

	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

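	/* Until a VMA covering the address is found, a SIGSEGV is reported
	 * as SEGV_MAPERR ("address not mapped to object").
	 */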
	code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

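	/* Classify the access from the exception cause: a store-side
	 * exception is a write fault, an instruction-side exception is an
	 * exec fault, and anything else is treated as a read.
	 */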
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		   exccause == EXCCAUSE_ITLB_MISS ||
		   exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	mmap_read_lock(mm);
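	/* find_vma() returns the first region whose vm_end lies above the
	 * address; that region may still start above the address, which is
	 * the possible stack-growth case handled below.
	 */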
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */

good_area:
	code = SEGV_ACCERR;

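	/* Check the access against the protection bits of the VMA. */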
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto bad_page_fault;
		return;
	}

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

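	/* The fault was handled: drop the mmap lock and let the faulting
	 * instruction be retried.
	 */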
	mmap_read_unlock(mm);
	return;

	/* Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;

	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

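		/* Walk the task's and the kernel's page tables side by side,
		 * copying the shared top-level entries from init_mm and
		 * bailing out if the kernel mapping itself is absent.
		 */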
		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

		pgd_val(*pgd) = pgd_val(*pgd_k);

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
			goto bad_page_fault;

		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud) || !pud_present(*pud_k))
			goto bad_page_fault;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}

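/*
 * Handle a fault taken in kernel mode: honor an exception-table fixup
 * when one matches regs->pc, otherwise report the failure and die.
 */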
void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void __noreturn die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault? */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
}