2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
|
|
* for more details.
|
|
|
|
*
|
2006-02-14 21:04:54 +00:00
|
|
|
* Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
|
2005-04-16 22:20:36 +00:00
|
|
|
* Copyright (C) 1995, 1996 Paul M. Antoine
|
|
|
|
* Copyright (C) 1998 Ulf Carlsson
|
|
|
|
* Copyright (C) 1999 Silicon Graphics, Inc.
|
|
|
|
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
* Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
|
2013-03-25 17:15:55 +00:00
|
|
|
* Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
|
2013-12-04 16:20:08 +00:00
|
|
|
* Copyright (C) 2014, Imagination Technologies Ltd.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2015-04-03 22:27:06 +00:00
|
|
|
#include <linux/bitops.h>
|
2007-05-31 13:00:19 +00:00
|
|
|
#include <linux/bug.h>
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
#include <linux/compiler.h>
|
2013-05-28 23:07:19 +00:00
|
|
|
#include <linux/context_tracking.h>
|
2014-03-04 10:20:43 +00:00
|
|
|
#include <linux/cpu_pm.h>
|
2012-10-11 16:14:58 +00:00
|
|
|
#include <linux/kexec.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/init.h>
|
2011-09-30 18:49:35 +00:00
|
|
|
#include <linux/kernel.h>
|
2012-02-29 00:24:46 +00:00
|
|
|
#include <linux/module.h>
|
2016-08-15 23:11:52 +00:00
|
|
|
#include <linux/extable.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/mm.h>
|
2017-02-01 18:08:20 +00:00
|
|
|
#include <linux/sched/mm.h>
|
2017-02-08 17:51:35 +00:00
|
|
|
#include <linux/sched/debug.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/smp.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/kallsyms.h>
|
2018-09-10 09:23:18 +00:00
|
|
|
#include <linux/memblock.h>
|
2006-07-20 16:52:02 +00:00
|
|
|
#include <linux/interrupt.h>
|
2008-04-28 16:14:26 +00:00
|
|
|
#include <linux/ptrace.h>
|
2008-07-29 20:58:53 +00:00
|
|
|
#include <linux/kgdb.h>
|
|
|
|
#include <linux/kdebug.h>
|
2010-08-03 18:22:20 +00:00
|
|
|
#include <linux/kprobes.h>
|
2009-11-24 01:24:58 +00:00
|
|
|
#include <linux/notifier.h>
|
2010-05-21 02:04:26 +00:00
|
|
|
#include <linux/kdb.h>
|
2010-10-07 13:08:54 +00:00
|
|
|
#include <linux/irq.h>
|
2010-10-12 11:37:21 +00:00
|
|
|
#include <linux/perf_event.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-22 17:15:22 +00:00
|
|
|
#include <asm/addrspace.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/bootinfo.h>
|
|
|
|
#include <asm/branch.h>
|
|
|
|
#include <asm/break.h>
|
2009-11-24 01:24:58 +00:00
|
|
|
#include <asm/cop2.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/cpu.h>
|
2013-09-17 08:25:47 +00:00
|
|
|
#include <asm/cpu-type.h>
|
2005-05-31 11:49:19 +00:00
|
|
|
#include <asm/dsp.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/fpu.h>
|
2008-10-28 17:38:42 +00:00
|
|
|
#include <asm/fpu_emulator.h>
|
2013-05-21 14:59:19 +00:00
|
|
|
#include <asm/idle.h>
|
2018-11-09 20:08:36 +00:00
|
|
|
#include <asm/isa-rev.h>
|
2017-08-13 02:49:41 +00:00
|
|
|
#include <asm/mips-cps.h>
|
2014-12-03 15:47:03 +00:00
|
|
|
#include <asm/mips-r2-to-r6-emul.h>
|
2005-08-17 17:44:08 +00:00
|
|
|
#include <asm/mipsregs.h>
|
|
|
|
#include <asm/mipsmtregs.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/module.h>
|
2014-01-27 15:23:11 +00:00
|
|
|
#include <asm/msa.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/ptrace.h>
|
|
|
|
#include <asm/sections.h>
|
2016-03-04 01:44:28 +00:00
|
|
|
#include <asm/siginfo.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/tlbdebug.h>
|
|
|
|
#include <asm/traps.h>
|
2016-12-24 19:46:01 +00:00
|
|
|
#include <linux/uaccess.h>
|
2008-09-23 07:08:45 +00:00
|
|
|
#include <asm/watch.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/mmu_context.h>
|
|
|
|
#include <asm/types.h>
|
2006-09-26 14:44:01 +00:00
|
|
|
#include <asm/stacktrace.h>
|
MIPS: Consistently declare TLB functions
Since at least the beginning of the git era we've declared our TLB
exception handling functions inconsistently. They're actually functions,
but we declare them as arrays of u32 where each u32 is an encoded
instruction. This has always been the case for arch/mips/mm/tlbex.c, and
has also been true for arch/mips/kernel/traps.c since commit
86a1708a9d54 ("MIPS: Make tlb exception handler definitions and
declarations match.") which aimed for consistency but did so by
consistently making the our C code inconsistent with our assembly.
This is all usually harmless, but when using GCC 7 or newer to build a
kernel targeting microMIPS (ie. CONFIG_CPU_MICROMIPS=y) it becomes
problematic. With microMIPS bit 0 of the program counter indicates the
ISA mode. When bit 0 is zero instructions are decoded using the standard
MIPS32 or MIPS64 ISA. When bit 0 is one instructions are decoded using
microMIPS. This means that function pointers become odd - their least
significant bit is one for microMIPS code. We work around this in cases
where we need to access code using loads & stores with our
msk_isa16_mode() macro which simply clears bit 0 of the value it is
given:
#define msk_isa16_mode(x) ((x) & ~0x1)
For example we do this for our TLB load handler in
build_r4000_tlb_load_handler():
u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);
We then write code to p, expecting it to be suitably aligned (our LEAF
macro aligns functions on 4 byte boundaries, so (ulong)handle_tlbl will
give a value one greater than a multiple of 4 - ie. the start of a
function on a 4 byte boundary, with the ISA mode bit 0 set).
This worked fine up to GCC 6, but GCC 7 & onwards is smart enough to
presume that handle_tlbl which we declared as an array of u32s must be
aligned sufficiently that bit 0 of its address will never be set, and as
a result optimize out msk_isa16_mode(). This leads to p having an
address with bit 0 set, and when we go on to attempt to store code at
that address we take an address error exception due to the unaligned
memory access.
This leads to an exception prior to the kernel having configured its own
exception handlers, so we jump to whatever handlers the bootloader
configured. In the case of QEMU this results in a silent hang, since it
has no useful general exception vector.
Fix this by consistently declaring our TLB-related functions as
functions. For handle_tlbl(), handle_tlbs() & handle_tlbm() we do this
in asm/tlbex.h & we make use of the existing declaration of
tlbmiss_handler_setup_pgd() in asm/mmu_context.h. Our TLB handler
generation code in arch/mips/mm/tlbex.c is adjusted to deal with these
definitions, in most cases simply by casting the function pointers to
u32 pointers.
This allows us to include asm/mmu_context.h in arch/mips/mm/tlbex.c to
get the definitions of tlbmiss_handler_setup_pgd & pgd_current, removing
some needless duplication. Consistently using msk_isa16_mode() on
function pointers means we no longer need the
tlbmiss_handler_setup_pgd_start symbol so that is removed entirely.
Now that we're declaring our functions as functions GCC stops optimizing
out msk_isa16_mode() & a microMIPS kernel built with either GCC 7.3.0 or
8.1.0 boots successfully.
Signed-off-by: Paul Burton <paul.burton@mips.com>
2018-08-10 23:03:31 +00:00
|
|
|
#include <asm/tlbex.h>
|
2010-01-28 14:22:37 +00:00
|
|
|
#include <asm/uasm.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
MIPS: emulate CPUCFG instruction on older Loongson64 cores
CPUCFG is the instruction for querying processor characteristics on
newer Loongson processors, much like CPUID of x86. Since the instruction
is supposedly designed to provide a unified way to do feature detection
(without having to, for example, parse /proc/cpuinfo which is too
heavyweight), it is important to provide compatibility for older cores
without native support. Fortunately, most of the fields can be
synthesized without changes to semantics. Performance is not really big
a concern, because feature detection logic is not expected to be
invoked very often in typical userland applications.
The instruction can't be emulated on LOONGSON_2EF cores, according to
FlyGoat's experiments. Because the LWC2 opcode is assigned to other
valid instructions on 2E and 2F, no RI exception is raised for us to
intercept. So compatibility is only extended back furthest to
Loongson-3A1000. Loongson-2K is covered too, as it is basically a remix
of various blocks from the 3A/3B models from a kernel perspective.
This is lightly based on Loongson's work on their Linux 3.10 fork, for
being the authority on the right feature flags to fill in, where things
aren't otherwise discoverable.
Signed-off-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Tiezhu Yang <yangtiezhu@loongson.cn>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
2020-05-23 13:37:01 +00:00
|
|
|
#include <asm/mach-loongson64/cpucfg-emul.h>
|
|
|
|
|
2007-11-11 17:05:18 +00:00
|
|
|
extern void check_wait(void);
|
|
|
|
extern asmlinkage void rollback_handle_int(void);
|
2006-04-03 16:56:36 +00:00
|
|
|
extern asmlinkage void handle_int(void);
|
2005-04-16 22:20:36 +00:00
|
|
|
extern asmlinkage void handle_adel(void);
|
|
|
|
extern asmlinkage void handle_ades(void);
|
|
|
|
extern asmlinkage void handle_ibe(void);
|
|
|
|
extern asmlinkage void handle_dbe(void);
|
|
|
|
extern asmlinkage void handle_sys(void);
|
|
|
|
extern asmlinkage void handle_bp(void);
|
|
|
|
extern asmlinkage void handle_ri(void);
|
MIPS: Check TLB before handle_ri_rdhwr() for Loongson-3
Loongson-3's micro TLB (ITLB) is not strictly a subset of JTLB. That
means: when a JTLB entry is replaced by hardware, there may be an old
valid entry exists in ITLB. So, a TLB miss exception may occur while
handle_ri_rdhwr() is running because it try to access EPC's content.
However, handle_ri_rdhwr() doesn't clear EXL, which makes a TLB Refill
exception be treated as a TLB Invalid exception and tlbp may fail. In
this case, if FTLB (which is usually set-associative instead of fully-
associative) is enabled, a tlbp failure will cause an invalid tlbwi,
which will hang the whole system.
This patch renames handle_ri_rdhwr_vivt to handle_ri_rdhwr_tlbp and uses
it for Loongson-3. It tries to solve the same problem described as below,
but more straightforwardly.
https://patchwork.linux-mips.org/patch/12591/
I think Loongson-2 has the same problem, but it has no FTLB, so we just
keep it as is.
Signed-off-by: Huacai Chen <chenhc@lemote.com>
Cc: Rui Wang <wangr@lemote.com>
Cc: John Crispin <john@phrozen.org>
Cc: Steven J . Hill <Steven.Hill@caviumnetworks.com>
Cc: Fuxin Zhang <zhangfx@lemote.com>
Cc: Zhangjin Wu <wuzhangjin@gmail.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: linux-mips@linux-mips.org
Cc: stable@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/15753/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2017-03-16 13:00:26 +00:00
|
|
|
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
|
2006-09-11 08:50:29 +00:00
|
|
|
extern asmlinkage void handle_ri_rdhwr(void);
|
2005-04-16 22:20:36 +00:00
|
|
|
extern asmlinkage void handle_cpu(void);
|
|
|
|
extern asmlinkage void handle_ov(void);
|
|
|
|
extern asmlinkage void handle_tr(void);
|
2014-01-27 15:23:12 +00:00
|
|
|
extern asmlinkage void handle_msa_fpe(void);
|
2005-04-16 22:20:36 +00:00
|
|
|
extern asmlinkage void handle_fpe(void);
|
2013-11-14 16:12:31 +00:00
|
|
|
extern asmlinkage void handle_ftlb(void);
|
2014-01-27 15:23:11 +00:00
|
|
|
extern asmlinkage void handle_msa(void);
|
2005-04-16 22:20:36 +00:00
|
|
|
extern asmlinkage void handle_mdmx(void);
|
|
|
|
extern asmlinkage void handle_watch(void);
|
2005-08-17 17:44:08 +00:00
|
|
|
extern asmlinkage void handle_mt(void);
|
2005-05-31 11:49:19 +00:00
|
|
|
extern asmlinkage void handle_dsp(void);
|
2005-04-16 22:20:36 +00:00
|
|
|
extern asmlinkage void handle_mcheck(void);
|
|
|
|
extern asmlinkage void handle_reserved(void);
|
2014-07-15 13:09:56 +00:00
|
|
|
extern void tlb_do_page_fault_0(void);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
void (*board_be_init)(void);
|
|
|
|
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
|
2005-07-14 15:57:16 +00:00
|
|
|
void (*board_nmi_handler_setup)(void);
|
|
|
|
void (*board_ejtag_handler_setup)(void);
|
|
|
|
void (*board_bind_eic_interrupt)(int irq, int regset);
|
2011-11-16 01:25:45 +00:00
|
|
|
void (*board_ebase_setup)(void);
|
MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code
commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
the arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
related content into no-ops as early as possible, since that will get
rid of these warnings. In any case, they are temporary and harmless.
Here, we remove all the MIPS __cpuinit from C code and __CPUINIT
from asm files. MIPS is interesting in this respect, because there
are also uasm users hiding behind their own renamed versions of the
__cpuinit macros.
[1] https://lkml.org/lkml/2013/5/20/589
[ralf@linux-mips.org: Folded in Paul's followup fix.]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2013-06-18 13:38:59 +00:00
|
|
|
void(*board_cache_error_setup)(void);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2020-06-09 04:30:59 +00:00
|
|
|
static void show_raw_backtrace(unsigned long reg29, const char *loglvl)
|
2006-07-25 14:51:36 +00:00
|
|
|
{
|
2008-04-28 16:14:26 +00:00
|
|
|
unsigned long *sp = (unsigned long *)(reg29 & ~3);
|
2006-07-25 14:51:36 +00:00
|
|
|
unsigned long addr;
|
|
|
|
|
2020-06-09 04:30:59 +00:00
|
|
|
printk("%sCall Trace:", loglvl);
|
2006-07-25 14:51:36 +00:00
|
|
|
#ifdef CONFIG_KALLSYMS
|
2020-06-09 04:30:59 +00:00
|
|
|
printk("%s\n", loglvl);
|
2006-07-25 14:51:36 +00:00
|
|
|
#endif
|
2008-05-12 15:58:48 +00:00
|
|
|
while (!kstack_end(sp)) {
|
|
|
|
unsigned long __user *p =
|
|
|
|
(unsigned long __user *)(unsigned long)sp++;
|
|
|
|
if (__get_user(addr, p)) {
|
2020-06-09 04:30:59 +00:00
|
|
|
printk("%s (Bad stack address)", loglvl);
|
2008-05-12 15:58:48 +00:00
|
|
|
break;
|
2008-04-28 16:14:26 +00:00
|
|
|
}
|
2008-05-12 15:58:48 +00:00
|
|
|
if (__kernel_text_address(addr))
|
2020-06-09 04:30:59 +00:00
|
|
|
print_ip_sym(loglvl, addr);
|
2006-07-25 14:51:36 +00:00
|
|
|
}
|
2020-06-09 04:30:59 +00:00
|
|
|
printk("%s\n", loglvl);
|
2006-07-25 14:51:36 +00:00
|
|
|
}
|
|
|
|
|
2006-07-29 14:27:20 +00:00
|
|
|
#ifdef CONFIG_KALLSYMS
/*
 * When non-zero, show_backtrace() skips the frame unwinder and always
 * falls back to the raw stack scan.  Set via the "raw_show_trace"
 * kernel command-line parameter; useful when the unwinder itself is
 * suspected of misbehaving.
 */
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif
|
2006-08-03 07:29:21 +00:00
|
|
|
|
2020-06-09 04:30:59 +00:00
|
|
|
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
|
|
|
|
const char *loglvl)
|
2006-07-29 14:27:20 +00:00
|
|
|
{
|
2006-08-03 07:29:21 +00:00
|
|
|
unsigned long sp = regs->regs[29];
|
|
|
|
unsigned long ra = regs->regs[31];
|
2006-07-29 14:27:20 +00:00
|
|
|
unsigned long pc = regs->cp0_epc;
|
|
|
|
|
2012-07-19 07:11:16 +00:00
|
|
|
if (!task)
|
|
|
|
task = current;
|
|
|
|
|
2015-12-04 22:25:02 +00:00
|
|
|
if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
|
2020-06-09 04:30:59 +00:00
|
|
|
show_raw_backtrace(sp, loglvl);
|
2006-07-29 14:27:20 +00:00
|
|
|
return;
|
|
|
|
}
|
2020-06-09 04:30:59 +00:00
|
|
|
printk("%sCall Trace:\n", loglvl);
|
2006-08-03 07:29:21 +00:00
|
|
|
do {
|
2020-06-09 04:30:59 +00:00
|
|
|
print_ip_sym(loglvl, pc);
|
2006-09-29 09:02:51 +00:00
|
|
|
pc = unwind_stack(task, &sp, pc, &ra);
|
2006-08-03 07:29:21 +00:00
|
|
|
} while (pc);
|
2016-10-19 13:33:20 +00:00
|
|
|
pr_cont("\n");
|
2006-07-29 14:27:20 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* This routine abuses get_user()/put_user() to reference pointers
|
|
|
|
* with at least a bit of error checking ...
|
|
|
|
*/
|
2007-10-14 22:27:21 +00:00
|
|
|
static void show_stacktrace(struct task_struct *task,
|
2020-06-09 04:30:59 +00:00
|
|
|
const struct pt_regs *regs, const char *loglvl)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
const int field = 2 * sizeof(unsigned long);
|
|
|
|
long stackdata;
|
|
|
|
int i;
|
2007-07-13 14:02:42 +00:00
|
|
|
unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2020-06-09 04:30:59 +00:00
|
|
|
printk("%sStack :", loglvl);
|
2005-04-16 22:20:36 +00:00
|
|
|
i = 0;
|
|
|
|
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
|
2016-10-19 13:33:21 +00:00
|
|
|
if (i && ((i % (64 / field)) == 0)) {
|
|
|
|
pr_cont("\n");
|
2020-06-09 04:30:59 +00:00
|
|
|
printk("%s ", loglvl);
|
2016-10-19 13:33:21 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
if (i > 39) {
|
2016-10-19 13:33:21 +00:00
|
|
|
pr_cont(" ...");
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (__get_user(stackdata, sp++)) {
|
2016-10-19 13:33:21 +00:00
|
|
|
pr_cont(" (Bad stack address)");
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-10-19 13:33:21 +00:00
|
|
|
pr_cont(" %0*lx", field, stackdata);
|
2005-04-16 22:20:36 +00:00
|
|
|
i++;
|
|
|
|
}
|
2016-10-19 13:33:21 +00:00
|
|
|
pr_cont("\n");
|
2020-06-09 04:30:59 +00:00
|
|
|
show_backtrace(task, regs, loglvl);
|
2006-07-29 14:27:20 +00:00
|
|
|
}
|
|
|
|
|
2020-06-09 04:32:29 +00:00
|
|
|
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
|
2006-07-29 14:27:20 +00:00
|
|
|
{
|
|
|
|
struct pt_regs regs;
|
2015-07-27 12:50:22 +00:00
|
|
|
mm_segment_t old_fs = get_fs();
|
2017-06-29 14:05:04 +00:00
|
|
|
|
|
|
|
regs.cp0_status = KSU_KERNEL;
|
2006-07-29 14:27:20 +00:00
|
|
|
if (sp) {
|
|
|
|
regs.regs[29] = (unsigned long)sp;
|
|
|
|
regs.regs[31] = 0;
|
|
|
|
regs.cp0_epc = 0;
|
|
|
|
} else {
|
|
|
|
if (task && task != current) {
|
|
|
|
regs.regs[29] = task->thread.reg29;
|
|
|
|
regs.regs[31] = 0;
|
|
|
|
regs.cp0_epc = task->thread.reg31;
|
|
|
|
} else {
|
|
|
|
prepare_frametrace(®s);
|
|
|
|
}
|
|
|
|
}
|
2015-07-27 12:50:22 +00:00
|
|
|
/*
|
|
|
|
* show_stack() deals exclusively with kernel mode, so be sure to access
|
|
|
|
* the stack in the kernel (not user) address space.
|
|
|
|
*/
|
|
|
|
set_fs(KERNEL_DS);
|
2020-06-09 04:30:59 +00:00
|
|
|
show_stacktrace(task, ®s, loglvl);
|
2015-07-27 12:50:22 +00:00
|
|
|
set_fs(old_fs);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2007-07-13 14:51:46 +00:00
|
|
|
/*
 * Print the instruction words surrounding @pc (3 before through 5 after),
 * marking the faulting instruction with '<' '>'.  If bit 0 of @pc is set
 * the CPU was executing 16-bit (microMIPS/MIPS16) code, so decode the
 * stream as halfwords at the bit-0-cleared address instead.
 *
 * @pc: user/kernel code address from EPC, possibly with the ISA-mode bit set
 */
static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("Code:");

	/* ISA-mode bit set: 16-bit instruction stream. */
	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			pr_cont(" (Bad address in epc)\n");
			break;
		}
		/* 4 hex digits for halfword stream, 8 for full words. */
		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
	pr_cont("\n");
}
|
|
|
|
|
2007-10-14 22:27:21 +00:00
|
|
|
static void __show_regs(const struct pt_regs *regs)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
const int field = 2 * sizeof(unsigned long);
|
|
|
|
unsigned int cause = regs->cp0_cause;
|
2015-05-08 22:10:10 +00:00
|
|
|
unsigned int exccode;
|
2005-04-16 22:20:36 +00:00
|
|
|
int i;
|
|
|
|
|
dump_stack: unify debug information printed by show_regs()
show_regs() is inherently arch-dependent but it does make sense to print
generic debug information and some archs already do albeit in slightly
different forms. This patch introduces a generic function to print debug
information from show_regs() so that different archs print out the same
information and it's much easier to modify what's printed.
show_regs_print_info() prints out the same debug info as dump_stack()
does plus task and thread_info pointers.
* Archs which didn't print debug info now do.
alpha, arc, blackfin, c6x, cris, frv, h8300, hexagon, ia64, m32r,
metag, microblaze, mn10300, openrisc, parisc, score, sh64, sparc,
um, xtensa
* Already prints debug info. Replaced with show_regs_print_info().
The printed information is superset of what used to be there.
arm, arm64, avr32, mips, powerpc, sh32, tile, unicore32, x86
* s390 is special in that it used to print arch-specific information
along with generic debug info. Heiko and Martin think that the
arch-specific extra isn't worth keeping s390 specfic implementation.
Converted to use the generic version.
Note that now all archs print the debug info before actual register
dumps.
An example BUG() dump follows.
kernel BUG at /work/os/work/kernel/workqueue.c:4841!
invalid opcode: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC
Modules linked in:
CPU: 0 PID: 1 Comm: swapper/0 Not tainted 3.9.0-rc1-work+ #7
Hardware name: empty empty/S3992, BIOS 080011 10/26/2007
task: ffff88007c85e040 ti: ffff88007c860000 task.ti: ffff88007c860000
RIP: 0010:[<ffffffff8234a07e>] [<ffffffff8234a07e>] init_workqueues+0x4/0x6
RSP: 0000:ffff88007c861ec8 EFLAGS: 00010246
RAX: ffff88007c861fd8 RBX: ffffffff824466a8 RCX: 0000000000000001
RDX: 0000000000000046 RSI: 0000000000000001 RDI: ffffffff8234a07a
RBP: ffff88007c861ec8 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000001 R11: 0000000000000000 R12: ffffffff8234a07a
R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
FS: 0000000000000000(0000) GS:ffff88007dc00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: ffff88015f7ff000 CR3: 00000000021f1000 CR4: 00000000000007f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
Stack:
ffff88007c861ef8 ffffffff81000312 ffffffff824466a8 ffff88007c85e650
0000000000000003 0000000000000000 ffff88007c861f38 ffffffff82335e5d
ffff88007c862080 ffffffff8223d8c0 ffff88007c862080 ffffffff81c47760
Call Trace:
[<ffffffff81000312>] do_one_initcall+0x122/0x170
[<ffffffff82335e5d>] kernel_init_freeable+0x9b/0x1c8
[<ffffffff81c47760>] ? rest_init+0x140/0x140
[<ffffffff81c4776e>] kernel_init+0xe/0xf0
[<ffffffff81c6be9c>] ret_from_fork+0x7c/0xb0
[<ffffffff81c47760>] ? rest_init+0x140/0x140
...
v2: Typo fix in x86-32.
v3: CPU number dropped from show_regs_print_info() as
dump_stack_print_info() has been updated to print it. s390
specific implementation dropped as requested by s390 maintainers.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Jesper Nilsson <jesper.nilsson@axis.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Fengguang Wu <fengguang.wu@intel.com>
Cc: Mike Frysinger <vapier@gentoo.org>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Sam Ravnborg <sam@ravnborg.org>
Acked-by: Chris Metcalf <cmetcalf@tilera.com> [tile bits]
Acked-by: Richard Kuo <rkuo@codeaurora.org> [hexagon bits]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-04-30 22:27:17 +00:00
|
|
|
show_regs_print_info(KERN_DEFAULT);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Saved main processor registers
|
|
|
|
*/
|
|
|
|
for (i = 0; i < 32; ) {
|
|
|
|
if ((i % 4) == 0)
|
|
|
|
printk("$%2d :", i);
|
|
|
|
if (i == 0)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont(" %0*lx", field, 0UL);
|
2005-04-16 22:20:36 +00:00
|
|
|
else if (i == 26 || i == 27)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont(" %*s", field, "");
|
2005-04-16 22:20:36 +00:00
|
|
|
else
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont(" %0*lx", field, regs->regs[i]);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
i++;
|
|
|
|
if ((i % 4) == 0)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2007-02-02 16:41:47 +00:00
|
|
|
#ifdef CONFIG_CPU_HAS_SMARTMIPS
|
|
|
|
printk("Acx : %0*lx\n", field, regs->acx);
|
|
|
|
#endif
|
2018-11-09 20:08:36 +00:00
|
|
|
if (MIPS_ISA_REV < 6) {
|
|
|
|
printk("Hi : %0*lx\n", field, regs->hi);
|
|
|
|
printk("Lo : %0*lx\n", field, regs->lo);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Saved cp0 registers
|
|
|
|
*/
|
2008-07-15 17:44:33 +00:00
|
|
|
printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
|
|
|
|
(void *) regs->cp0_epc);
|
|
|
|
printk("ra : %0*lx %pS\n", field, regs->regs[31],
|
|
|
|
(void *) regs->regs[31]);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-01-22 11:59:30 +00:00
|
|
|
printk("Status: %08x ", (uint32_t) regs->cp0_status);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-06-26 15:06:34 +00:00
|
|
|
if (cpu_has_3kex) {
|
2005-06-22 20:43:29 +00:00
|
|
|
if (regs->cp0_status & ST0_KUO)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("KUo ");
|
2005-06-22 20:43:29 +00:00
|
|
|
if (regs->cp0_status & ST0_IEO)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("IEo ");
|
2005-06-22 20:43:29 +00:00
|
|
|
if (regs->cp0_status & ST0_KUP)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("KUp ");
|
2005-06-22 20:43:29 +00:00
|
|
|
if (regs->cp0_status & ST0_IEP)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("IEp ");
|
2005-06-22 20:43:29 +00:00
|
|
|
if (regs->cp0_status & ST0_KUC)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("KUc ");
|
2005-06-22 20:43:29 +00:00
|
|
|
if (regs->cp0_status & ST0_IEC)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("IEc ");
|
2013-06-26 15:06:34 +00:00
|
|
|
} else if (cpu_has_4kex) {
|
2005-06-22 20:43:29 +00:00
|
|
|
if (regs->cp0_status & ST0_KX)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("KX ");
|
2005-06-22 20:43:29 +00:00
|
|
|
if (regs->cp0_status & ST0_SX)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("SX ");
|
2005-06-22 20:43:29 +00:00
|
|
|
if (regs->cp0_status & ST0_UX)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("UX ");
|
2005-06-22 20:43:29 +00:00
|
|
|
switch (regs->cp0_status & ST0_KSU) {
|
|
|
|
case KSU_USER:
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("USER ");
|
2005-06-22 20:43:29 +00:00
|
|
|
break;
|
|
|
|
case KSU_SUPERVISOR:
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("SUPERVISOR ");
|
2005-06-22 20:43:29 +00:00
|
|
|
break;
|
|
|
|
case KSU_KERNEL:
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("KERNEL ");
|
2005-06-22 20:43:29 +00:00
|
|
|
break;
|
|
|
|
default:
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("BAD_MODE ");
|
2005-06-22 20:43:29 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (regs->cp0_status & ST0_ERL)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("ERL ");
|
2005-06-22 20:43:29 +00:00
|
|
|
if (regs->cp0_status & ST0_EXL)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("EXL ");
|
2005-06-22 20:43:29 +00:00
|
|
|
if (regs->cp0_status & ST0_IE)
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("IE ");
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2016-10-19 13:33:23 +00:00
|
|
|
pr_cont("\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-05-08 22:10:10 +00:00
|
|
|
exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
|
|
|
|
printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-05-08 22:10:10 +00:00
|
|
|
if (1 <= exccode && exccode <= 5)
|
2005-04-16 22:20:36 +00:00
|
|
|
printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
|
|
|
|
|
2007-10-11 22:46:17 +00:00
|
|
|
printk("PrId : %08x (%s)\n", read_c0_prid(),
|
|
|
|
cpu_name_string());
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2007-10-14 22:27:21 +00:00
|
|
|
/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
/*
 * Generic entry point used by the core kernel: dump the register state
 * and follow it with a stack trace of the current context.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_stack();
}
|
|
|
|
|
2010-08-03 18:22:20 +00:00
|
|
|
/*
 * Full oops-style dump: registers, loaded modules, process identity,
 * TLS state, the stack backtrace and the code around the faulting EPC.
 * Called from die() with the die_lock held.
 */
void show_registers(struct pt_regs *regs)
{
	/* width (hex digits) of a native long, for %0*lx formatting */
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		/*
		 * Cross-check the hardware UserLocal (TLS) register against
		 * the kernel's notion of the thread pointer; a mismatch is
		 * flagged as it hints at corrupted thread state.
		 */
		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs, KERN_DEFAULT);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	/* restore the caller's address limit */
	set_fs(old_fs);
}
|
|
|
|
|
2011-07-23 12:41:24 +00:00
|
|
|
/* Serializes oops output so concurrent die()s don't interleave their dumps. */
static DEFINE_RAW_SPINLOCK(die_lock);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-08-03 22:44:43 +00:00
|
|
|
/*
 * Terminal error path: dump state and kill the current context.
 *
 * @str:  human-readable reason, printed as the oops banner
 * @regs: register state at the point of failure
 *
 * Never returns: ends in panic() (in-interrupt or panic_on_oops),
 * crash_kexec() when a crash kernel is loaded, or do_exit(sig).
 */
void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;	/* distinguishes successive oopses in the log */
	int sig = SIGSEGV;

	oops_enter();

	/* A notifier may claim the event; then don't raise a signal. */
	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	/* Hand over to a loaded crash kernel before exiting, if any. */
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}
|
|
|
|
|
2008-08-04 17:44:34 +00:00
|
|
|
/*
 * Bounds of the data-bus-error fixup table; the section is emitted by
 * the __asm__ stanza below and populated via linker section placement.
 */
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

/* Ensure the __dbe_table section exists even when no entries are emitted. */
__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* Given an address, look for it in the exception tables. */
|
|
|
|
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
|
|
|
|
{
|
|
|
|
const struct exception_table_entry *e;
|
|
|
|
|
2017-07-10 22:51:58 +00:00
|
|
|
e = search_extable(__start___dbe_table,
|
|
|
|
__stop___dbe_table - __start___dbe_table, addr);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!e)
|
|
|
|
e = search_module_dbetables(addr);
|
|
|
|
return e;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Bus error exception handler.  Attempts a fixup (kernel data accesses
 * covered by the __dbe_table), defers to a board-specific handler when
 * one is registered, and otherwise treats the error as fatal.
 */
asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	/* CAUSE bit 2 distinguishes data (set) from instruction bus errors */
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now.	 Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	/* The board handler may override the default action. */
	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);
	else
		mips_cm_error_report();

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			/* Resume at the recorded continuation address. */
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS);

out:
	exception_exit(prev_state);
}
|
|
|
|
|
|
|
|
/*
 * ll/sc, rdhwr, sync emulation
 */
|
|
|
|
|
|
|
|
/* MIPS instruction word field masks used by the emulation code below. */
#define OPCODE 0xfc000000	/* primary opcode, bits 31..26 */
#define BASE   0x03e00000	/* base (rs) register, bits 25..21 */
#define RT     0x001f0000	/* rt register, bits 20..16 */
#define OFFSET 0x0000ffff	/* signed 16-bit immediate, bits 15..0 */
#define LL     0xc0000000	/* opcode of the ll instruction */
#define SC     0xe0000000	/* opcode of the sc instruction */
#define SPEC0  0x00000000	/* SPECIAL opcode class */
#define SPEC3  0x7c000000	/* SPECIAL3 opcode class */
#define RD     0x0000f800	/* rd register, bits 15..11 */
#define FUNC   0x0000003f	/* function field, bits 5..0 */
#define SYNC   0x0000000f	/* SPECIAL function code of sync */
#define RDHWR  0x0000003b	/* SPECIAL3 function code of rdhwr */

/* microMIPS definitions */
#define MM_POOL32A_FUNC 0xfc00ffff	/* POOL32A opcode + function mask */
#define MM_RDHWR        0x00006b3c	/* POOL32A encoding of rdhwr */
#define MM_RS           0x001f0000	/* microMIPS rs field */
#define MM_RT           0x03e00000	/* microMIPS rt field */
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * The ll_bit is cleared by r*_switch.S
 */

/* Emulated LL/SC link state: set by simulate_ll(), tested by simulate_sc(). */
unsigned int ll_bit;
/* Task that performed the most recent emulated ll; the link is only
 * considered valid for an sc issued by the same task. */
struct task_struct *ll_task;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
/*
 * Emulate an ll (load linked) instruction on processors without ll/sc.
 *
 * Performs a plain load from the effective address and records the link
 * in the global ll_bit/ll_task pair for a later simulate_sc().
 *
 * Returns 0 on success, or the signal to raise (SIGBUS on misalignment,
 * SIGSEGV on a faulting user access).
 */
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	/* effective address = base register + sign-extended offset */
	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	/* ll_bit/ll_task are per-CPU-ish globals; keep the update atomic
	 * with respect to context switches. */
	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	/* deliver the loaded value into rt */
	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}
|
|
|
|
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
/*
 * Emulate an sc (store conditional) instruction on processors without
 * ll/sc.  The store succeeds only when the link established by a prior
 * simulate_ll() is still intact for the current task; rt is written with
 * 1 on success and 0 on failure, per the sc architectural contract.
 *
 * Returns 0 on success, or the signal to raise (SIGBUS on misalignment,
 * SIGSEGV on a faulting user store).
 */
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	/* effective address = base register + sign-extended offset */
	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	/* Link broken (cleared by a context switch or taken by another
	 * task): the conditional store fails, rt <- 0. */
	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	/* store performed: rt <- 1 */
	regs->regs[reg] = 1;

	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both
|
|
|
|
* opcodes are supposed to result in coprocessor unusable exceptions if
|
|
|
|
* executed on ll/sc-less processors. That's the theory. In practice a
|
|
|
|
* few processors such as NEC's VR4100 throw reserved instruction exceptions
|
|
|
|
* instead, so we're doing the emulation thing in both exception handlers.
|
|
|
|
*/
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2010-10-12 11:37:21 +00:00
|
|
|
if ((opcode & OPCODE) == LL) {
|
|
|
|
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
|
2011-06-27 12:41:57 +00:00
|
|
|
1, regs, 0);
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
return simulate_ll(regs, opcode);
|
2010-10-12 11:37:21 +00:00
|
|
|
}
|
|
|
|
if ((opcode & OPCODE) == SC) {
|
|
|
|
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
|
2011-06-27 12:41:57 +00:00
|
|
|
1, regs, 0);
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
return simulate_sc(regs, opcode);
|
2010-10-12 11:37:21 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
return -1; /* Must be something else ... */
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2005-04-13 17:43:59 +00:00
|
|
|
/*
|
|
|
|
* Simulate trapping 'rdhwr' instructions to provide user accessible
|
2006-05-08 17:02:16 +00:00
|
|
|
* registers not implemented in hardware.
|
2005-04-13 17:43:59 +00:00
|
|
|
*/
|
2013-03-25 17:15:55 +00:00
|
|
|
/*
 * Core of RDHWR emulation: synthesize the value of user-visible
 * hardware register @rd and write it into general register @rt of the
 * trapping context.  Returns 0 when @rd is a register we know how to
 * provide, -1 otherwise so callers can treat the word as unhandled.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	/* Count each emulated RDHWR in perf's emulation-fault statistics. */
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case MIPS_HWR_CPUNUM:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
		/* SYNCI step is the smaller of the I- and D-cache line sizes. */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case MIPS_HWR_CC:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case MIPS_HWR_CCRES:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			/*
			 * NOTE(review): 2 is used for all other cores —
			 * presumably Count advances every other cycle on
			 * them; confirm against per-CPU documentation
			 * before relying on this.
			 */
			regs->regs[rt] = 2;
		}
		return 0;
	case MIPS_HWR_ULR:		/* Read UserLocal register */
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		/* Unknown hardware register number. */
		return -1;
	}
}
|
|
|
|
|
|
|
|
/*
 * Decode a classic-ISA (non-microMIPS) RDHWR encoding and emulate it.
 * Returns 0 once the word matched RDHWR, -1 when it is not a RDHWR
 * encoding at all.
 */
static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		/*
		 * NOTE(review): simulate_rdhwr()'s result is discarded, so a
		 * RDHWR naming an unimplemented hardware register is treated
		 * as handled (skipped) rather than propagated as unhandled —
		 * presumably intentional; confirm before changing.
		 */
		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}
|
|
|
|
|
2016-01-30 09:08:28 +00:00
|
|
|
static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
|
2013-03-25 17:15:55 +00:00
|
|
|
{
|
|
|
|
if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
|
|
|
|
int rd = (opcode & MM_RS) >> 16;
|
|
|
|
int rt = (opcode & MM_RT) >> 21;
|
|
|
|
simulate_rdhwr(regs, rd, rt);
|
|
|
|
return 0;
|
2005-04-13 17:43:59 +00:00
|
|
|
}
|
|
|
|
|
2005-11-27 03:34:41 +00:00
|
|
|
/* Not ours. */
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2006-11-30 01:14:47 +00:00
|
|
|
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
|
|
|
|
{
|
2010-10-12 11:37:21 +00:00
|
|
|
if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
|
|
|
|
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
|
2011-06-27 12:41:57 +00:00
|
|
|
1, regs, 0);
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
return 0;
|
2010-10-12 11:37:21 +00:00
|
|
|
}
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
|
|
|
|
return -1; /* Must be something else ... */
|
2005-04-13 17:43:59 +00:00
|
|
|
}
|
|
|
|
|
MIPS: emulate CPUCFG instruction on older Loongson64 cores
CPUCFG is the instruction for querying processor characteristics on
newer Loongson processors, much like CPUID of x86. Since the instruction
is supposedly designed to provide a unified way to do feature detection
(without having to, for example, parse /proc/cpuinfo which is too
heavyweight), it is important to provide compatibility for older cores
without native support. Fortunately, most of the fields can be
synthesized without changes to semantics. Performance is not really big
a concern, because feature detection logic is not expected to be
invoked very often in typical userland applications.
The instruction can't be emulated on LOONGSON_2EF cores, according to
FlyGoat's experiments. Because the LWC2 opcode is assigned to other
valid instructions on 2E and 2F, no RI exception is raised for us to
intercept. So compatibility is only extended back furthest to
Loongson-3A1000. Loongson-2K is covered too, as it is basically a remix
of various blocks from the 3A/3B models from a kernel perspective.
This is lightly based on Loongson's work on their Linux 3.10 fork, for
being the authority on the right feature flags to fill in, where things
aren't otherwise discoverable.
Signed-off-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Tiezhu Yang <yangtiezhu@loongson.cn>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
2020-05-23 13:37:01 +00:00
|
|
|
/*
|
|
|
|
* Loongson-3 CSR instructions emulation
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
|
|
|
|
|
|
|
|
#define LWC2 0xc8000000
|
|
|
|
#define RS BASE
|
|
|
|
#define CSR_OPCODE2 0x00000118
|
|
|
|
#define CSR_OPCODE2_MASK 0x000007ff
|
|
|
|
#define CSR_FUNC_MASK RT
|
|
|
|
#define CSR_FUNC_CPUCFG 0x8
|
|
|
|
|
|
|
|
/*
 * Emulate the Loongson CPUCFG instruction (living in the LWC2 opcode
 * space) on older Loongson64 cores that lack it natively.  On a match,
 * the configuration word selected by GPR @rs is synthesized into GPR
 * @rd.  Returns 0 when emulated, -1 when the word is not CPUCFG or the
 * core model is not covered by the emulation.
 */
static int simulate_loongson3_cpucfg(struct pt_regs *regs,
				     unsigned int opcode)
{
	int op = opcode & OPCODE;
	int op2 = opcode & CSR_OPCODE2_MASK;
	int csr_func = (opcode & CSR_FUNC_MASK) >> 16;

	if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) {
		int rd = (opcode & RD) >> 11;
		int rs = (opcode & RS) >> 21;
		/* Selector: which configuration word the caller wants. */
		__u64 sel = regs->regs[rs];

		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

		/* Do not emulate on unsupported core models. */
		if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data))
			return -1;

		regs->regs[rd] = loongson3_cpucfg_read_synthesized(
			&current_cpu_data, sel);

		return 0;
	}

	/* Not ours.  */
	return -1;
}
|
|
|
|
#endif /* CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION */
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
asmlinkage void do_ov(struct pt_regs *regs)
|
|
|
|
{
|
2013-05-28 23:07:19 +00:00
|
|
|
enum ctx_state prev_state;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-05-28 23:07:19 +00:00
|
|
|
prev_state = exception_enter();
|
2006-02-14 21:04:54 +00:00
|
|
|
die_if_kernel("Integer overflow", regs);
|
|
|
|
|
2019-05-23 16:04:24 +00:00
|
|
|
force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
|
2013-05-28 23:07:19 +00:00
|
|
|
exception_exit(prev_state);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2018-11-07 23:14:05 +00:00
|
|
|
#ifdef CONFIG_MIPS_FP_SUPPORT
|
|
|
|
|
2016-10-28 07:21:03 +00:00
|
|
|
/*
|
|
|
|
* Send SIGFPE according to FCSR Cause bits, which must have already
|
|
|
|
 * been masked against Enable bits. This is important as Inexact can
|
|
|
|
* happen together with Overflow or Underflow, and `ptrace' can set
|
|
|
|
* any bits.
|
|
|
|
*/
|
|
|
|
/*
 * Send SIGFPE to @tsk with an si_code derived from the FCSR Cause bits
 * in @fcr31 (already masked against the Enable bits by the caller).
 * Several Cause bits may be set at once, so they are examined in a
 * fixed priority order — Invalid first, Inexact last — matching the
 * historical behavior of this function.
 */
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
		     struct task_struct *tsk)
{
	static const struct {
		unsigned long mask;
		int code;
	} causes[] = {
		{ FPU_CSR_INV_X, FPE_FLTINV },
		{ FPU_CSR_DIV_X, FPE_FLTDIV },
		{ FPU_CSR_OVF_X, FPE_FLTOVF },
		{ FPU_CSR_UDF_X, FPE_FLTUND },
		{ FPU_CSR_INE_X, FPE_FLTRES },
	};
	int si_code = FPE_FLTUNK;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(causes); i++) {
		if (fcr31 & causes[i].mask) {
			si_code = causes[i].code;
			break;
		}
	}

	force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
}
|
|
|
|
|
2015-04-03 22:27:15 +00:00
|
|
|
/*
 * Deliver the signal, if any, that the FPU emulator decided should be
 * raised.  @sig is the signal number from the emulator (0 for none),
 * @fault_addr the relevant user address, and @fcr31 the masked FCSR
 * Cause bits used to refine a SIGFPE's si_code.
 *
 * Returns 0 when no signal was sent, 1 otherwise.
 */
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	int si_code;
	struct vm_area_struct *vma;

	switch (sig) {
	case 0:
		/* Nothing to deliver. */
		return 0;

	case SIGFPE:
		force_fcr31_sig(fcr31, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		/*
		 * Distinguish a mapping that exists but denied the access
		 * (SEGV_ACCERR) from no mapping at all (SEGV_MAPERR); the
		 * mmap read lock keeps the VMA tree stable for the lookup.
		 */
		mmap_read_lock(current->mm);
		vma = find_vma(current->mm, (unsigned long)fault_addr);
		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		/* Any other signal is forwarded unchanged. */
		force_sig(sig);
		return 1;
	}
}
|
|
|
|
|
2014-09-11 07:30:20 +00:00
|
|
|
/*
 * Try to emulate an FP instruction that trapped with a reserved
 * instruction exception (e.g. FPU absent or disabled).  @old_epc and
 * @old_ra are the pre-exception EPC and $ra, needed because do_ri has
 * already advanced past the instruction.  Returns 0 when the word was
 * an FP opcode (emulation attempted, any resulting signal already
 * delivered), -1 when the word is not an FP instruction.
 */
static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);

	/*
	 * We can't allow the emulated instruction to leave any
	 * enabled Cause bits set in $fcr31.
	 */
	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
	current->thread.fpu.fcr31 &= ~fcr31;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
/*
 * Floating-point exception handler: give die notifiers (e.g. a
 * hypervisor restoring guest state) a chance to claim the exception,
 * clear the FCSR Cause bits, then either run the software emulator for
 * Unimplemented Operation exceptions or deliver a SIGFPE reflecting
 * the IEEE exception encoded in @fcr31.
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave any
		 * enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.  */
	} else {
		/* Enabled IEEE exception: report it straight away. */
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}
|
|
|
|
|
2018-11-07 23:14:05 +00:00
|
|
|
/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	/* Only rebind once the emulation count crosses the threshold. */
	if (mt_fpemul_threshold > 0 &&
	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			/*
			 * Save the task's full affinity mask — presumably so
			 * the MT_FPAFF affinity code elsewhere can restore it
			 * later; verify against the sched_setaffinity hooks.
			 */
			current->thread.user_cpus_allowed
				= current->cpus_mask;
			/* Restrict the task to FPU-capable CPUs only. */
			cpumask_and(&tmask, &current->cpus_mask,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
|
|
|
|
|
|
|
|
#else /* !CONFIG_MIPS_FP_SUPPORT */
|
|
|
|
|
|
|
|
/*
 * Stub used when CONFIG_MIPS_FP_SUPPORT is disabled: report the
 * instruction as unhandled (-1) so do_ri() falls through to the
 * next simulator and eventually to SIGILL.
 */
static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	return -1;
}
|
|
|
|
|
|
|
|
#endif /* !CONFIG_MIPS_FP_SUPPORT */
|
|
|
|
|
2016-03-04 01:44:28 +00:00
|
|
|
/*
 * Common handler for trap and break exceptions: give KGDB (when built in)
 * and the DIE_TRAP notifier chain a chance to consume the event, then
 * translate the break/trap code into the appropriate signal for the task,
 * dying first if the fault happened in kernel mode.
 */
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	const char *str)
{
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		/* Arithmetic break codes become SIGFPE with a precise si_code. */
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig_fault(SIGFPE,
				code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
				(void __user *) regs->cp0_epc);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction from the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (si_code) {
			force_sig_fault(SIGTRAP, si_code, NULL);
		} else {
			force_sig(SIGTRAP);
		}
	}
}
|
|
|
|
|
|
|
|
/*
 * Handle a break exception: fetch the faulting instruction, decode the
 * break code from whichever encoding is in use (MIPS16e, 16/32-bit
 * microMIPS or classic MIPS), let kprobe/uprobe notifier consumers claim
 * the codes reserved for them, and otherwise hand off to do_trap_or_bp().
 */
asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	mm_segment_t seg;

	/* Allow the instruction fetch below to read kernel text too. */
	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts left to bit 16 instead to bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristics.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	/* Instruction fetch faulted: the EPC isn't mapped for this task. */
	force_sig(SIGSEGV);
	goto out;
}
|
|
|
|
|
|
|
|
/*
 * Handle a trap exception: fetch the faulting instruction (microMIPS or
 * classic encoding), extract the trap code where the instruction form
 * provides one, and hand off to do_trap_or_bp().
 */
asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	/* Allow the instruction fetch below to read kernel text too. */
	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	/* Instruction fetch faulted: the EPC isn't mapped for this task. */
	force_sig(SIGSEGV);
	goto out;
}
|
|
|
|
|
|
|
|
/*
 * Handle a Reserved Instruction exception.
 *
 * Fast path: on R6 cores with MIPS R2 emulation enabled, user-mode
 * instructions are fed straight to mipsr2_decoder().  Otherwise the
 * instruction word is fetched and offered to the simulators in turn
 * (llsc, rdhwr, sync, fp, and optionally Loongson-3 cpucfg; rdhwr only
 * for microMIPS).  Each simulator returns <0 (not mine), 0 (emulated)
 * or a signal number.  If a signal is due, the EPC/$31 advance done by
 * compute_return_epc() is undone before the signal is raised.
 */
asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code. Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			/* Emulated (or deliberately ignored): done. */
			return;
		case SIGILL:
			/* Not an R2 instruction: try the slow path below. */
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	/*
	 * Advance past the instruction first: simulators may modify a
	 * register that a delay-slot branch depends on.  Undone below if
	 * a signal must be raised.
	 */
	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (!get_isa16_mode(regs->cp0_epc)) {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);

#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
		if (status < 0)
			status = simulate_loongson3_cpucfg(regs, opcode);
#endif
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status);
	}

out:
	exception_exit(prev_state);
}
|
|
|
|
|
2009-11-24 01:24:58 +00:00
|
|
|
/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

/* Register a handler on the coprocessor 2 exception notifier chain. */
int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}
|
|
|
|
|
|
|
|
/* Invoke all registered CU2 handlers; returns the notifier chain verdict. */
int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}
|
|
|
|
|
|
|
|
/*
 * Default CU2 chain handler: a coprocessor 2 exception nobody else
 * claimed is fatal in kernel mode and raises SIGILL for user tasks.
 */
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
		      "instruction", regs);
	force_sig(SIGILL);

	return NOTIFY_OK;
}
|
|
|
|
|
2018-11-07 23:14:05 +00:00
|
|
|
#ifdef CONFIG_MIPS_FP_SUPPORT
|
|
|
|
|
2014-01-27 15:23:11 +00:00
|
|
|
/*
 * Bring the current task's scalar FP -- and, when @msa is non-zero, MSA
 * vector -- context onto the hardware, initializing the context first if
 * this is the task's first FP use.  Returns 0 on success or the non-zero
 * result of an own_fpu*() helper.
 */
static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;
	bool first_fp;

	/* Initialize context if it hasn't been used already */
	first_fp = init_fp_ctx(current);

	if (first_fp) {
		/* Fresh context: just claim the FPU and optionally MSA. */
		preempt_disable();
		err = own_fpu_inatomic(1);
		if (msa && !err) {
			enable_msa();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction. This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable. We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed. We'd have no way to know that when restoring the vector
	 * context & thus may load an outdated value for the most significant
	 * bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		init_msa_upper();
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}
|
|
|
|
|
2018-11-07 23:14:05 +00:00
|
|
|
#else /* !CONFIG_MIPS_FP_SUPPORT */
|
|
|
|
|
|
|
|
static int enable_restore_fp_context(int msa)
|
|
|
|
{
|
|
|
|
return SIGILL;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_MIPS_FP_SUPPORT */
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
asmlinkage void do_cpu(struct pt_regs *regs)
|
|
|
|
{
|
2013-05-28 23:07:19 +00:00
|
|
|
enum ctx_state prev_state;
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
unsigned int __user *epc;
|
2013-03-25 17:15:55 +00:00
|
|
|
unsigned long old_epc, old31;
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
unsigned int opcode;
|
2005-04-16 22:20:36 +00:00
|
|
|
unsigned int cpid;
|
2018-11-07 23:14:05 +00:00
|
|
|
int status;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-05-28 23:07:19 +00:00
|
|
|
prev_state = exception_enter();
|
2005-04-16 22:20:36 +00:00
|
|
|
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
|
|
|
|
|
2013-06-10 06:30:01 +00:00
|
|
|
if (cpid != 2)
|
|
|
|
die_if_kernel("do_cpu invoked from kernel context!", regs);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
switch (cpid) {
|
|
|
|
case 0:
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
epc = (unsigned int __user *)exception_epc(regs);
|
|
|
|
old_epc = regs->cp0_epc;
|
2013-03-25 17:15:55 +00:00
|
|
|
old31 = regs->regs[31];
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
opcode = 0;
|
|
|
|
status = -1;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
if (unlikely(compute_return_epc(regs) < 0))
|
2015-04-03 22:25:08 +00:00
|
|
|
break;
|
2005-04-13 17:43:59 +00:00
|
|
|
|
2016-01-30 09:08:16 +00:00
|
|
|
if (!get_isa16_mode(regs->cp0_epc)) {
|
2013-03-25 17:15:55 +00:00
|
|
|
if (unlikely(get_user(opcode, epc) < 0))
|
|
|
|
status = SIGSEGV;
|
|
|
|
|
|
|
|
if (!cpu_has_llsc && status < 0)
|
|
|
|
status = simulate_llsc(regs, opcode);
|
|
|
|
}
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
|
|
|
|
if (status < 0)
|
|
|
|
status = SIGILL;
|
|
|
|
|
|
|
|
if (unlikely(status > 0)) {
|
|
|
|
regs->cp0_epc = old_epc; /* Undo skip-over. */
|
2013-03-25 17:15:55 +00:00
|
|
|
regs->regs[31] = old31;
|
2019-05-23 15:17:27 +00:00
|
|
|
force_sig(status);
|
[MIPS] SYNC emulation for MIPS I processors
Userland, including the C library and the dynamic linker, is keen to use
the SYNC instruction, even for "generic" MIPS I binaries these days.
Which makes it less than useful on MIPS I processors.
This change adds the emulation, but as our do_ri() infrastructure was not
really prepared to take yet another instruction, I have rewritten it and
its callees slightly as follows.
Now there is only a single place a possible signal is thrown from. The
place is at the end of do_ri(). The instruction word is fetched in
do_ri() and passed down to handlers. The handlers are called in sequence
and return a result that lets the caller decide upon further processing.
If the result is positive, then the handler has picked the instruction,
but a signal should be thrown and the result is the signal number. If the
result is zero, then the handler has successfully simulated the
instruction. If the result is negative, then the handler did not handle
the instruction; to make it more obvious the calls do not follow the usual
0/-Exxx result convention they now return -1 instead of -EFAULT.
The calculation of the return EPC is now at the beginning. The reason is
it is easier to handle it there as emulation callees may modify a register
and an instruction may be located in delay slot of a branch whose result
depends on the register. It has to be undone if a signal is to be raised,
but it is not a problem as this is the slow-path case, and both actions
are done in single places now rather than the former being scattered
through emulation handlers.
The part of do_cpu() being covered follows the changes to do_ri().
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
2007-10-16 17:43:26 +00:00
|
|
|
}
|
|
|
|
|
2015-04-03 22:25:08 +00:00
|
|
|
break;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2018-11-07 23:14:05 +00:00
|
|
|
#ifdef CONFIG_MIPS_FP_SUPPORT
|
2012-03-06 20:28:54 +00:00
|
|
|
case 3:
|
|
|
|
/*
|
MIPS: Correct FP ISA requirements
Correct ISA requirements for floating-point instructions:
* the CU3 exception signifies a real COP3 instruction in MIPS I & II,
* the BC1FL and BC1TL instructions are not supported in MIPS I,
* the SQRT.fmt instructions are indeed supported in MIPS II,
* the LDC1 and SDC1 instructions are indeed supported in MIPS32r1,
* the CEIL.W.fmt, FLOOR.W.fmt, ROUND.W.fmt and TRUNC.W.fmt instructions
are indeed supported in MIPS32,
* the CVT.L.fmt and CVT.fmt.L instructions are indeed supported in
MIPS32r2 and MIPS32r6,
* the CEIL.L.fmt, FLOOR.L.fmt, ROUND.L.fmt and TRUNC.L.fmt instructions
are indeed supported in MIPS32r2 and MIPS32r6,
* the RSQRT.fmt and RECIP.fmt instructions are indeed supported in
MIPS64r1,
Also simplify conditionals for MIPS III and MIPS IV FPU instructions and
the handling of the MOVCI minor opcode.
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9700/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2015-04-03 22:26:49 +00:00
|
|
|
* The COP3 opcode space and consequently the CP0.Status.CU3
|
|
|
|
* bit and the CP0.Cause.CE=3 encoding have been removed as
|
|
|
|
* of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs
|
|
|
|
* up the space has been reused for COP1X instructions, that
|
|
|
|
* are enabled by the CP0.Status.CU1 bit and consequently
|
|
|
|
* use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
|
|
|
|
* exceptions. Some FPU-less processors that implement one
|
|
|
|
* of these ISAs however use this code erroneously for COP1X
|
|
|
|
* instructions. Therefore we redirect this trap to the FP
|
|
|
|
* emulator too.
|
2012-03-06 20:28:54 +00:00
|
|
|
*/
|
MIPS: Correct FP ISA requirements
Correct ISA requirements for floating-point instructions:
* the CU3 exception signifies a real COP3 instruction in MIPS I & II,
* the BC1FL and BC1TL instructions are not supported in MIPS I,
* the SQRT.fmt instructions are indeed supported in MIPS II,
* the LDC1 and SDC1 instructions are indeed supported in MIPS32r1,
* the CEIL.W.fmt, FLOOR.W.fmt, ROUND.W.fmt and TRUNC.W.fmt instructions
are indeed supported in MIPS32,
* the CVT.L.fmt and CVT.fmt.L instructions are indeed supported in
MIPS32r2 and MIPS32r6,
* the CEIL.L.fmt, FLOOR.L.fmt, ROUND.L.fmt and TRUNC.L.fmt instructions
are indeed supported in MIPS32r2 and MIPS32r6,
* the RSQRT.fmt and RECIP.fmt instructions are indeed supported in
MIPS64r1,
Also simplify conditionals for MIPS III and MIPS IV FPU instructions and
the handling of the MOVCI minor opcode.
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/9700/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2015-04-03 22:26:49 +00:00
|
|
|
if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
|
2019-05-23 15:17:27 +00:00
|
|
|
force_sig(SIGILL);
|
2012-03-06 20:28:54 +00:00
|
|
|
break;
|
2015-04-03 22:25:08 +00:00
|
|
|
}
|
2020-05-04 08:51:29 +00:00
|
|
|
fallthrough;
|
2018-11-07 23:14:05 +00:00
|
|
|
case 1: {
|
|
|
|
void __user *fault_addr;
|
|
|
|
unsigned long fcr31;
|
|
|
|
int err, sig;
|
|
|
|
|
2014-01-27 15:23:11 +00:00
|
|
|
err = enable_restore_fp_context(0);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-04-03 22:27:15 +00:00
|
|
|
if (raw_cpu_has_fpu && !err)
|
|
|
|
break;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-04-03 22:27:15 +00:00
|
|
|
sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0,
|
|
|
|
&fault_addr);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We can't allow the emulated instruction to leave
|
2016-10-28 07:21:03 +00:00
|
|
|
* any enabled Cause bits set in $fcr31.
|
2015-04-03 22:27:15 +00:00
|
|
|
*/
|
2016-10-28 07:21:03 +00:00
|
|
|
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
|
|
|
|
current->thread.fpu.fcr31 &= ~fcr31;
|
2015-04-03 22:27:15 +00:00
|
|
|
|
|
|
|
/* Send a signal if required. */
|
|
|
|
if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
|
|
|
|
mt_ase_fp_affinity();
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-04-03 22:25:08 +00:00
|
|
|
break;
|
2018-11-07 23:14:05 +00:00
|
|
|
}
|
|
|
|
#else /* CONFIG_MIPS_FP_SUPPORT */
|
|
|
|
case 1:
|
|
|
|
case 3:
|
2019-05-23 15:17:27 +00:00
|
|
|
force_sig(SIGILL);
|
2018-11-07 23:14:05 +00:00
|
|
|
break;
|
|
|
|
#endif /* CONFIG_MIPS_FP_SUPPORT */
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
case 2:
|
2009-11-24 01:24:58 +00:00
|
|
|
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
|
2015-04-03 22:25:08 +00:00
|
|
|
break;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2013-05-28 23:07:19 +00:00
|
|
|
exception_exit(prev_state);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
MIPS: Clear [MSA]FPE CSR.Cause after notify_die()
When handling floating point exceptions (FPEs) and MSA FPEs the Cause
bits of the appropriate control and status register (FCSR for FPEs and
MSACSR for MSA FPEs) are read and cleared before enabling interrupts,
presumably so that it doesn't have to go through the pain of restoring
those bits if the process is pre-empted, since writing those bits would
cause another immediate exception while still in the kernel.
The bits aren't normally ever restored again, since userland never
expects to see them set.
However for virtualisation it is necessary for the kernel to be able to
restore these Cause bits, as the guest may have been interrupted in an
FP exception handler but before it could read the Cause bits. This can
be done by registering a die notifier, to get notified of the exception
when such a value is restored, and if the PC was at the instruction
which is used to restore the guest state, the handler can step over it
and continue execution. The Cause bits can then remain set without
causing further exceptions.
For this to work safely a few changes are made:
- __build_clear_fpe and __build_clear_msa_fpe no longer clear the Cause
bits, and now return from exception level with interrupts disabled
instead of enabled.
- do_fpe() now clears the Cause bits and enables interrupts after
notify_die() is called, so that the notifier can chose to return from
exception without this happening.
- do_msa_fpe() acts similarly, but now actually makes use of the second
argument (msacsr) and calls notify_die() with the new DIE_MSAFP,
allowing die notifiers to be informed of MSA FPEs too.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
2014-12-02 13:44:13 +00:00
|
|
|
asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
|
2014-01-27 15:23:12 +00:00
|
|
|
{
|
|
|
|
enum ctx_state prev_state;
|
|
|
|
|
|
|
|
prev_state = exception_enter();
|
2015-07-28 18:37:43 +00:00
|
|
|
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
|
MIPS: Clear [MSA]FPE CSR.Cause after notify_die()
When handling floating point exceptions (FPEs) and MSA FPEs the Cause
bits of the appropriate control and status register (FCSR for FPEs and
MSACSR for MSA FPEs) are read and cleared before enabling interrupts,
presumably so that it doesn't have to go through the pain of restoring
those bits if the process is pre-empted, since writing those bits would
cause another immediate exception while still in the kernel.
The bits aren't normally ever restored again, since userland never
expects to see them set.
However for virtualisation it is necessary for the kernel to be able to
restore these Cause bits, as the guest may have been interrupted in an
FP exception handler but before it could read the Cause bits. This can
be done by registering a die notifier, to get notified of the exception
when such a value is restored, and if the PC was at the instruction
which is used to restore the guest state, the handler can step over it
and continue execution. The Cause bits can then remain set without
causing further exceptions.
For this to work safely a few changes are made:
- __build_clear_fpe and __build_clear_msa_fpe no longer clear the Cause
bits, and now return from exception level with interrupts disabled
instead of enabled.
- do_fpe() now clears the Cause bits and enables interrupts after
notify_die() is called, so that the notifier can chose to return from
exception without this happening.
- do_msa_fpe() acts similarly, but now actually makes use of the second
argument (msacsr) and calls notify_die() with the new DIE_MSAFP,
allowing die notifiers to be informed of MSA FPEs too.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
2014-12-02 13:44:13 +00:00
|
|
|
if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
|
2015-07-28 18:37:43 +00:00
|
|
|
current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
|
MIPS: Clear [MSA]FPE CSR.Cause after notify_die()
When handling floating point exceptions (FPEs) and MSA FPEs the Cause
bits of the appropriate control and status register (FCSR for FPEs and
MSACSR for MSA FPEs) are read and cleared before enabling interrupts,
presumably so that it doesn't have to go through the pain of restoring
those bits if the process is pre-empted, since writing those bits would
cause another immediate exception while still in the kernel.
The bits aren't normally ever restored again, since userland never
expects to see them set.
However for virtualisation it is necessary for the kernel to be able to
restore these Cause bits, as the guest may have been interrupted in an
FP exception handler but before it could read the Cause bits. This can
be done by registering a die notifier, to get notified of the exception
when such a value is restored, and if the PC was at the instruction
which is used to restore the guest state, the handler can step over it
and continue execution. The Cause bits can then remain set without
causing further exceptions.
For this to work safely a few changes are made:
- __build_clear_fpe and __build_clear_msa_fpe no longer clear the Cause
bits, and now return from exception level with interrupts disabled
instead of enabled.
- do_fpe() now clears the Cause bits and enables interrupts after
notify_die() is called, so that the notifier can chose to return from
exception without this happening.
- do_msa_fpe() acts similarly, but now actually makes use of the second
argument (msacsr) and calls notify_die() with the new DIE_MSAFP,
allowing die notifiers to be informed of MSA FPEs too.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
2014-12-02 13:44:13 +00:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* Clear MSACSR.Cause before enabling interrupts */
|
|
|
|
write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
|
|
|
|
local_irq_enable();
|
|
|
|
|
2014-01-27 15:23:12 +00:00
|
|
|
die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
|
2019-05-23 15:17:27 +00:00
|
|
|
force_sig(SIGFPE);
|
MIPS: Clear [MSA]FPE CSR.Cause after notify_die()
When handling floating point exceptions (FPEs) and MSA FPEs the Cause
bits of the appropriate control and status register (FCSR for FPEs and
MSACSR for MSA FPEs) are read and cleared before enabling interrupts,
presumably so that it doesn't have to go through the pain of restoring
those bits if the process is pre-empted, since writing those bits would
cause another immediate exception while still in the kernel.
The bits aren't normally ever restored again, since userland never
expects to see them set.
However for virtualisation it is necessary for the kernel to be able to
restore these Cause bits, as the guest may have been interrupted in an
FP exception handler but before it could read the Cause bits. This can
be done by registering a die notifier, to get notified of the exception
when such a value is restored, and if the PC was at the instruction
which is used to restore the guest state, the handler can step over it
and continue execution. The Cause bits can then remain set without
causing further exceptions.
For this to work safely a few changes are made:
- __build_clear_fpe and __build_clear_msa_fpe no longer clear the Cause
bits, and now return from exception level with interrupts disabled
instead of enabled.
- do_fpe() now clears the Cause bits and enables interrupts after
notify_die() is called, so that the notifier can chose to return from
exception without this happening.
- do_msa_fpe() acts similarly, but now actually makes use of the second
argument (msacsr) and calls notify_die() with the new DIE_MSAFP,
allowing die notifiers to be informed of MSA FPEs too.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
2014-12-02 13:44:13 +00:00
|
|
|
out:
|
2014-01-27 15:23:12 +00:00
|
|
|
exception_exit(prev_state);
|
|
|
|
}
|
|
|
|
|
2014-01-27 15:23:11 +00:00
|
|
|
asmlinkage void do_msa(struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
enum ctx_state prev_state;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
prev_state = exception_enter();
|
|
|
|
|
|
|
|
if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
|
2019-05-23 15:17:27 +00:00
|
|
|
force_sig(SIGILL);
|
2014-01-27 15:23:11 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
die_if_kernel("do_msa invoked from kernel context!", regs);
|
|
|
|
|
|
|
|
err = enable_restore_fp_context(1);
|
|
|
|
if (err)
|
2019-05-23 15:17:27 +00:00
|
|
|
force_sig(SIGILL);
|
2014-01-27 15:23:11 +00:00
|
|
|
out:
|
|
|
|
exception_exit(prev_state);
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
asmlinkage void do_mdmx(struct pt_regs *regs)
|
|
|
|
{
|
2013-05-28 23:07:19 +00:00
|
|
|
enum ctx_state prev_state;
|
|
|
|
|
|
|
|
prev_state = exception_enter();
|
2019-05-23 15:17:27 +00:00
|
|
|
force_sig(SIGILL);
|
2013-05-28 23:07:19 +00:00
|
|
|
exception_exit(prev_state);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2009-01-05 23:29:58 +00:00
|
|
|
/*
|
|
|
|
* Called with interrupts disabled.
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
asmlinkage void do_watch(struct pt_regs *regs)
|
|
|
|
{
|
2013-05-28 23:07:19 +00:00
|
|
|
enum ctx_state prev_state;
|
2008-09-23 07:08:45 +00:00
|
|
|
|
2013-05-28 23:07:19 +00:00
|
|
|
prev_state = exception_enter();
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2008-09-23 07:08:45 +00:00
|
|
|
* Clear WP (bit 22) bit of cause register so we don't loop
|
|
|
|
* forever.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2016-03-01 22:19:38 +00:00
|
|
|
clear_c0_cause(CAUSEF_WP);
|
2008-09-23 07:08:45 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the current thread has the watch registers loaded, save
|
|
|
|
* their values and send SIGTRAP. Otherwise another thread
|
|
|
|
* left the registers set, clear them and continue.
|
|
|
|
*/
|
|
|
|
if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
|
|
|
|
mips_read_watch_registers();
|
2009-01-05 23:29:58 +00:00
|
|
|
local_irq_enable();
|
2019-05-23 16:04:24 +00:00
|
|
|
force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
|
2009-01-05 23:29:58 +00:00
|
|
|
} else {
|
2008-09-23 07:08:45 +00:00
|
|
|
mips_clear_watch_registers();
|
2009-01-05 23:29:58 +00:00
|
|
|
local_irq_enable();
|
|
|
|
}
|
2013-05-28 23:07:19 +00:00
|
|
|
exception_exit(prev_state);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
asmlinkage void do_mcheck(struct pt_regs *regs)
|
|
|
|
{
|
2006-05-24 15:51:02 +00:00
|
|
|
int multi_match = regs->cp0_status & ST0_TS;
|
2013-05-28 23:07:19 +00:00
|
|
|
enum ctx_state prev_state;
|
2015-07-27 12:50:21 +00:00
|
|
|
mm_segment_t old_fs = get_fs();
|
2006-05-24 15:51:02 +00:00
|
|
|
|
2013-05-28 23:07:19 +00:00
|
|
|
prev_state = exception_enter();
|
2005-04-16 22:20:36 +00:00
|
|
|
show_regs(regs);
|
2006-05-24 15:51:02 +00:00
|
|
|
|
|
|
|
if (multi_match) {
|
2015-07-15 15:17:43 +00:00
|
|
|
dump_tlb_regs();
|
|
|
|
pr_info("\n");
|
2006-05-24 15:51:02 +00:00
|
|
|
dump_tlb_all();
|
|
|
|
}
|
|
|
|
|
2015-07-27 12:50:21 +00:00
|
|
|
if (!user_mode(regs))
|
|
|
|
set_fs(KERNEL_DS);
|
|
|
|
|
2007-07-13 14:51:46 +00:00
|
|
|
show_code((unsigned int __user *) regs->cp0_epc);
|
2006-05-24 15:51:02 +00:00
|
|
|
|
2015-07-27 12:50:21 +00:00
|
|
|
set_fs(old_fs);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Some chips may have other causes of machine check (e.g. SB1
|
|
|
|
* graduation timer)
|
|
|
|
*/
|
|
|
|
panic("Caught Machine Check exception - %scaused by multiple "
|
|
|
|
"matching entries in the TLB.",
|
2006-05-24 15:51:02 +00:00
|
|
|
(multi_match) ? "" : "not ");
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2005-08-17 17:44:08 +00:00
|
|
|
asmlinkage void do_mt(struct pt_regs *regs)
|
|
|
|
{
|
2006-04-05 08:45:45 +00:00
|
|
|
int subcode;
|
|
|
|
|
|
|
|
subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
|
|
|
|
>> VPECONTROL_EXCPT_SHIFT;
|
|
|
|
switch (subcode) {
|
|
|
|
case 0:
|
2006-06-30 13:19:45 +00:00
|
|
|
printk(KERN_DEBUG "Thread Underflow\n");
|
2006-04-05 08:45:45 +00:00
|
|
|
break;
|
|
|
|
case 1:
|
2006-06-30 13:19:45 +00:00
|
|
|
printk(KERN_DEBUG "Thread Overflow\n");
|
2006-04-05 08:45:45 +00:00
|
|
|
break;
|
|
|
|
case 2:
|
2006-06-30 13:19:45 +00:00
|
|
|
printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
|
2006-04-05 08:45:45 +00:00
|
|
|
break;
|
|
|
|
case 3:
|
2006-06-30 13:19:45 +00:00
|
|
|
printk(KERN_DEBUG "Gating Storage Exception\n");
|
2006-04-05 08:45:45 +00:00
|
|
|
break;
|
|
|
|
case 4:
|
2006-06-30 13:19:45 +00:00
|
|
|
printk(KERN_DEBUG "YIELD Scheduler Exception\n");
|
2006-04-05 08:45:45 +00:00
|
|
|
break;
|
|
|
|
case 5:
|
2012-02-08 12:53:14 +00:00
|
|
|
printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
|
2006-04-05 08:45:45 +00:00
|
|
|
break;
|
|
|
|
default:
|
2006-06-30 13:19:45 +00:00
|
|
|
printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
|
2006-04-05 08:45:45 +00:00
|
|
|
subcode);
|
|
|
|
break;
|
|
|
|
}
|
2005-08-17 17:44:08 +00:00
|
|
|
die_if_kernel("MIPS MT Thread exception in kernel", regs);
|
|
|
|
|
2019-05-23 15:17:27 +00:00
|
|
|
force_sig(SIGILL);
|
2005-08-17 17:44:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-05-31 11:49:19 +00:00
|
|
|
asmlinkage void do_dsp(struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
if (cpu_has_dsp)
|
2011-11-17 15:07:31 +00:00
|
|
|
panic("Unexpected DSP exception");
|
2005-05-31 11:49:19 +00:00
|
|
|
|
2019-05-23 15:17:27 +00:00
|
|
|
force_sig(SIGILL);
|
2005-05-31 11:49:19 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
asmlinkage void do_reserved(struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
/*
|
2013-01-22 11:59:30 +00:00
|
|
|
* Game over - no way to handle this if it ever occurs. Most probably
|
2005-04-16 22:20:36 +00:00
|
|
|
* caused by a new unknown cpu type or after another deadly
|
|
|
|
* hard/software error.
|
|
|
|
*/
|
|
|
|
show_regs(regs);
|
|
|
|
panic("Caught reserved exception %ld - should not happen.",
|
|
|
|
(regs->cp0_cause & 0x7f) >> 2);
|
|
|
|
}
|
|
|
|
|
2008-04-28 16:14:26 +00:00
|
|
|
/* L1 cache parity checking is on by default; "nol1par" disables it. */
static int __initdata l1parity = 1;

/* Kernel command-line handler for the "nol1par" parameter. */
static int __init nol1parity(char *s)
{
	l1parity = 0;

	return 1;
}
__setup("nol1par", nol1parity);
|
|
|
|
/* L2 cache parity checking is on by default; "nol2par" disables it. */
static int __initdata l2parity = 1;

/* Kernel command-line handler for the "nol2par" parameter. */
static int __init nol2parity(char *s)
{
	l2parity = 0;

	return 1;
}
__setup("nol2par", nol2parity);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Some MIPS CPUs can enable/disable for cache parity detection, but do
 * it different ways.
 */
static inline void parity_protection_init(void)
{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000

	if (mips_cm_revision() >= CM_REV_CM3) {
		ulong gcr_ectl, cp0_ectl;

		/*
		 * With CM3 systems we need to ensure that the L1 & L2
		 * parity enables are set to the same value, since this
		 * is presumed by the hardware engineers.
		 *
		 * If the user disabled either of L1 or L2 ECC checking,
		 * disable both.
		 */
		l1parity &= l2parity;
		l2parity &= l1parity;

		/*
		 * Probe L1 ECC support: try setting the PE bit and read it
		 * back; an unimplemented bit will read back as zero.
		 */
		cp0_ectl = read_c0_ecc();
		write_c0_ecc(cp0_ectl | ERRCTL_PE);
		back_to_back_c0_hazard();
		cp0_ectl = read_c0_ecc();

		/* Probe L2 ECC support */
		gcr_ectl = read_gcr_err_control();

		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
		    !(cp0_ectl & ERRCTL_PE)) {
			/*
			 * One of L1 or L2 ECC checking isn't supported,
			 * so we cannot enable either.
			 */
			l1parity = l2parity = 0;
		}

		/* Configure L1 ECC checking */
		if (l1parity)
			cp0_ectl |= ERRCTL_PE;
		else
			cp0_ectl &= ~ERRCTL_PE;
		write_c0_ecc(cp0_ectl);
		back_to_back_c0_hazard();
		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);

		/* Configure L2 ECC checking */
		if (l2parity)
			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		else
			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
		write_gcr_err_control(gcr_ectl);
		gcr_ectl = read_gcr_err_control();
		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		WARN_ON(!!gcr_ectl != l2parity);

		/* L1 and L2 were forced equal above; one message covers both. */
		pr_info("Cache parity protection %sabled\n",
			l1parity ? "en" : "dis");
		return;
	}

	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
	case CPU_P6600:
		{
		unsigned long errctl;
		unsigned int l1parity_present, l2parity_present;

		errctl = read_c0_ecc();
		errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

		/* probe L1 parity support (write the bit, see if it sticks) */
		write_c0_ecc(errctl | ERRCTL_PE);
		back_to_back_c0_hazard();
		l1parity_present = (read_c0_ecc() & ERRCTL_PE);

		/* probe L2 parity support */
		write_c0_ecc(errctl|ERRCTL_L2P);
		back_to_back_c0_hazard();
		l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

		if (l1parity_present && l2parity_present) {
			if (l1parity)
				errctl |= ERRCTL_PE;
			/*
			 * NOTE(review): L2P appears to act as an "L2 differs
			 * from L1" bit here rather than a plain enable -
			 * hence the XOR below; confirm against the core's
			 * ErrCtl documentation.
			 */
			if (l1parity ^ l2parity)
				errctl |= ERRCTL_L2P;
		} else if (l1parity_present) {
			if (l1parity)
				errctl |= ERRCTL_PE;
		} else if (l2parity_present) {
			if (l2parity)
				errctl |= ERRCTL_L2P;
		} else {
			/* No parity available */
		}

		printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

		write_c0_ecc(errctl);
		back_to_back_c0_hazard();
		errctl = read_c0_ecc();
		printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

		if (l1parity_present)
			printk(KERN_INFO "Cache parity protection %sabled\n",
			       (errctl & ERRCTL_PE) ? "en" : "dis");

		if (l2parity_present) {
			/* Undo the XOR encoding to report the true L2 state. */
			if (l1parity_present && l1parity)
				errctl ^= ERRCTL_L2P;
			printk(KERN_INFO "L2 cache parity protection %sabled\n",
			       (errctl & ERRCTL_L2P) ? "en" : "dis");
		}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON32:
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}
|
|
|
|
|
|
|
|
/*
 * Cache Error exception handler: decode the CacheErr register, print
 * everything we know, and panic.  Cache errors arrive on the error
 * vector with ErrorEPC instead of EPC, hence read_c0_errorepc().
 */
asmlinkage void cache_parity_error(void)
{
	/* Width (in hex digits) of a native register for %0*lx printing. */
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	/*
	 * MIPS Technologies R2+ cores define bit 27 (ES); other
	 * implementations use the shorter bit layout below.
	 */
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}
|
|
|
|
|
2013-11-14 16:12:31 +00:00
|
|
|
/*
 * FTLB error exception handler: decode what CacheErr says about the
 * fault where the CPU is known to report FTLB errors through it, then
 * fall through to the common cache error path (which panics).
 */
asmlinkage void do_ftlb(void)
{
	/* Width (in hex digits) of a native register for %0*lx printing. */
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		/* Bits 31:30 both set marks an FTLB parity error. */
		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	/* Width (in hex digits) of a native register for %0*lx printing. */
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	/* Bit 31 of Debug set => the SDBBP sat in a branch delay slot. */
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];	/* compute_return_epc may clobber $ra */
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;	/* plain case: step over the 4-byte SDBBP */
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* NMI exception handler.
|
2011-11-16 01:25:44 +00:00
|
|
|
* No lock; only written during early bootup by CPU 0.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2011-11-16 01:25:44 +00:00
|
|
|
/* Chain of callbacks to invoke when an NMI is taken; see comment above. */
static RAW_NOTIFIER_HEAD(nmi_chain);

/*
 * Register a callback to be run (in NMI context) from
 * nmi_exception_handler() before the system dies.
 */
int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}
|
|
|
|
|
2012-01-13 01:17:21 +00:00
|
|
|
/*
 * NMI handler: run the registered notifiers, then die with a message
 * identifying the CPU and the interrupted PC.  Marked __noreturn -
 * die() does not come back here, so nmi_exit() is effectively
 * unreachable (kept for symmetry with nmi_enter()).
 */
void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	/* NMIs report the interrupted PC in ErrorEPC, not EPC. */
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}
|
|
|
|
|
2005-07-14 15:57:16 +00:00
|
|
|
#define VECTORSPACING 0x100	/* for EI/VI mode */

/* Base address of the exception vector area (programmed into CP0 EBase). */
unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
/* Dispatch tables: general exception handlers and vectored-interrupt handlers. */
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-01-28 14:21:42 +00:00
|
|
|
/*
 * Install @addr as the handler for exception cause code @n and return
 * the previous handler.  For cause 0 (interrupt) on CPUs with dedicated
 * interrupt vectors (cpu_has_divec), also patch a jump trampoline at
 * ebase + 0x200 so the hardware vector reaches the new handler.
 */
void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	/* Atomic swap so concurrent readers always see a valid handler. */
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
		/* Reach of a j/jal instruction: 2^27 halfwords / 2^28 bytes. */
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;	/* $k0 - reserved for kernel use */
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			/* Same 256MB segment: a direct jump reaches it. */
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			/* Out of jump range: load the address and jr via $k0. */
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}
|
|
|
|
|
MIPS: Make tlb exception handler definitions and declarations match.
The code was written as it is because it's more expressive, a bit easier.
But it's always been dirty, if not a bug. But we can't cheat with LTO
compilers, so this results in:
[...]
LDFINAL vmlinux.o
In file included from arch/mips/kernel/topology.c:604:0,
from arch/mips/kernel/time.c:212,
from arch/mips/kernel/syscall.c:300,
from arch/mips/kernel/signal.c:853,
from arch/mips/kernel/setup.c:1030,
from arch/mips/kernel/reset.c:354,
from arch/mips/kernel/ptrace.c:562,
from arch/mips/kernel/process.c:770,
from arch/mips/kernel/irq.c:350,
from arch/mips/kernel/branch.c:321,
from arch/mips/kernel/cpu-probe.c:1370,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/thread_info.h:345,
from arch/mips/sgi-ip22/ip22-gio.c:660,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/sgialib.h:219,
from arch/mips/sgi-ip22/ip22-reset.c:224,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/paccess.h:116,
from arch/mips/sgi-ip22/ip22-nvram.c:334,
from include/linux/kernel_stat.h:79,
from arch/mips/sgi-ip22/ip22-int.c:592,
from arch/mips/sgi-ip22/ip22-hpc.c:470,
from arch/mips/sgi-ip22/ip22-mc.c:135,
from init/init_task.c:54,
from init/calibrate.c:744,
from init/noinitramfs.c:62,
from init/do_mounts.c:573,
from init/version.c:1009,
from init/main.c:777,
from :729:
arch/mips/kernel/traps.c:63:49: error: variable ‘handle_tlbl’ redeclared as function
In file included from arch/mips/mm/page.c:310:0,
from arch/mips/mm/mmap.c:208,
from arch/mips/mm/init.c:641,
from arch/mips/mm/gup.c:811,
from arch/mips/mm/fault.c:659,
from include/linux/module.h:682,
from arch/mips/mm/dma-default.c:161,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/thread_info.h:397,
from arch/mips/kernel/i8253.c:538,
from arch/mips/kernel/proc.c:145,
from arch/mips/kernel/irq_cpu.c:129,
from arch/mips/kernel/i8259.c:229,
from include/uapi/linux/elf.h:251,
from arch/mips/kernel/mips_ksyms.c:129,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/time.h:50,
from arch/mips/kernel/cevt-r4k.c:90,
from arch/mips/kernel/vdso.c:136,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/thread_info.h:351,
from arch/mips/kernel/unaligned.c:809,
from arch/mips/kernel/traps.c:1720,
from arch/mips/kernel/topology.c:684,
from arch/mips/kernel/time.c:212,
from arch/mips/kernel/syscall.c:300,
from arch/mips/kernel/signal.c:853,
from arch/mips/kernel/setup.c:1030,
from arch/mips/kernel/reset.c:354,
from arch/mips/kernel/ptrace.c:562,
from arch/mips/kernel/process.c:770,
from arch/mips/kernel/irq.c:350,
from arch/mips/kernel/branch.c:321,
from arch/mips/kernel/cpu-probe.c:1370,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/thread_info.h:345,
from arch/mips/sgi-ip22/ip22-gio.c:660,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/sgialib.h:219,
from arch/mips/sgi-ip22/ip22-reset.c:224,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/paccess.h:116,
from arch/mips/sgi-ip22/ip22-nvram.c:334,
from include/linux/kernel_stat.h:79,
from arch/mips/sgi-ip22/ip22-int.c:592,
from arch/mips/sgi-ip22/ip22-hpc.c:470,
from arch/mips/sgi-ip22/ip22-mc.c:135,
from init/init_task.c:54,
from init/calibrate.c:744,
from init/noinitramfs.c:62,
from init/do_mounts.c:573,
from init/version.c:1009,
from init/main.c:777,
from :729:
arch/mips/mm/tlbex.c:1448:5: note: previously declared here
In file included from arch/mips/kernel/topology.c:604:0,
from arch/mips/kernel/time.c:212,
from arch/mips/kernel/syscall.c:300,
from arch/mips/kernel/signal.c:853,
from arch/mips/kernel/setup.c:1030,
from arch/mips/kernel/reset.c:354,
from arch/mips/kernel/ptrace.c:562,
from arch/mips/kernel/process.c:770,
from arch/mips/kernel/irq.c:350,
from arch/mips/kernel/branch.c:321,
from arch/mips/kernel/cpu-probe.c:1370,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/thread_info.h:345,
from arch/mips/sgi-ip22/ip22-gio.c:660,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/sgialib.h:219,
from arch/mips/sgi-ip22/ip22-reset.c:224,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/paccess.h:116,
from arch/mips/sgi-ip22/ip22-nvram.c:334,
from include/linux/kernel_stat.h:79,
from arch/mips/sgi-ip22/ip22-int.c:592,
from arch/mips/sgi-ip22/ip22-hpc.c:470,
from arch/mips/sgi-ip22/ip22-mc.c:135,
from init/init_task.c:54,
from init/calibrate.c:744,
from init/noinitramfs.c:62,
from init/do_mounts.c:573,
from init/version.c:1009,
from init/main.c:777,
from :729:
arch/mips/kernel/traps.c:62:49: error: variable ‘handle_tlbm’ redeclared as function
In file included from arch/mips/mm/page.c:310:0,
from arch/mips/mm/mmap.c:208,
from arch/mips/mm/init.c:641,
from arch/mips/mm/gup.c:811,
from arch/mips/mm/fault.c:659,
from include/linux/module.h:682,
from arch/mips/mm/dma-default.c:161,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/thread_info.h:397,
from arch/mips/kernel/i8253.c:538,
from arch/mips/kernel/proc.c:145,
from arch/mips/kernel/irq_cpu.c:129,
from arch/mips/kernel/i8259.c:229,
from include/uapi/linux/elf.h:251,
from arch/mips/kernel/mips_ksyms.c:129,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/time.h:50,
from arch/mips/kernel/cevt-r4k.c:90,
from arch/mips/kernel/vdso.c:136,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/thread_info.h:351,
from arch/mips/kernel/unaligned.c:809,
from arch/mips/kernel/traps.c:1720,
from arch/mips/kernel/topology.c:684,
from arch/mips/kernel/time.c:212,
from arch/mips/kernel/syscall.c:300,
from arch/mips/kernel/signal.c:853,
from arch/mips/kernel/setup.c:1030,
from arch/mips/kernel/reset.c:354,
from arch/mips/kernel/ptrace.c:562,
from arch/mips/kernel/process.c:770,
from arch/mips/kernel/irq.c:350,
from arch/mips/kernel/branch.c:321,
from arch/mips/kernel/cpu-probe.c:1370,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/thread_info.h:345,
from arch/mips/sgi-ip22/ip22-gio.c:660,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/sgialib.h:219,
from arch/mips/sgi-ip22/ip22-reset.c:224,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/paccess.h:116,
from arch/mips/sgi-ip22/ip22-nvram.c:334,
from include/linux/kernel_stat.h:79,
from arch/mips/sgi-ip22/ip22-int.c:592,
from arch/mips/sgi-ip22/ip22-hpc.c:470,
from arch/mips/sgi-ip22/ip22-mc.c:135,
from init/init_task.c:54,
from init/calibrate.c:744,
from init/noinitramfs.c:62,
from init/do_mounts.c:573,
from init/version.c:1009,
from init/main.c:777,
from :729:
arch/mips/mm/tlbex.c:1450:5: note: previously declared here
In file included from arch/mips/kernel/topology.c:604:0,
from arch/mips/kernel/time.c:212,
from arch/mips/kernel/syscall.c:300,
from arch/mips/kernel/signal.c:853,
from arch/mips/kernel/setup.c:1030,
from arch/mips/kernel/reset.c:354,
from arch/mips/kernel/ptrace.c:562,
from arch/mips/kernel/process.c:770,
from arch/mips/kernel/irq.c:350,
from arch/mips/kernel/branch.c:321,
from arch/mips/kernel/cpu-probe.c:1370,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/thread_info.h:345,
from arch/mips/sgi-ip22/ip22-gio.c:660,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/sgialib.h:219,
from arch/mips/sgi-ip22/ip22-reset.c:224,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/paccess.h:116,
from arch/mips/sgi-ip22/ip22-nvram.c:334,
from include/linux/kernel_stat.h:79,
from arch/mips/sgi-ip22/ip22-int.c:592,
from arch/mips/sgi-ip22/ip22-hpc.c:470,
from arch/mips/sgi-ip22/ip22-mc.c:135,
from init/init_task.c:54,
from init/calibrate.c:744,
from init/noinitramfs.c:62,
from init/do_mounts.c:573,
from init/version.c:1009,
from init/main.c:777,
from :729:
arch/mips/kernel/traps.c:64:49: error: variable ‘handle_tlbs’ redeclared as function
In file included from arch/mips/mm/page.c:310:0,
from arch/mips/mm/mmap.c:208,
from arch/mips/mm/init.c:641,
from arch/mips/mm/gup.c:811,
from arch/mips/mm/fault.c:659,
from include/linux/module.h:682,
from arch/mips/mm/dma-default.c:161,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/thread_info.h:397,
from arch/mips/kernel/i8253.c:538,
from arch/mips/kernel/proc.c:145,
from arch/mips/kernel/irq_cpu.c:129,
from arch/mips/kernel/i8259.c:229,
from include/uapi/linux/elf.h:251,
from arch/mips/kernel/mips_ksyms.c:129,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/time.h:50,
from arch/mips/kernel/cevt-r4k.c:90,
from arch/mips/kernel/vdso.c:136,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/thread_info.h:351,
from arch/mips/kernel/unaligned.c:809,
from arch/mips/kernel/traps.c:1720,
from arch/mips/kernel/topology.c:684,
from arch/mips/kernel/time.c:212,
from arch/mips/kernel/syscall.c:300,
from arch/mips/kernel/signal.c:853,
from arch/mips/kernel/setup.c:1030,
from arch/mips/kernel/reset.c:354,
from arch/mips/kernel/ptrace.c:562,
from arch/mips/kernel/process.c:770,
from arch/mips/kernel/irq.c:350,
from arch/mips/kernel/branch.c:321,
from arch/mips/kernel/cpu-probe.c:1370,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/thread_info.h:345,
from arch/mips/sgi-ip22/ip22-gio.c:660,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/sgialib.h:219,
from arch/mips/sgi-ip22/ip22-reset.c:224,
from /fluff/home/ralf/src/linux/lto/linux-misc/arch/mips/include/asm/paccess.h:116,
from arch/mips/sgi-ip22/ip22-nvram.c:334,
from include/linux/kernel_stat.h:79,
from arch/mips/sgi-ip22/ip22-int.c:592,
from arch/mips/sgi-ip22/ip22-hpc.c:470,
from arch/mips/sgi-ip22/ip22-mc.c:135,
from init/init_task.c:54,
from init/calibrate.c:744,
from init/noinitramfs.c:62,
from init/do_mounts.c:573,
from init/version.c:1009,
from init/main.c:777,
from :729:
arch/mips/mm/tlbex.c:1449:5: note: previously declared here
lto1: fatal error: errors during merging of translation units
compilation terminated.
lto-wrapper: /usr/bin/mips-linux-gcc returned 1 exit status
/usr/lib64/gcc/mips-linux/4.7.1/../../../../mips-linux/bin/ld: lto-wrapper failed
collect2: error: ld returned 1 exit status
make: *** [vmlinux] Error 1
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2013-02-08 00:21:34 +00:00
|
|
|
/*
 * Fallback vectored-interrupt handler, installed by set_vi_srs_handler()
 * when no real handler is given: a vectored interrupt with no handler
 * is a fatal configuration error.
 */
static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}
|
|
|
|
|
2007-05-06 17:31:18 +00:00
|
|
|
/*
 * Install @addr as the handler for vectored interrupt @n, optionally
 * bound to shadow register set @srs, and return the previous handler.
 * The vector stub at ebase + 0x200 + n*VECTORSPACING is rewritten in
 * place: for srs == 0 a copy of the common save/dispatch code with the
 * handler address patched into its lui/ori pair; otherwise a bare jump
 * (the handler must then save state and "eret" itself).
 */
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		/* NULL uninstalls: fall back to the catch-all panic handler. */
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	/* Address of this interrupt's vector stub. */
	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
		/*
		 * Offsets of the lui/ori immediates within the stub; +2
		 * skips to the immediate halfword on big-endian/microMIPS.
		 */
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicing won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		/* Patch the handler address into the copied stub. */
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;	/* nop in the jump's delay slot */
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}
|
|
|
|
|
2007-05-06 17:31:18 +00:00
|
|
|
/*
 * Install a vectored-interrupt handler using shadow register set 0
 * (the common case); returns the previous handler.
 */
void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}
|
2006-06-05 16:24:46 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
extern void tlb_init(void);
|
|
|
|
|
2007-10-18 16:48:11 +00:00
|
|
|
/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);
|
|
|
|
|
MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code
commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
the arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
related content into no-ops as early as possible, since that will get
rid of these warnings. In any case, they are temporary and harmless.
Here, we remove all the MIPS __cpuinit from C code and __CPUINIT
from asm files. MIPS is interesting in this respect, because there
are also uasm users hiding behind their own renamed versions of the
__cpuinit macros.
[1] https://lkml.org/lkml/2013/5/20/589
[ralf@linux-mips.org: Folded in Paul's followup fix.]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2013-06-18 13:38:59 +00:00
|
|
|
/* Set when "noulri" is passed on the kernel command line. */
static int noulri;

/*
 * Command-line handler for "noulri": prevents configure_hwrena() from
 * enabling userland RDHWR access to the UserLocal register.
 */
static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;	/* option consumed */
}

__setup("noulri", ulri_disable);
|
|
|
|
|
2014-03-04 10:20:43 +00:00
|
|
|
/* configure STATUS register */
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
	/* 64-bit kernels: 64-bit FPRs and 64-bit kernel/supervisor/user AS. */
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;	/* allow access to the DSP ASE */

	/* Clear every bit in the mask below except those requested above. */
	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
}
|
|
|
|
|
2016-06-15 18:29:53 +00:00
|
|
|
/* Bits enabled in CP0 HWREna; exported for use elsewhere in the kernel. */
unsigned int hwrena;
EXPORT_SYMBOL_GPL(hwrena);

/* configure HWRENA register */
static void configure_hwrena(void)
{
	/* Start from any implementation-specific bits the CPU requires. */
	hwrena = cpu_hwrena_impl_bits;

	/* R2+ CPUs: enable userland RDHWR access to the standard registers. */
	if (cpu_has_mips_r2_r6)
		hwrena |= MIPS_HWRENA_CPUNUM |
			  MIPS_HWRENA_SYNCISTEP |
			  MIPS_HWRENA_CC |
			  MIPS_HWRENA_CCRES;

	/* UserLocal (TLS pointer) access, unless disabled via "noulri". */
	if (!noulri && cpu_has_userlocal)
		hwrena |= MIPS_HWRENA_ULR;

	/* Only touch the register if there is something to enable. */
	if (hwrena)
		write_c0_hwrena(hwrena);
}
|
2005-07-14 15:57:16 +00:00
|
|
|
|
2014-03-04 10:20:43 +00:00
|
|
|
/* Program the CP0 exception base (EBase) and interrupt vectoring mode. */
static void configure_exception_vector(void)
{
	if (cpu_has_mips_r2_r6) {
		/* Set BEV around the EBase update; restore Status afterwards. */
		unsigned long sr = set_c0_status(ST0_BEV);

		/* If available, use WG to set top bits of EBASE */
		if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
			write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
		}
		/* Plain write (WG clear) fixes the low bits of EBase. */
		write_c0_ebase(ebase);
		write_c0_status(sr);
	}
	if (cpu_has_veic || cpu_has_vint) {
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			/* Quiesce other virtual processors while flipping Cause.IV. */
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}
|
|
|
|
|
|
|
|
/*
 * Per-CPU trap/exception state initialisation, run on every CPU as it is
 * brought up.
 *
 * @is_boot_cpu: true for the boot CPU, whose cache setup was already done
 *               in setup_arch(), so cpu_cache_init() is skipped here.
 */
void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		/* IPFDC == 0 means no fast debug channel interrupt. */
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		/*
		 * NOTE(review): a *_PERFCNT_IRQ constant is assigned to the
		 * *_irq_shift variable here.  This matches the historical
		 * code, but looks suspicious - confirm the intended value.
		 */
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	/* MMIDs are global, so the per-CPU ASID cache is unused with them. */
	if (cpu_has_mmid)
		cpu_data[cpu].asid_cache = 0;
	else if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	/* Run in lazy-TLB mode on init_mm until a real mm is scheduled in. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch(). */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}
|
|
|
|
|
2005-07-14 15:57:16 +00:00
|
|
|
/* Install CPU exception handler */
|
MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code
commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
the arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
related content into no-ops as early as possible, since that will get
rid of these warnings. In any case, they are temporary and harmless.
Here, we remove all the MIPS __cpuinit from C code and __CPUINIT
from asm files. MIPS is interesting in this respect, because there
are also uasm users hiding behind their own renamed versions of the
__cpuinit macros.
[1] https://lkml.org/lkml/2013/5/20/589
[ralf@linux-mips.org: Folded in Paul's followup fix.]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2013-06-18 13:38:59 +00:00
|
|
|
void set_handler(unsigned long offset, void *addr, unsigned long size)
|
2005-07-14 15:57:16 +00:00
|
|
|
{
|
2013-03-25 17:15:55 +00:00
|
|
|
#ifdef CONFIG_CPU_MICROMIPS
|
|
|
|
memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
|
|
|
|
#else
|
2005-07-14 15:57:16 +00:00
|
|
|
memcpy((void *)(ebase + offset), addr, size);
|
2013-03-25 17:15:55 +00:00
|
|
|
#endif
|
2008-08-04 18:53:57 +00:00
|
|
|
local_flush_icache_range(ebase + offset, ebase + offset + size);
|
2005-07-14 15:57:16 +00:00
|
|
|
}
|
|
|
|
|
2017-05-08 22:59:05 +00:00
|
|
|
/* Panic message for the fatal case of a NULL cache error handler. */
static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	/* Write through the uncached (KSeg1) alias of the exception base. */
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}
|
|
|
|
|
2006-09-11 08:50:29 +00:00
|
|
|
/* Set when "rdhwr_noopt" is passed on the kernel command line. */
static int __initdata rdhwr_noopt;

/*
 * Command-line handler for "rdhwr_noopt": makes trap_init() install the
 * plain RI handler instead of the RDHWR-emulating variants.
 */
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;	/* option consumed */
}

__setup("rdhwr_noopt", set_rdhwr_noopt);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * One-time boot-CPU trap setup: reserves/allocates the exception vector
 * area, initialises per-CPU trap state, and installs all exception and
 * interrupt handlers.
 */
void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i, vec_size;
	phys_addr_t ebase_pa;

	check_wait();

	if (!cpu_has_mips_r2_r6) {
		/* Pre-R2: the exception base is fixed at CAC_BASE. */
		ebase = CAC_BASE;
		ebase_pa = virt_to_phys((void *)ebase);
		vec_size = 0x400;

		memblock_reserve(ebase_pa, vec_size);
	} else {
		/* R2+: allocate a (naturally aligned) vector area. */
		if (cpu_has_veic || cpu_has_vint)
			vec_size = 0x200 + VECTORSPACING*64;
		else
			vec_size = PAGE_SIZE;

		ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
		if (!ebase_pa)
			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
			      __func__, vec_size, 1 << fls(vec_size));

		/*
		 * Try to ensure ebase resides in KSeg0 if possible.
		 *
		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
		 * hitting a poorly defined exception base for Cache Errors.
		 * The allocation is likely to be in the low 512MB of physical,
		 * in which case we should be able to convert to KSeg0.
		 *
		 * EVA is special though as it allows segments to be rearranged
		 * and to become uncached during cache error handling.
		 */
		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
			ebase = CKSEG0ADDR(ebase_pa);
		else
			ebase = (unsigned long)phys_to_virt(ebase_pa);
	}

	if (cpu_has_mmips) {
		/* Select the microMIPS/MIPS32 ISA-on-exception mode. */
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);
	memblock_set_bottom_up(false);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(EXCCODE_WATCH, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable for cache parity detection, but do
	 * it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware. Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);

	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);

	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);

	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);

	/*
	 * RI handler selection: the _tlbp variants probe the TLB first,
	 * needed for VIVT icaches and Loongson64 (see commit log).
	 */
	if (rdhwr_noopt)
		set_except_vector(EXCCODE_RI, handle_ri);
	else {
		if (cpu_has_vtag_icache)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else if (current_cpu_type() == CPU_LOONGSON64)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
	}

	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(EXCCODE_FPE, handle_fpe);

	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);

	if (cpu_has_rixiex) {
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
	}

	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(EXCCODE_MCHECK, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(EXCCODE_THREAD, handle_mt);

	set_except_vector(EXCCODE_DSPDIS, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	/* Make sure all patched handler code is visible to instruction fetch. */
	local_flush_icache_range(ebase, ebase + vec_size);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
}
|
2014-03-04 10:20:43 +00:00
|
|
|
|
|
|
|
static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
|
|
|
|
void *v)
|
|
|
|
{
|
|
|
|
switch (cmd) {
|
|
|
|
case CPU_PM_ENTER_FAILED:
|
|
|
|
case CPU_PM_EXIT:
|
|
|
|
configure_status();
|
|
|
|
configure_hwrena();
|
|
|
|
configure_exception_vector();
|
|
|
|
|
|
|
|
/* Restore register with CPU number for TLB handlers */
|
|
|
|
TLBMISS_HANDLER_RESTORE();
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NOTIFY_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Notifier re-applying CP0 trap state around CPU power transitions. */
static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

/* Register the CPU PM notifier at arch initcall time. */
static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}

arch_initcall(trap_pm_init);
|