Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300:
  MN10300: gcc 4.6 vs am33 inline assembly
  MN10300: Deprecate gdbstub
  MN10300: Allow KGDB to use the MN10300 serial ports
  MN10300: Emulate single stepping in KGDB on MN10300
  MN10300: Generalise kernel debugger kernel halt, reboot or power off hook
  KGDB: Notify GDB of machine halt, reboot or power off
  MN10300: Use KGDB
  MN10300: Create generic kernel debugger hooks
  MN10300: Create general kernel debugger cache flushing
  MN10300: Introduce a general config option for kernel debugger hooks
  MN10300: The icache invalidate functions should disable the icache first
  MN10300: gdbstub: Restrict single-stepping to non-preemptable non-SMP configs
Linus Torvalds 2011-03-24 10:07:50 -07:00
commit 3dab04e697
39 changed files with 1919 additions and 497 deletions


@ -3,6 +3,8 @@ config MN10300
select HAVE_OPROFILE
select HAVE_GENERIC_HARDIRQS
select GENERIC_HARDIRQS_NO_DEPRECATED
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_KGDB
config AM33_2
def_bool n
@ -401,9 +403,9 @@ comment "[!] NOTE: A lower number/level indicates a higher priority (0 is highes
comment "____Non-maskable interrupt levels____"
comment "The following must be set to a higher priority than local_irq_disable() and on-chip serial"
config GDBSTUB_IRQ_LEVEL
int "GDBSTUB interrupt priority"
depends on GDBSTUB
config DEBUGGER_IRQ_LEVEL
int "DEBUGGER interrupt priority"
depends on KERNEL_DEBUGGER
range 0 1 if LINUX_CLI_LEVEL = 2
range 0 2 if LINUX_CLI_LEVEL = 3
range 0 3 if LINUX_CLI_LEVEL = 4
@ -437,7 +439,7 @@ config LINUX_CLI_LEVEL
EPSW.IM from 7. Any interrupt is permitted for which the level is
lower than EPSW.IM.
Certain interrupts, such as GDBSTUB and virtual MN10300 on-chip
Certain interrupts, such as DEBUGGER and virtual MN10300 on-chip
serial DMA interrupts are allowed to interrupt normal disabled
sections.
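For example, with LINUX_CLI_LEVEL = 2, local_irq_disable() raises EPSW.IM to 2; since only interrupts whose level is numerically lower than EPSW.IM are delivered, level 0 and level 1 sources (the kernel debugger and the virtual serial DMA) can still fire inside "disabled" sections, which is why DEBUGGER_IRQ_LEVEL is then limited to the range 0-1 above.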


@ -36,7 +36,7 @@ config KPROBES
config GDBSTUB
bool "Remote GDB kernel debugging"
depends on DEBUG_KERNEL
depends on DEBUG_KERNEL && DEPRECATED
select DEBUG_INFO
select FRAME_POINTER
help
@ -46,6 +46,9 @@ config GDBSTUB
RAM to avoid excessive linking time. This is only useful for kernel
hackers. If unsure, say N.
This is deprecated in favour of KGDB and will be removed in a later
version.
config GDBSTUB_IMMEDIATE
bool "Break into GDB stub immediately"
depends on GDBSTUB
@ -54,6 +57,14 @@ config GDBSTUB_IMMEDIATE
possible, leaving the program counter at the beginning of
start_kernel() in init/main.c.
config GDBSTUB_ALLOW_SINGLE_STEP
bool "Allow software single-stepping in GDB stub"
depends on GDBSTUB && !SMP && !PREEMPT
help
Allow GDB stub to perform software single-stepping through the
kernel. This doesn't work very well on SMP or preemptible kernels as
it uses temporary breakpoints to emulate single-stepping.
config GDB_CONSOLE
bool "Console output to GDB"
depends on GDBSTUB
@ -142,3 +153,7 @@ config GDBSTUB_ON_TTYSx
default y
endmenu
config KERNEL_DEBUGGER
def_bool y
depends on GDBSTUB || KGDB
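The "temporary breakpoints" mentioned in the GDBSTUB_ALLOW_SINGLE_STEP help above are the same trick the new KGDB code uses (see kgdb_arch_do_singlestep() later in this commit). A minimal, illustrative sketch of the idea follows - the helper names are made up, and the real code also flushes the icache for the patched byte:

#include <linux/uaccess.h>
#include <linux/errno.h>

static u8 example_saved_opcode;
static u8 *example_step_addr;

/* Plant a one-byte MN10300 "break" (0xff) at the instruction that would
 * execute next, remembering the byte it overwrites. */
static int example_plant_step_breakpoint(u8 *next_pc)
{
	static const u8 break_insn = 0xff;

	if (probe_kernel_read(&example_saved_opcode, next_pc, 1) < 0)
		return -EFAULT;
	example_step_addr = next_pc;
	return probe_kernel_write(next_pc, &break_insn, 1);
}

/* On the next trap, put the original byte back before resuming. */
static void example_remove_step_breakpoint(void)
{
	if (example_step_addr) {
		probe_kernel_write(example_step_addr, &example_saved_opcode, 1);
		example_step_addr = NULL;
	}
}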


@ -0,0 +1,43 @@
/* Kernel debugger for MN10300
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_DEBUGGER_H
#define _ASM_DEBUGGER_H
#if defined(CONFIG_KERNEL_DEBUGGER)
extern int debugger_intercept(enum exception_code, int, int, struct pt_regs *);
extern int at_debugger_breakpoint(struct pt_regs *);
#ifndef CONFIG_MN10300_DEBUGGER_CACHE_NO_FLUSH
extern void debugger_local_cache_flushinv(void);
extern void debugger_local_cache_flushinv_one(u8 *);
#else
static inline void debugger_local_cache_flushinv(void) {}
static inline void debugger_local_cache_flushinv_one(u8 *addr) {}
#endif
#else /* CONFIG_KERNEL_DEBUGGER */
static inline int debugger_intercept(enum exception_code excep,
int signo, int si_code,
struct pt_regs *regs)
{
return 0;
}
static inline int at_debugger_breakpoint(struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_KERNEL_DEBUGGER */
#endif /* _ASM_DEBUGGER_H */
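The header only declares the hooks; as a hedged sketch, the call pattern this series establishes in the exception path looks roughly like the following (it mirrors the traps.c hunks later in this commit; the wrapper name is invented for illustration):

#include <linux/signal.h>
#include <asm/exceptions.h>
#include <asm/debugger.h>

/* Illustrative wrapper: offer a kernel-mode trap to whichever debugger
 * back end (gdbstub or KGDB) is built in. */
static int example_offer_trap_to_debugger(enum exception_code code,
					  struct pt_regs *regs)
{
	/* Step over the debugger's own one-byte "break" instruction. */
	if (at_debugger_breakpoint(regs))
		regs->pc++;

	/* 0 means the debugger consumed the event; -ERROR means fall back
	 * to normal signal delivery or die(). */
	return debugger_intercept(code, SIGTRAP, TRAP_BRKPT, regs) == 0;
}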


@ -15,6 +15,19 @@
extern void ____unhandled_size_in_do_div___(void);
/*
* Beginning with gcc 4.6, the MDR register is represented explicitly. We
* must, therefore, at least explicitly clobber the register when we make
* changes to it. The following assembly fragments *could* be rearranged in
* order to leave the moves to/from the MDR register to the compiler, but the
* gains would be minimal at best.
*/
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
# define CLOBBER_MDR_CC "mdr", "cc"
#else
# define CLOBBER_MDR_CC "cc"
#endif
/*
* divide n by base, leaving the result in n and returning the remainder
* - we can do this quite efficiently on the MN10300 by cascading the divides
@ -29,7 +42,7 @@ extern void ____unhandled_size_in_do_div___(void);
"mov mdr,%1 \n" \
: "+r"(n), "=d"(__rem) \
: "r"(base), "1"(__rem) \
: "cc" \
: CLOBBER_MDR_CC \
); \
} else if (sizeof(n) <= 8) { \
union { \
@ -48,7 +61,7 @@ extern void ____unhandled_size_in_do_div___(void);
: "=d"(__rem), "=r"(__quot.w[1]), "=r"(__quot.w[0]) \
: "r"(base), "0"(__rem), "1"(__quot.w[1]), \
"2"(__quot.w[0]) \
: "cc" \
: CLOBBER_MDR_CC \
); \
n = __quot.l; \
} else { \
@ -72,7 +85,7 @@ unsigned __muldiv64u(unsigned val, unsigned mult, unsigned div)
* MDR = MDR:val%div */
: "=r"(result)
: "0"(val), "ir"(mult), "r"(div)
: "cc"
: CLOBBER_MDR_CC
);
return result;
@ -93,7 +106,7 @@ signed __muldiv64s(signed val, signed mult, signed div)
* MDR = MDR:val%div */
: "=r"(result)
: "0"(val), "ir"(mult), "r"(div)
: "cc"
: CLOBBER_MDR_CC
);
return result;
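The hunks above only show the clobber lines that changed. As a hedged sketch, the 32-bit branch of do_div() effectively becomes the following once CLOBBER_MDR_CC is substituted (the function and variable names here are illustrative, not the macro from the patch):

/* Divide n by base; the quotient is returned, the remainder left in *remp. */
static inline unsigned example_divu(unsigned n, unsigned base, unsigned *remp)
{
	unsigned rem = 0;

	asm("mov	%1,mdr	\n"	/* MDR:n forms the dividend */
	    "divu	%2,%0	\n"	/* quotient -> %0, remainder -> MDR */
	    "mov	mdr,%1	\n"
	    : "+r"(n), "=d"(rem)
	    : "r"(base), "1"(rem)
	    : CLOBBER_MDR_CC		/* "mdr", "cc" on gcc >= 4.6 */
	    );

	*remp = rem;
	return n;
}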


@ -55,7 +55,6 @@ static inline void clear_using_fpu(struct task_struct *tsk)
extern asmlinkage void fpu_kill_state(struct task_struct *);
extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void fpu_invalid_op(struct pt_regs *, enum exception_code);
extern asmlinkage void fpu_init_state(void);
extern asmlinkage void fpu_save(struct fpu_state_struct *);
extern int fpu_setup_sigcontext(struct fpucontext *buf);
@ -113,7 +112,6 @@ static inline void flush_fpu(void)
extern asmlinkage
void unexpected_fpu_exception(struct pt_regs *, enum exception_code);
#define fpu_invalid_op unexpected_fpu_exception
#define fpu_exception unexpected_fpu_exception
struct task_struct;


@ -20,7 +20,7 @@
/*
* interrupt control
* - "disabled": run in IM1/2
* - level 0 - GDB stub
* - level 0 - kernel debugger
* - level 1 - virtual serial DMA (if present)
* - level 5 - normal interrupt priority
* - level 6 - timer interrupt


@ -0,0 +1,81 @@
/* Kernel debugger for MN10300
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_KGDB_H
#define _ASM_KGDB_H
/*
* BUFMAX defines the maximum number of characters in inbound/outbound
* buffers at least NUMREGBYTES*2 are needed for register packets
* Longer buffer is needed to list all threads
*/
#define BUFMAX 1024
/*
* Note that this register image is in a different order than the register
* image that Linux produces at interrupt time.
*/
enum regnames {
GDB_FR_D0 = 0,
GDB_FR_D1 = 1,
GDB_FR_D2 = 2,
GDB_FR_D3 = 3,
GDB_FR_A0 = 4,
GDB_FR_A1 = 5,
GDB_FR_A2 = 6,
GDB_FR_A3 = 7,
GDB_FR_SP = 8,
GDB_FR_PC = 9,
GDB_FR_MDR = 10,
GDB_FR_EPSW = 11,
GDB_FR_LIR = 12,
GDB_FR_LAR = 13,
GDB_FR_MDRQ = 14,
GDB_FR_E0 = 15,
GDB_FR_E1 = 16,
GDB_FR_E2 = 17,
GDB_FR_E3 = 18,
GDB_FR_E4 = 19,
GDB_FR_E5 = 20,
GDB_FR_E6 = 21,
GDB_FR_E7 = 22,
GDB_FR_SSP = 23,
GDB_FR_MSP = 24,
GDB_FR_USP = 25,
GDB_FR_MCRH = 26,
GDB_FR_MCRL = 27,
GDB_FR_MCVF = 28,
GDB_FR_FPCR = 29,
GDB_FR_DUMMY0 = 30,
GDB_FR_DUMMY1 = 31,
GDB_FR_FS0 = 32,
GDB_FR_SIZE = 64,
};
#define GDB_ORIG_D0 41
#define NUMREGBYTES (GDB_FR_SIZE*4)
static inline void arch_kgdb_breakpoint(void)
{
asm(".globl __arch_kgdb_breakpoint; __arch_kgdb_breakpoint: break");
}
extern u8 __arch_kgdb_breakpoint;
#define BREAK_INSTR_SIZE 1
#define CACHE_FLUSH_IS_SAFE 1
#endif /* _ASM_KGDB_H */
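For scale: with GDB_FR_SIZE covering 64 register slots, NUMREGBYTES is 256, so the NUMREGBYTES*2 (512) hex characters a 'g' register packet needs fit comfortably inside the 1024-byte BUFMAX above. As a usage sketch of the breakpoint hook (relying on the generic kgdb API, which is not part of this diff; the helper name is invented):

#include <linux/kgdb.h>

/* Hypothetical helper: drop into KGDB, but only if a debugger is attached. */
static void example_break_into_kgdb(void)
{
	if (kgdb_connected)
		kgdb_breakpoint();	/* ends up at __arch_kgdb_breakpoint */
}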


@ -34,7 +34,7 @@
#define LOCAL_TIMER_IPI 193
#define FLUSH_CACHE_IPI 194
#define CALL_FUNCTION_NMI_IPI 195
#define GDB_NMI_IPI 196
#define DEBUGGER_NMI_IPI 196
#define SMP_BOOT_IRQ 195
@ -43,6 +43,7 @@
#define LOCAL_TIMER_GxICR_LV GxICR_LEVEL_4
#define FLUSH_CACHE_GxICR_LV GxICR_LEVEL_0
#define SMP_BOOT_GxICR_LV GxICR_LEVEL_0
#define DEBUGGER_GxICR_LV CONFIG_DEBUGGER_IRQ_LEVEL
#define TIME_OUT_COUNT_BOOT_IPI 100
#define DELAY_TIME_BOOT_IPI 75000
@ -61,8 +62,9 @@
* An alternate way of dealing with this could be to use the EPSW.S bits to
* cache this information for systems with up to four CPUs.
*/
#define arch_smp_processor_id() (CPUID)
#if 0
#define raw_smp_processor_id() (CPUID)
#define raw_smp_processor_id() (arch_smp_processor_id())
#else
#define raw_smp_processor_id() (current_thread_info()->cpu)
#endif


@ -131,7 +131,11 @@ static inline unsigned long current_stack_pointer(void)
kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#endif
#ifndef CONFIG_KGDB
#define free_thread_info(ti) kfree((ti))
#else
extern void free_thread_info(struct thread_info *);
#endif
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)


@ -21,11 +21,8 @@ obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-low.o
obj-$(CONFIG_GDBSTUB_ON_TTYSx) += gdb-io-serial.o gdb-io-serial-low.o
obj-$(CONFIG_GDBSTUB_ON_TTYSMx) += gdb-io-ttysm.o gdb-io-ttysm-low.o
ifeq ($(CONFIG_MN10300_CACHE_ENABLED),y)
obj-$(CONFIG_GDBSTUB) += gdb-cache.o
endif
obj-$(CONFIG_MN10300_RTC) += rtc.o
obj-$(CONFIG_PROFILE) += profile.o profile-low.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KGDB) += kgdb.o


@ -266,7 +266,11 @@ ENTRY(raw_bus_error)
###############################################################################
#
# Miscellaneous exception entry points
# NMI exception entry points
#
# This is used by ordinary interrupt channels that have the GxICR_NMI bit set
# in addition to the main NMI and Watchdog channels. SMP NMI IPIs use this
# facility.
#
###############################################################################
ENTRY(nmi_handler)
@ -281,7 +285,7 @@ ENTRY(nmi_handler)
and NMIAGR_GN,d0
lsr 0x2,d0
cmp CALL_FUNCTION_NMI_IPI,d0
bne 5f # if not call function, jump
bne nmi_not_smp_callfunc # if not call function, jump
# function call nmi ipi
add 4,sp # no need to store TBR
@ -295,59 +299,38 @@ ENTRY(nmi_handler)
call smp_nmi_call_function_interrupt[],0
RESTORE_ALL
5:
#ifdef CONFIG_GDBSTUB
cmp GDB_NMI_IPI,d0
bne 3f # if not gdb nmi ipi, jump
nmi_not_smp_callfunc:
#ifdef CONFIG_KERNEL_DEBUGGER
cmp DEBUGGER_NMI_IPI,d0
bne nmi_not_debugger # if not kernel debugger NMI IPI, jump
# gdb nmi ipi
# kernel debugger NMI IPI
add 4,sp # no need to store TBR
mov GxICR_DETECT,d0 # clear NMI
movbu d0,(GxICR(GDB_NMI_IPI))
movhu (GxICR(GDB_NMI_IPI)),d0
movbu d0,(GxICR(DEBUGGER_NMI_IPI))
movhu (GxICR(DEBUGGER_NMI_IPI)),d0
and ~EPSW_NMID,epsw # enable NMI
#ifdef CONFIG_MN10300_CACHE_ENABLED
mov (gdbstub_nmi_opr_type),d0
cmp GDBSTUB_NMI_CACHE_PURGE,d0
bne 4f # if not gdb cache purge, jump
# gdb cache purge nmi ipi
add -20,sp
mov d1,(4,sp)
mov a0,(8,sp)
mov a1,(12,sp)
mov mdr,d0
mov d0,(16,sp)
call gdbstub_local_purge_cache[],0
mov 0x1,d0
mov (CPUID),d1
asl d1,d0
mov gdbstub_nmi_cpumask,a0
bclr d0,(a0)
mov (4,sp),d1
mov (8,sp),a0
mov (12,sp),a1
mov (16,sp),d0
mov d0,mdr
add 20,sp
mov (sp),d0
add 4,sp
rti
4:
#endif /* CONFIG_MN10300_CACHE_ENABLED */
# gdb wait nmi ipi
mov (sp),d0
SAVE_ALL
call gdbstub_nmi_wait[],0
mov fp,d0 # arg 0: stacked register file
mov a2,d1 # arg 1: exception number
call debugger_nmi_interrupt[],0
RESTORE_ALL
3:
#endif /* CONFIG_GDBSTUB */
nmi_not_debugger:
#endif /* CONFIG_KERNEL_DEBUGGER */
mov (sp),d0 # restore TBR to d0
add 4,sp
#endif /* CONFIG_SMP */
bra __common_exception_nonmi
###############################################################################
#
# General exception entry point
#
###############################################################################
ENTRY(__common_exception)
add -4,sp
mov d0,(sp)


@ -69,24 +69,6 @@ asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
force_sig_info(SIGFPE, &info, tsk);
}
/*
* handle an FPU invalid_op exception
* - Derived from DO_EINFO() macro in arch/mn10300/kernel/traps.c
*/
asmlinkage void fpu_invalid_op(struct pt_regs *regs, enum exception_code code)
{
siginfo_t info;
if (!user_mode(regs))
die_if_no_fixup("FPU invalid opcode", regs, code);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_COPROC;
info.si_addr = (void *) regs->pc;
force_sig_info(info.si_signo, &info, current);
}
/*
* save the FPU state to a signal context
*/


@ -1,105 +0,0 @@
###############################################################################
#
# MN10300 Low-level cache purging routines for gdbstub
#
# Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
# Written by David Howells (dhowells@redhat.com)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public Licence
# as published by the Free Software Foundation; either version
# 2 of the Licence, or (at your option) any later version.
#
###############################################################################
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/cache.h>
#include <asm/cpu-regs.h>
#include <asm/exceptions.h>
#include <asm/frame.inc>
#include <asm/serial-regs.h>
.text
###############################################################################
#
# GDB stub cache purge
#
###############################################################################
.type gdbstub_purge_cache,@function
ENTRY(gdbstub_purge_cache)
#######################################################################
# read the addresses tagged in the cache's tag RAM and attempt to flush
# those addresses specifically
# - we rely on the hardware to filter out invalid tag entry addresses
mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
mov DCACHE_PURGE(0,0),a1 # dcache purge request address
mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
mn10300_dcache_flush_loop:
mov (a0),d0
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
# cache
mov d0,(a1) # conditional purge
mn10300_dcache_flush_skip:
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
add -1,d1
bne mn10300_dcache_flush_loop
;; # unconditionally flush and invalidate the dcache
;; mov DCACHE_PURGE(0,0),a1 # dcache purge request address
;; mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of
;; # entries
;;
;; gdbstub_purge_cache__dcache_loop:
;; mov (a1),d0 # unconditional purge
;;
;; add L1_CACHE_BYTES,a1
;; add -1,d1
;; bne gdbstub_purge_cache__dcache_loop
#######################################################################
# now invalidate the icache
mov CHCTR,a0
movhu (a0),a1
mov epsw,d1
and ~EPSW_IE,epsw
nop
nop
# disable the icache
and ~CHCTR_ICEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
# wait for the cache to finish
mov CHCTR,a0
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# and reenable it
movhu a1,(a0)
movhu (a0),d0 # read back to flush
# (SIGILLs all over without this)
mov d1,epsw
ret [],0
.size gdbstub_purge_cache,.-gdbstub_purge_cache


@ -59,10 +59,10 @@ void __init gdbstub_io_init(void)
/* we want to get serial receive interrupts */
set_intr_level(gdbstub_port->rx_irq,
NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
NUM2GxICR_LEVEL(CONFIG_DEBUGGER_IRQ_LEVEL));
set_intr_level(gdbstub_port->tx_irq,
NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
NUM2GxICR_LEVEL(CONFIG_DEBUGGER_IRQ_LEVEL));
set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_DEBUGGER_IRQ_LEVEL),
gdbstub_io_rx_handler);
*gdbstub_port->rx_icr |= GxICR_ENABLE;
@ -88,7 +88,7 @@ void __init gdbstub_io_init(void)
/* permit level 0 IRQs only */
arch_local_change_intr_mask_level(
NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
NUM2EPSW_IM(CONFIG_DEBUGGER_IRQ_LEVEL + 1));
}
/*


@ -133,7 +133,7 @@
#include <asm/system.h>
#include <asm/gdb-stub.h>
#include <asm/exceptions.h>
#include <asm/cacheflush.h>
#include <asm/debugger.h>
#include <asm/serial-regs.h>
#include <asm/busctl-regs.h>
#include <unit/leds.h>
@ -405,6 +405,7 @@ static int hexToInt(char **ptr, int *intValue)
return (numChars);
}
#ifdef CONFIG_GDBSTUB_ALLOW_SINGLE_STEP
/*
* We single-step by setting breakpoints. When an exception
* is handled, we need to restore the instructions hoisted
@ -729,6 +730,7 @@ static int gdbstub_single_step(struct pt_regs *regs)
__gdbstub_restore_bp();
return -EFAULT;
}
#endif /* CONFIG_GDBSTUB_ALLOW_SINGLE_STEP */
#ifdef CONFIG_GDBSTUB_CONSOLE
@ -1171,7 +1173,7 @@ int gdbstub_clear_breakpoint(u8 *addr, int len)
/*
* This function does all command processing for interfacing to gdb
* - returns 1 if the exception should be skipped, 0 otherwise.
* - returns 0 if the exception should be skipped, -ERROR otherwise.
*/
static int gdbstub(struct pt_regs *regs, enum exception_code excep)
{
@ -1186,7 +1188,7 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
int loop;
if (excep == EXCEP_FPU_DISABLED)
return 0;
return -ENOTSUPP;
gdbstub_flush_caches = 0;
@ -1195,7 +1197,7 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
asm volatile("mov mdr,%0" : "=d"(mdr));
local_save_flags(epsw);
arch_local_change_intr_mask_level(
NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
NUM2EPSW_IM(CONFIG_DEBUGGER_IRQ_LEVEL + 1));
gdbstub_store_fpu();
@ -1208,11 +1210,13 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
/* if we were single stepping, restore the opcodes hoisted for the
* breakpoint[s] */
broke = 0;
#ifdef CONFIG_GDBSTUB_ALLOW_SINGLE_STEP
if ((step_bp[0].addr && step_bp[0].addr == (u8 *) regs->pc) ||
(step_bp[1].addr && step_bp[1].addr == (u8 *) regs->pc))
broke = 1;
__gdbstub_restore_bp();
#endif
if (gdbstub_rx_unget) {
sigval = SIGINT;
@ -1548,17 +1552,21 @@ packet_waiting:
* Step to next instruction
*/
case 's':
/*
* using the T flag doesn't seem to perform single
/* Using the T flag doesn't seem to perform single
* stepping (it seems to wind up being caught by the
* JTAG unit), so we have to use breakpoints and
* continue instead.
*/
#ifdef CONFIG_GDBSTUB_ALLOW_SINGLE_STEP
if (gdbstub_single_step(regs) < 0)
/* ignore any fault error for now */
gdbstub_printk("unable to set single-step"
" bp\n");
goto done;
#else
gdbstub_strcpy(output_buffer, "E01");
break;
#endif
/*
* Set baud rate (bBB)
@ -1657,7 +1665,7 @@ done:
* NB: We flush both caches, just to be sure...
*/
if (gdbstub_flush_caches)
gdbstub_purge_cache();
debugger_local_cache_flushinv();
gdbstub_load_fpu();
mn10300_set_gdbleds(0);
@ -1667,14 +1675,23 @@ done:
touch_softlockup_watchdog();
local_irq_restore(epsw);
return 1;
return 0;
}
/*
* Determine if we hit a debugger special breakpoint that needs skipping over
* automatically.
*/
int at_debugger_breakpoint(struct pt_regs *regs)
{
return 0;
}
/*
* handle event interception
*/
asmlinkage int gdbstub_intercept(struct pt_regs *regs,
enum exception_code excep)
asmlinkage int debugger_intercept(enum exception_code excep,
int signo, int si_code, struct pt_regs *regs)
{
static u8 notfirst = 1;
int ret;
@ -1688,7 +1705,7 @@ asmlinkage int gdbstub_intercept(struct pt_regs *regs,
asm("mov mdr,%0" : "=d"(mdr));
gdbstub_entry(
"--> gdbstub_intercept(%p,%04x) [MDR=%lx PC=%lx]\n",
"--> debugger_intercept(%p,%04x) [MDR=%lx PC=%lx]\n",
regs, excep, mdr, regs->pc);
gdbstub_entry(
@ -1722,7 +1739,7 @@ asmlinkage int gdbstub_intercept(struct pt_regs *regs,
ret = gdbstub(regs, excep);
gdbstub_entry("<-- gdbstub_intercept()\n");
gdbstub_entry("<-- debugger_intercept()\n");
gdbstub_busy = 0;
return ret;
}


@ -29,6 +29,13 @@ extern void ret_from_fork(struct task_struct *) __attribute__((noreturn));
extern void mn10300_low_ipi_handler(void);
#endif
/*
* smp.c
*/
#ifdef CONFIG_SMP
extern void smp_jump_to_debugger(void);
#endif
/*
* time.c
*/


@ -153,7 +153,7 @@ mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
case LOCAL_TIMER_IPI:
case FLUSH_CACHE_IPI:
case CALL_FUNCTION_NMI_IPI:
case GDB_NMI_IPI:
case DEBUGGER_NMI_IPI:
#ifdef CONFIG_MN10300_TTYSM0
case SC0RXIRQ:
case SC0TXIRQ:

arch/mn10300/kernel/kgdb.c (new file, 502 lines)

@ -0,0 +1,502 @@
/* kgdb support for MN10300
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/uaccess.h>
#include <unit/leds.h>
#include <unit/serial.h>
#include <asm/debugger.h>
#include <asm/serial-regs.h>
#include "internal.h"
/*
* Software single-stepping breakpoint save (used by __switch_to())
*/
static struct thread_info *kgdb_sstep_thread;
u8 *kgdb_sstep_bp_addr[2];
u8 kgdb_sstep_bp[2];
/*
* Copy kernel exception frame registers to the GDB register file
*/
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
unsigned long ssp = (unsigned long) (regs + 1);
gdb_regs[GDB_FR_D0] = regs->d0;
gdb_regs[GDB_FR_D1] = regs->d1;
gdb_regs[GDB_FR_D2] = regs->d2;
gdb_regs[GDB_FR_D3] = regs->d3;
gdb_regs[GDB_FR_A0] = regs->a0;
gdb_regs[GDB_FR_A1] = regs->a1;
gdb_regs[GDB_FR_A2] = regs->a2;
gdb_regs[GDB_FR_A3] = regs->a3;
gdb_regs[GDB_FR_SP] = (regs->epsw & EPSW_nSL) ? regs->sp : ssp;
gdb_regs[GDB_FR_PC] = regs->pc;
gdb_regs[GDB_FR_MDR] = regs->mdr;
gdb_regs[GDB_FR_EPSW] = regs->epsw;
gdb_regs[GDB_FR_LIR] = regs->lir;
gdb_regs[GDB_FR_LAR] = regs->lar;
gdb_regs[GDB_FR_MDRQ] = regs->mdrq;
gdb_regs[GDB_FR_E0] = regs->e0;
gdb_regs[GDB_FR_E1] = regs->e1;
gdb_regs[GDB_FR_E2] = regs->e2;
gdb_regs[GDB_FR_E3] = regs->e3;
gdb_regs[GDB_FR_E4] = regs->e4;
gdb_regs[GDB_FR_E5] = regs->e5;
gdb_regs[GDB_FR_E6] = regs->e6;
gdb_regs[GDB_FR_E7] = regs->e7;
gdb_regs[GDB_FR_SSP] = ssp;
gdb_regs[GDB_FR_MSP] = 0;
gdb_regs[GDB_FR_USP] = regs->sp;
gdb_regs[GDB_FR_MCRH] = regs->mcrh;
gdb_regs[GDB_FR_MCRL] = regs->mcrl;
gdb_regs[GDB_FR_MCVF] = regs->mcvf;
gdb_regs[GDB_FR_DUMMY0] = 0;
gdb_regs[GDB_FR_DUMMY1] = 0;
gdb_regs[GDB_FR_FS0] = 0;
}
/*
* Extracts kernel SP/PC values understandable by gdb from the values
* saved by switch_to().
*/
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
gdb_regs[GDB_FR_SSP] = p->thread.sp;
gdb_regs[GDB_FR_PC] = p->thread.pc;
gdb_regs[GDB_FR_A3] = p->thread.a3;
gdb_regs[GDB_FR_USP] = p->thread.usp;
gdb_regs[GDB_FR_FPCR] = p->thread.fpu_state.fpcr;
}
/*
* Fill kernel exception frame registers from the GDB register file
*/
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
regs->d0 = gdb_regs[GDB_FR_D0];
regs->d1 = gdb_regs[GDB_FR_D1];
regs->d2 = gdb_regs[GDB_FR_D2];
regs->d3 = gdb_regs[GDB_FR_D3];
regs->a0 = gdb_regs[GDB_FR_A0];
regs->a1 = gdb_regs[GDB_FR_A1];
regs->a2 = gdb_regs[GDB_FR_A2];
regs->a3 = gdb_regs[GDB_FR_A3];
regs->sp = gdb_regs[GDB_FR_SP];
regs->pc = gdb_regs[GDB_FR_PC];
regs->mdr = gdb_regs[GDB_FR_MDR];
regs->epsw = gdb_regs[GDB_FR_EPSW];
regs->lir = gdb_regs[GDB_FR_LIR];
regs->lar = gdb_regs[GDB_FR_LAR];
regs->mdrq = gdb_regs[GDB_FR_MDRQ];
regs->e0 = gdb_regs[GDB_FR_E0];
regs->e1 = gdb_regs[GDB_FR_E1];
regs->e2 = gdb_regs[GDB_FR_E2];
regs->e3 = gdb_regs[GDB_FR_E3];
regs->e4 = gdb_regs[GDB_FR_E4];
regs->e5 = gdb_regs[GDB_FR_E5];
regs->e6 = gdb_regs[GDB_FR_E6];
regs->e7 = gdb_regs[GDB_FR_E7];
regs->sp = gdb_regs[GDB_FR_SSP];
/* gdb_regs[GDB_FR_MSP]; */
// regs->usp = gdb_regs[GDB_FR_USP];
regs->mcrh = gdb_regs[GDB_FR_MCRH];
regs->mcrl = gdb_regs[GDB_FR_MCRL];
regs->mcvf = gdb_regs[GDB_FR_MCVF];
/* gdb_regs[GDB_FR_DUMMY0]; */
/* gdb_regs[GDB_FR_DUMMY1]; */
// regs->fpcr = gdb_regs[GDB_FR_FPCR];
// regs->fs0 = gdb_regs[GDB_FR_FS0];
}
struct kgdb_arch arch_kgdb_ops = {
.gdb_bpt_instr = { 0xff },
.flags = KGDB_HW_BREAKPOINT,
};
static const unsigned char mn10300_kgdb_insn_sizes[256] =
{
/* 1 2 3 4 5 6 7 8 9 a b c d e f */
1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, /* 0 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 1 */
2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, /* 2 */
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, /* 3 */
1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, /* 4 */
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, /* 5 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 7 */
2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* 8 */
2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* 9 */
2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* a */
2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* b */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 2, /* c */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */
0, 2, 2, 2, 2, 2, 2, 4, 0, 3, 0, 4, 0, 6, 7, 1 /* f */
};
/*
* Attempt to emulate single stepping by means of breakpoint instructions.
* Although there is a single-step trace flag in EPSW, its use is not
* sufficiently documented and is only intended for use with the JTAG debugger.
*/
static int kgdb_arch_do_singlestep(struct pt_regs *regs)
{
unsigned long arg;
unsigned size;
u8 *pc = (u8 *)regs->pc, *sp = (u8 *)(regs + 1), cur;
u8 *x = NULL, *y = NULL;
int ret;
ret = probe_kernel_read(&cur, pc, 1);
if (ret < 0)
return ret;
size = mn10300_kgdb_insn_sizes[cur];
if (size > 0) {
x = pc + size;
goto set_x;
}
switch (cur) {
/* Bxx (d8,PC) */
case 0xc0 ... 0xca:
ret = probe_kernel_read(&arg, pc + 1, 1);
if (ret < 0)
return ret;
x = pc + 2;
if (arg >= 0 && arg <= 2)
goto set_x;
y = pc + (s8)arg;
goto set_x_and_y;
/* LXX (d8,PC) */
case 0xd0 ... 0xda:
x = pc + 1;
if (regs->pc == regs->lar)
goto set_x;
y = (u8 *)regs->lar;
goto set_x_and_y;
/* SETLB - loads the next four bytes into the LIR register
* (which mustn't include a breakpoint instruction) */
case 0xdb:
x = pc + 5;
goto set_x;
/* JMP (d16,PC) or CALL (d16,PC) */
case 0xcc:
case 0xcd:
ret = probe_kernel_read(&arg, pc + 1, 2);
if (ret < 0)
return ret;
x = pc + (s16)arg;
goto set_x;
/* JMP (d32,PC) or CALL (d32,PC) */
case 0xdc:
case 0xdd:
ret = probe_kernel_read(&arg, pc + 1, 4);
if (ret < 0)
return ret;
x = pc + (s32)arg;
goto set_x;
/* RETF */
case 0xde:
x = (u8 *)regs->mdr;
goto set_x;
/* RET */
case 0xdf:
ret = probe_kernel_read(&arg, pc + 2, 1);
if (ret < 0)
return ret;
ret = probe_kernel_read(&x, sp + (s8)arg, 4);
if (ret < 0)
return ret;
goto set_x;
case 0xf0:
ret = probe_kernel_read(&cur, pc + 1, 1);
if (ret < 0)
return ret;
if (cur >= 0xf0 && cur <= 0xf7) {
/* JMP (An) / CALLS (An) */
switch (cur & 3) {
case 0: x = (u8 *)regs->a0; break;
case 1: x = (u8 *)regs->a1; break;
case 2: x = (u8 *)regs->a2; break;
case 3: x = (u8 *)regs->a3; break;
}
goto set_x;
} else if (cur == 0xfc) {
/* RETS */
ret = probe_kernel_read(&x, sp, 4);
if (ret < 0)
return ret;
goto set_x;
} else if (cur == 0xfd) {
/* RTI */
ret = probe_kernel_read(&x, sp + 4, 4);
if (ret < 0)
return ret;
goto set_x;
} else {
x = pc + 2;
goto set_x;
}
break;
/* potential 3-byte conditional branches */
case 0xf8:
ret = probe_kernel_read(&cur, pc + 1, 1);
if (ret < 0)
return ret;
x = pc + 3;
if (cur >= 0xe8 && cur <= 0xeb) {
ret = probe_kernel_read(&arg, pc + 2, 1);
if (ret < 0)
return ret;
if (arg >= 0 && arg <= 3)
goto set_x;
y = pc + (s8)arg;
goto set_x_and_y;
}
goto set_x;
case 0xfa:
ret = probe_kernel_read(&cur, pc + 1, 1);
if (ret < 0)
return ret;
if (cur == 0xff) {
/* CALLS (d16,PC) */
ret = probe_kernel_read(&arg, pc + 2, 2);
if (ret < 0)
return ret;
x = pc + (s16)arg;
goto set_x;
}
x = pc + 4;
goto set_x;
case 0xfc:
ret = probe_kernel_read(&cur, pc + 1, 1);
if (ret < 0)
return ret;
if (cur == 0xff) {
/* CALLS (d32,PC) */
ret = probe_kernel_read(&arg, pc + 2, 4);
if (ret < 0)
return ret;
x = pc + (s32)arg;
goto set_x;
}
x = pc + 6;
goto set_x;
}
return 0;
set_x:
kgdb_sstep_bp_addr[0] = x;
kgdb_sstep_bp_addr[1] = NULL;
ret = probe_kernel_read(&kgdb_sstep_bp[0], x, 1);
if (ret < 0)
return ret;
ret = probe_kernel_write(x, &arch_kgdb_ops.gdb_bpt_instr, 1);
if (ret < 0)
return ret;
kgdb_sstep_thread = current_thread_info();
debugger_local_cache_flushinv_one(x);
return ret;
set_x_and_y:
kgdb_sstep_bp_addr[0] = x;
kgdb_sstep_bp_addr[1] = y;
ret = probe_kernel_read(&kgdb_sstep_bp[0], x, 1);
if (ret < 0)
return ret;
ret = probe_kernel_read(&kgdb_sstep_bp[1], y, 1);
if (ret < 0)
return ret;
ret = probe_kernel_write(x, &arch_kgdb_ops.gdb_bpt_instr, 1);
if (ret < 0)
return ret;
ret = probe_kernel_write(y, &arch_kgdb_ops.gdb_bpt_instr, 1);
if (ret < 0) {
probe_kernel_write(kgdb_sstep_bp_addr[0],
&kgdb_sstep_bp[0], 1);
} else {
kgdb_sstep_thread = current_thread_info();
}
debugger_local_cache_flushinv_one(x);
debugger_local_cache_flushinv_one(y);
return ret;
}
/*
* Remove emplaced single-step breakpoints, returning true if we hit one of
* them.
*/
static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
{
bool hit = false;
u8 *x = kgdb_sstep_bp_addr[0], *y = kgdb_sstep_bp_addr[1];
u8 opcode;
if (kgdb_sstep_thread == current_thread_info()) {
if (x) {
if (x == (u8 *)regs->pc)
hit = true;
if (probe_kernel_read(&opcode, x,
1) < 0 ||
opcode != 0xff)
BUG();
probe_kernel_write(x, &kgdb_sstep_bp[0], 1);
debugger_local_cache_flushinv_one(x);
}
if (y) {
if (y == (u8 *)regs->pc)
hit = true;
if (probe_kernel_read(&opcode, y,
1) < 0 ||
opcode != 0xff)
BUG();
probe_kernel_write(y, &kgdb_sstep_bp[1], 1);
debugger_local_cache_flushinv_one(y);
}
}
kgdb_sstep_bp_addr[0] = NULL;
kgdb_sstep_bp_addr[1] = NULL;
kgdb_sstep_thread = NULL;
return hit;
}
/*
* Catch a single-step-pending thread being deleted and make sure the global
* single-step state is cleared. At this point the breakpoints should have
* been removed by __switch_to().
*/
void free_thread_info(struct thread_info *ti)
{
if (kgdb_sstep_thread == ti) {
kgdb_sstep_thread = NULL;
/* However, we may now be running in degraded mode, with most
* of the CPUs disabled until such a time as KGDB is reentered,
* so force immediate reentry */
kgdb_breakpoint();
}
kfree(ti);
}
/*
* Handle unknown packets and [CcsDk] packets
* - at this point breakpoints have been installed
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
struct pt_regs *regs)
{
long addr;
char *ptr;
switch (remcom_in_buffer[0]) {
case 'c':
case 's':
/* try to read optional parameter, pc unchanged if no parm */
ptr = &remcom_in_buffer[1];
if (kgdb_hex2long(&ptr, &addr))
regs->pc = addr;
case 'D':
case 'k':
atomic_set(&kgdb_cpu_doing_single_step, -1);
if (remcom_in_buffer[0] == 's') {
kgdb_arch_do_singlestep(regs);
kgdb_single_step = 1;
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
}
return 0;
}
return -1; /* this means that we do not want to exit from the handler */
}
/*
* Handle event interception
* - returns 0 if the exception should be skipped, -ERROR otherwise.
*/
int debugger_intercept(enum exception_code excep, int signo, int si_code,
struct pt_regs *regs)
{
int ret;
if (kgdb_arch_undo_singlestep(regs)) {
excep = EXCEP_TRAP;
signo = SIGTRAP;
si_code = TRAP_TRACE;
}
ret = kgdb_handle_exception(excep, signo, si_code, regs);
debugger_local_cache_flushinv();
return ret;
}
/*
* Determine if we've hit a debugger special breakpoint
*/
int at_debugger_breakpoint(struct pt_regs *regs)
{
return regs->pc == (unsigned long)&__arch_kgdb_breakpoint;
}
/*
* Initialise kgdb
*/
int kgdb_arch_init(void)
{
return 0;
}
/*
* Do something, perhaps, but don't know what.
*/
void kgdb_arch_exit(void)
{
}
#ifdef CONFIG_SMP
void debugger_nmi_interrupt(struct pt_regs *regs, enum exception_code code)
{
kgdb_nmicallback(arch_smp_processor_id(), regs);
debugger_local_cache_flushinv();
}
void kgdb_roundup_cpus(unsigned long flags)
{
smp_jump_to_debugger();
}
#endif


@ -119,6 +119,10 @@ static int mn10300_serial_request_port(struct uart_port *);
static void mn10300_serial_config_port(struct uart_port *, int);
static int mn10300_serial_verify_port(struct uart_port *,
struct serial_struct *);
#ifdef CONFIG_CONSOLE_POLL
static void mn10300_serial_poll_put_char(struct uart_port *, unsigned char);
static int mn10300_serial_poll_get_char(struct uart_port *);
#endif
static const struct uart_ops mn10300_serial_ops = {
.tx_empty = mn10300_serial_tx_empty,
@ -138,6 +142,10 @@ static const struct uart_ops mn10300_serial_ops = {
.request_port = mn10300_serial_request_port,
.config_port = mn10300_serial_config_port,
.verify_port = mn10300_serial_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_put_char = mn10300_serial_poll_put_char,
.poll_get_char = mn10300_serial_poll_get_char,
#endif
};
static irqreturn_t mn10300_serial_interrupt(int irq, void *dev_id);
@ -1634,3 +1642,70 @@ static int __init mn10300_serial_console_init(void)
console_initcall(mn10300_serial_console_init);
#endif
#ifdef CONFIG_CONSOLE_POLL
/*
* Polled character reception for the kernel debugger
*/
static int mn10300_serial_poll_get_char(struct uart_port *_port)
{
struct mn10300_serial_port *port =
container_of(_port, struct mn10300_serial_port, uart);
unsigned ix;
u8 st, ch;
_enter("%s", port->name);
do {
/* pull chars out of the hat */
ix = port->rx_outp;
if (ix == port->rx_inp)
return NO_POLL_CHAR;
ch = port->rx_buffer[ix++];
st = port->rx_buffer[ix++];
smp_rmb();
port->rx_outp = ix & (MNSC_BUFFER_SIZE - 1);
} while (st & (SC01STR_FEF | SC01STR_PEF | SC01STR_OEF));
return ch;
}
/*
* Polled character transmission for the kernel debugger
*/
static void mn10300_serial_poll_put_char(struct uart_port *_port,
unsigned char ch)
{
struct mn10300_serial_port *port =
container_of(_port, struct mn10300_serial_port, uart);
u8 intr, tmp;
/* wait for the transmitter to finish anything it might be doing (and
* this includes the virtual DMA handler, so it might take a while) */
while (*port->_status & (SC01STR_TBF | SC01STR_TXF))
continue;
/* disable the Tx ready interrupt */
intr = *port->_intr;
*port->_intr = intr & ~SC01ICR_TI;
tmp = *port->_intr;
if (ch == 0x0a) {
*(u8 *) port->_txb = 0x0d;
while (*port->_status & SC01STR_TBF)
continue;
}
*(u8 *) port->_txb = ch;
while (*port->_status & SC01STR_TBF)
continue;
/* restore the Tx interrupt flag */
*port->_intr = intr;
tmp = *port->_intr;
}
#endif /* CONFIG_CONSOLE_POLL */
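A hedged sketch of how a polling debugger front end such as kgdboc is expected to drive these hooks - it spins on poll_get_char() and treats NO_POLL_CHAR as "nothing received yet" (the helper below is illustrative only, not part of the patch):

#include <linux/serial_core.h>

static int example_poll_rx_byte(struct uart_port *port)
{
	int ch;

	do {
		ch = port->ops->poll_get_char(port);
	} while (ch == NO_POLL_CHAR);

	return ch;		/* a received byte; errors already filtered */
}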


@ -135,7 +135,7 @@ void release_segments(struct mm_struct *mm)
void machine_restart(char *cmd)
{
#ifdef CONFIG_GDBSTUB
#ifdef CONFIG_KERNEL_DEBUGGER
gdbstub_exit(0);
#endif
@ -148,14 +148,14 @@ void machine_restart(char *cmd)
void machine_halt(void)
{
#ifdef CONFIG_GDBSTUB
#ifdef CONFIG_KERNEL_DEBUGGER
gdbstub_exit(0);
#endif
}
void machine_power_off(void)
{
#ifdef CONFIG_GDBSTUB
#ifdef CONFIG_KERNEL_DEBUGGER
gdbstub_exit(0);
#endif
}


@ -439,6 +439,22 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
return ret;
}
/**
* smp_jump_to_debugger - Make other CPUs enter the debugger by sending an IPI
*
* Send a non-maskable request to all other CPUs in the system, instructing
* them to jump into the debugger. The caller is responsible for checking that
* the other CPUs responded to the instruction.
*
* The caller should make sure that this CPU's debugger IPI is disabled.
*/
void smp_jump_to_debugger(void)
{
if (num_online_cpus() > 1)
/* Send a message to all other CPUs */
send_IPI_allbutself(DEBUGGER_NMI_IPI);
}
/**
* stop_this_cpu - Callback to stop a CPU.
* @unused: Callback context (ignored).
@ -603,7 +619,7 @@ static void __init smp_cpu_init(void)
/**
* smp_prepare_cpu_init - Initialise CPU in startup_secondary
*
* Set interrupt level 0-6 setting and init ICR of gdbstub.
* Set interrupt level 0-6 setting and init ICR of the kernel debugger.
*/
void smp_prepare_cpu_init(void)
{
@ -622,15 +638,15 @@ void smp_prepare_cpu_init(void)
for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
#ifdef CONFIG_GDBSTUB
/* initialise GDB-stub */
#ifdef CONFIG_KERNEL_DEBUGGER
/* initialise the kernel debugger interrupt */
do {
unsigned long flags;
u16 tmp16;
flags = arch_local_cli_save();
GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
tmp16 = GxICR(GDB_NMI_IPI);
GxICR(DEBUGGER_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
tmp16 = GxICR(DEBUGGER_NMI_IPI);
arch_local_irq_restore(flags);
} while (0);
#endif


@ -39,11 +39,17 @@ ENTRY(__switch_to)
# save prev context
mov __switch_back,d0
mov d0,(THREAD_PC,a0)
mov sp,a2
mov a2,(THREAD_SP,a0)
mov a3,(THREAD_A3,a0)
#ifdef CONFIG_KGDB
btst 0xff,(kgdb_single_step)
bne __switch_to__lift_sstep_bp
__switch_to__continue:
#endif
mov d0,(THREAD_PC,a0)
mov (THREAD_A3,a1),a3
mov (THREAD_SP,a1),a2
@ -68,3 +74,106 @@ ENTRY(__switch_to)
__switch_back:
and ~EPSW_NMID,epsw
ret [d2,d3,a2,a3,exreg1],32
#ifdef CONFIG_KGDB
###############################################################################
#
# Lift the single-step breakpoints when the task being traced is switched out
# A0 = prev
# A1 = next
#
###############################################################################
__switch_to__lift_sstep_bp:
add -12,sp
mov a0,e4
mov a1,e5
# Clear the single-step flag to prevent us coming this way until we get
# switched back in
bclr 0xff,(kgdb_single_step)
# Remove first breakpoint
mov (kgdb_sstep_bp_addr),a2
cmp 0,a2
beq 1f
movbu (kgdb_sstep_bp),d0
movbu d0,(a2)
#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE) || defined(CONFIG_MN10300_CACHE_INV_ICACHE)
mov a2,d0
mov a2,d1
add 1,d1
calls flush_icache_range
#endif
1:
# Remove second breakpoint
mov (kgdb_sstep_bp_addr+4),a2
cmp 0,a2
beq 2f
movbu (kgdb_sstep_bp+1),d0
movbu d0,(a2)
#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE) || defined(CONFIG_MN10300_CACHE_INV_ICACHE)
mov a2,d0
mov a2,d1
add 1,d1
calls flush_icache_range
#endif
2:
# Change the resumption address and return
mov __switch_back__reinstall_sstep_bp,d0
mov e4,a0
mov e5,a1
add 12,sp
bra __switch_to__continue
###############################################################################
#
# Reinstall the single-step breakpoints when the task being traced is switched
# back in (A1 points to the new thread_struct).
#
###############################################################################
__switch_back__reinstall_sstep_bp:
add -12,sp
mov a0,e4 # save the return value
mov 0xff,d3
# Reinstall first breakpoint
mov (kgdb_sstep_bp_addr),a2
cmp 0,a2
beq 1f
movbu (a2),d0
movbu d0,(kgdb_sstep_bp)
movbu d3,(a2)
#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE) || defined(CONFIG_MN10300_CACHE_INV_ICACHE)
mov a2,d0
mov a2,d1
add 1,d1
calls flush_icache_range
#endif
1:
# Reinstall second breakpoint
mov (kgdb_sstep_bp_addr+4),a2
cmp 0,a2
beq 2f
movbu (a2),d0
movbu d0,(kgdb_sstep_bp+1)
movbu d3,(a2)
#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE) || defined(CONFIG_MN10300_CACHE_INV_ICACHE)
mov a2,d0
mov a2,d1
add 1,d1
calls flush_icache_range
#endif
2:
mov d3,(kgdb_single_step)
# Restore the return value (the previous thread_struct pointer)
mov e4,a0
mov a0,d0
add 12,sp
bra __switch_back
#endif /* CONFIG_KGDB */


@ -38,8 +38,9 @@
#include <asm/busctl-regs.h>
#include <unit/leds.h>
#include <asm/fpu.h>
#include <asm/gdb-stub.h>
#include <asm/sections.h>
#include <asm/debugger.h>
#include "internal.h"
#if (CONFIG_INTERRUPT_VECTOR_BASE & 0xffffff)
#error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!"
@ -49,74 +50,178 @@ int kstack_depth_to_print = 24;
spinlock_t die_lock = __SPIN_LOCK_UNLOCKED(die_lock);
ATOMIC_NOTIFIER_HEAD(mn10300_die_chain);
struct exception_to_signal_map {
u8 signo;
u32 si_code;
};
static const struct exception_to_signal_map exception_to_signal_map[256] = {
/* MMU exceptions */
[EXCEP_ITLBMISS >> 3] = { 0, 0 },
[EXCEP_DTLBMISS >> 3] = { 0, 0 },
[EXCEP_IAERROR >> 3] = { 0, 0 },
[EXCEP_DAERROR >> 3] = { 0, 0 },
/* system exceptions */
[EXCEP_TRAP >> 3] = { SIGTRAP, TRAP_BRKPT },
[EXCEP_ISTEP >> 3] = { SIGTRAP, TRAP_TRACE }, /* Monitor */
[EXCEP_IBREAK >> 3] = { SIGTRAP, TRAP_HWBKPT }, /* Monitor */
[EXCEP_OBREAK >> 3] = { SIGTRAP, TRAP_HWBKPT }, /* Monitor */
[EXCEP_PRIVINS >> 3] = { SIGILL, ILL_PRVOPC },
[EXCEP_UNIMPINS >> 3] = { SIGILL, ILL_ILLOPC },
[EXCEP_UNIMPEXINS >> 3] = { SIGILL, ILL_ILLOPC },
[EXCEP_MEMERR >> 3] = { SIGSEGV, SEGV_ACCERR },
[EXCEP_MISALIGN >> 3] = { SIGBUS, BUS_ADRALN },
[EXCEP_BUSERROR >> 3] = { SIGBUS, BUS_ADRERR },
[EXCEP_ILLINSACC >> 3] = { SIGSEGV, SEGV_ACCERR },
[EXCEP_ILLDATACC >> 3] = { SIGSEGV, SEGV_ACCERR },
[EXCEP_IOINSACC >> 3] = { SIGSEGV, SEGV_ACCERR },
[EXCEP_PRIVINSACC >> 3] = { SIGSEGV, SEGV_ACCERR }, /* userspace */
[EXCEP_PRIVDATACC >> 3] = { SIGSEGV, SEGV_ACCERR }, /* userspace */
[EXCEP_DATINSACC >> 3] = { SIGSEGV, SEGV_ACCERR },
[EXCEP_DOUBLE_FAULT >> 3] = { SIGILL, ILL_BADSTK },
/* FPU exceptions */
[EXCEP_FPU_DISABLED >> 3] = { SIGILL, ILL_COPROC },
[EXCEP_FPU_UNIMPINS >> 3] = { SIGILL, ILL_COPROC },
[EXCEP_FPU_OPERATION >> 3] = { SIGFPE, FPE_INTDIV },
/* interrupts */
[EXCEP_WDT >> 3] = { SIGALRM, 0 },
[EXCEP_NMI >> 3] = { SIGQUIT, 0 },
[EXCEP_IRQ_LEVEL0 >> 3] = { SIGINT, 0 },
[EXCEP_IRQ_LEVEL1 >> 3] = { 0, 0 },
[EXCEP_IRQ_LEVEL2 >> 3] = { 0, 0 },
[EXCEP_IRQ_LEVEL3 >> 3] = { 0, 0 },
[EXCEP_IRQ_LEVEL4 >> 3] = { 0, 0 },
[EXCEP_IRQ_LEVEL5 >> 3] = { 0, 0 },
[EXCEP_IRQ_LEVEL6 >> 3] = { 0, 0 },
/* system calls */
[EXCEP_SYSCALL0 >> 3] = { 0, 0 },
[EXCEP_SYSCALL1 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL2 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL3 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL4 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL5 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL6 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL7 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL8 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL9 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL10 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL11 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL12 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL13 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL14 >> 3] = { SIGILL, ILL_ILLTRP },
[EXCEP_SYSCALL15 >> 3] = { SIGABRT, 0 },
};
/*
* These constants are for searching for possible module text
* segments. MODULE_RANGE is a guess of how much space is likely
* to be vmalloced.
* Handle kernel exceptions.
*
* See if there's a fixup handler we can force a jump to when an exception
* happens due to something kernel code did
*/
#define MODULE_RANGE (8 * 1024 * 1024)
int die_if_no_fixup(const char *str, struct pt_regs *regs,
enum exception_code code)
{
u8 opcode;
int signo, si_code;
#define DO_ERROR(signr, prologue, str, name) \
asmlinkage void name(struct pt_regs *regs, u32 intcode) \
{ \
prologue; \
if (die_if_no_fixup(str, regs, intcode)) \
return; \
force_sig(signr, current); \
if (user_mode(regs))
return 0;
peripheral_leds_display_exception(code);
signo = exception_to_signal_map[code >> 3].signo;
si_code = exception_to_signal_map[code >> 3].si_code;
switch (code) {
/* see if we can fixup the kernel accessing memory */
case EXCEP_ITLBMISS:
case EXCEP_DTLBMISS:
case EXCEP_IAERROR:
case EXCEP_DAERROR:
case EXCEP_MEMERR:
case EXCEP_MISALIGN:
case EXCEP_BUSERROR:
case EXCEP_ILLDATACC:
case EXCEP_IOINSACC:
case EXCEP_PRIVINSACC:
case EXCEP_PRIVDATACC:
case EXCEP_DATINSACC:
if (fixup_exception(regs))
return 1;
break;
case EXCEP_TRAP:
case EXCEP_UNIMPINS:
if (get_user(opcode, (uint8_t __user *)regs->pc) != 0)
break;
if (opcode == 0xff) {
if (notify_die(DIE_BREAKPOINT, str, regs, code, 0, 0))
return 1;
if (at_debugger_breakpoint(regs))
regs->pc++;
signo = SIGTRAP;
si_code = TRAP_BRKPT;
}
break;
case EXCEP_SYSCALL1 ... EXCEP_SYSCALL14:
/* syscall return addr is _after_ the instruction */
regs->pc -= 2;
break;
case EXCEP_SYSCALL15:
if (report_bug(regs->pc, regs) == BUG_TRAP_TYPE_WARN)
return 1;
/* syscall return addr is _after_ the instruction */
regs->pc -= 2;
break;
default:
break;
}
if (debugger_intercept(code, signo, si_code, regs) == 0)
return 1;
if (notify_die(DIE_GPF, str, regs, code, 0, 0))
return 1;
/* make the process die as the last resort */
die(str, regs, code);
}
#define DO_EINFO(signr, prologue, str, name, sicode) \
asmlinkage void name(struct pt_regs *regs, u32 intcode) \
{ \
siginfo_t info; \
prologue; \
if (die_if_no_fixup(str, regs, intcode)) \
return; \
info.si_signo = signr; \
if (signr == SIGILL && sicode == ILL_ILLOPC) { \
uint8_t opcode; \
if (get_user(opcode, (uint8_t __user *)regs->pc) == 0) \
if (opcode == 0xff) \
info.si_signo = SIGTRAP; \
} \
info.si_errno = 0; \
info.si_code = sicode; \
info.si_addr = (void *) regs->pc; \
force_sig_info(info.si_signo, &info, current); \
/*
* General exception handler
*/
asmlinkage void handle_exception(struct pt_regs *regs, u32 intcode)
{
siginfo_t info;
/* deal with kernel exceptions here */
if (die_if_no_fixup(NULL, regs, intcode))
return;
/* otherwise it's a userspace exception */
info.si_signo = exception_to_signal_map[intcode >> 3].signo;
info.si_code = exception_to_signal_map[intcode >> 3].si_code;
info.si_errno = 0;
info.si_addr = (void *) regs->pc;
force_sig_info(info.si_signo, &info, current);
}
DO_ERROR(SIGTRAP, {}, "trap", trap);
DO_ERROR(SIGSEGV, {}, "ibreak", ibreak);
DO_ERROR(SIGSEGV, {}, "obreak", obreak);
DO_EINFO(SIGSEGV, {}, "access error", access_error, SEGV_ACCERR);
DO_EINFO(SIGSEGV, {}, "insn access error", insn_acc_error, SEGV_ACCERR);
DO_EINFO(SIGSEGV, {}, "data access error", data_acc_error, SEGV_ACCERR);
DO_EINFO(SIGILL, {}, "privileged opcode", priv_op, ILL_PRVOPC);
DO_EINFO(SIGILL, {}, "invalid opcode", invalid_op, ILL_ILLOPC);
DO_EINFO(SIGILL, {}, "invalid ex opcode", invalid_exop, ILL_ILLOPC);
DO_EINFO(SIGBUS, {}, "invalid address", mem_error, BUS_ADRERR);
DO_EINFO(SIGBUS, {}, "bus error", bus_error, BUS_ADRERR);
DO_ERROR(SIGTRAP,
#ifndef CONFIG_MN10300_USING_JTAG
DCR &= ~0x0001,
#else
{},
#endif
"single step", istep);
/*
* handle NMI
*/
asmlinkage void nmi(struct pt_regs *regs, enum exception_code code)
{
/* see if gdbstub wants to deal with it */
#ifdef CONFIG_GDBSTUB
if (gdbstub_intercept(regs, code))
if (debugger_intercept(code, SIGQUIT, 0, regs))
return;
#endif
printk(KERN_WARNING "--- Register Dump ---\n");
show_registers(regs);
@ -128,29 +233,36 @@ asmlinkage void nmi(struct pt_regs *regs, enum exception_code code)
*/
void show_trace(unsigned long *sp)
{
unsigned long *stack, addr, module_start, module_end;
int i;
unsigned long bottom, stack, addr, fp, raslot;
printk(KERN_EMERG "\nCall Trace:");
printk(KERN_EMERG "\nCall Trace:\n");
stack = sp;
i = 0;
module_start = VMALLOC_START;
module_end = VMALLOC_END;
//stack = (unsigned long)sp;
asm("mov sp,%0" : "=a"(stack));
asm("mov a3,%0" : "=r"(fp));
raslot = ULONG_MAX;
bottom = (stack + THREAD_SIZE) & ~(THREAD_SIZE - 1);
for (; stack < bottom; stack += sizeof(addr)) {
addr = *(unsigned long *)stack;
if (stack == fp) {
if (addr > stack && addr < bottom) {
fp = addr;
raslot = stack + sizeof(addr);
continue;
}
fp = 0;
raslot = ULONG_MAX;
}
while (((long) stack & (THREAD_SIZE - 1)) != 0) {
addr = *stack++;
if (__kernel_text_address(addr)) {
#if 1
printk(" [<%08lx>]", addr);
if (stack >= raslot)
raslot = ULONG_MAX;
else
printk(" ?");
print_symbol(" %s", addr);
printk("\n");
#else
if ((i % 6) == 0)
printk(KERN_EMERG " ");
printk("[<%08lx>] ", addr);
i++;
#endif
}
}
@ -322,86 +434,6 @@ void die(const char *str, struct pt_regs *regs, enum exception_code code)
do_exit(SIGSEGV);
}
/*
* see if there's a fixup handler we can force a jump to when an exception
* happens due to something kernel code did
*/
int die_if_no_fixup(const char *str, struct pt_regs *regs,
enum exception_code code)
{
if (user_mode(regs))
return 0;
peripheral_leds_display_exception(code);
switch (code) {
/* see if we can fixup the kernel accessing memory */
case EXCEP_ITLBMISS:
case EXCEP_DTLBMISS:
case EXCEP_IAERROR:
case EXCEP_DAERROR:
case EXCEP_MEMERR:
case EXCEP_MISALIGN:
case EXCEP_BUSERROR:
case EXCEP_ILLDATACC:
case EXCEP_IOINSACC:
case EXCEP_PRIVINSACC:
case EXCEP_PRIVDATACC:
case EXCEP_DATINSACC:
if (fixup_exception(regs))
return 1;
case EXCEP_UNIMPINS:
if (regs->pc && *(uint8_t *)regs->pc == 0xff)
if (notify_die(DIE_BREAKPOINT, str, regs, code, 0, 0))
return 1;
break;
default:
break;
}
/* see if gdbstub wants to deal with it */
#ifdef CONFIG_GDBSTUB
if (gdbstub_intercept(regs, code))
return 1;
#endif
if (notify_die(DIE_GPF, str, regs, code, 0, 0))
return 1;
/* make the process die as the last resort */
die(str, regs, code);
}
/*
* handle unsupported syscall instructions (syscall 1-15)
*/
static asmlinkage void unsupported_syscall(struct pt_regs *regs,
enum exception_code code)
{
struct task_struct *tsk = current;
siginfo_t info;
/* catch a kernel BUG() */
if (code == EXCEP_SYSCALL15 && !user_mode(regs)) {
if (report_bug(regs->pc, regs) == BUG_TRAP_TYPE_BUG) {
#ifdef CONFIG_GDBSTUB
gdbstub_intercept(regs, code);
#endif
}
}
regs->pc -= 2; /* syscall return addr is _after_ the instruction */
die_if_no_fixup("An unsupported syscall insn was used by the kernel\n",
regs, code);
info.si_signo = SIGILL;
info.si_errno = ENOSYS;
info.si_code = ILL_ILLTRP;
info.si_addr = (void *) regs->pc;
force_sig_info(SIGILL, &info, tsk);
}
/*
* display the register file when the stack pointer gets clobbered
*/
@ -481,10 +513,8 @@ asmlinkage void uninitialised_exception(struct pt_regs *regs,
{
/* see if gdbstub wants to deal with it */
#ifdef CONFIG_GDBSTUB
if (gdbstub_intercept(regs, code))
if (debugger_intercept(code, SIGSYS, 0, regs) == 0)
return;
#endif
peripheral_leds_display_exception(code);
printk(KERN_EMERG "Uninitialised Exception 0x%04x\n", code & 0xFFFF);
@ -549,43 +579,43 @@ void __init set_intr_stub(enum exception_code code, void *handler)
*/
void __init trap_init(void)
{
set_excp_vector(EXCEP_TRAP, trap);
set_excp_vector(EXCEP_ISTEP, istep);
set_excp_vector(EXCEP_IBREAK, ibreak);
set_excp_vector(EXCEP_OBREAK, obreak);
set_excp_vector(EXCEP_TRAP, handle_exception);
set_excp_vector(EXCEP_ISTEP, handle_exception);
set_excp_vector(EXCEP_IBREAK, handle_exception);
set_excp_vector(EXCEP_OBREAK, handle_exception);
set_excp_vector(EXCEP_PRIVINS, priv_op);
set_excp_vector(EXCEP_UNIMPINS, invalid_op);
set_excp_vector(EXCEP_UNIMPEXINS, invalid_exop);
set_excp_vector(EXCEP_MEMERR, mem_error);
set_excp_vector(EXCEP_PRIVINS, handle_exception);
set_excp_vector(EXCEP_UNIMPINS, handle_exception);
set_excp_vector(EXCEP_UNIMPEXINS, handle_exception);
set_excp_vector(EXCEP_MEMERR, handle_exception);
set_excp_vector(EXCEP_MISALIGN, misalignment);
set_excp_vector(EXCEP_BUSERROR, bus_error);
set_excp_vector(EXCEP_ILLINSACC, insn_acc_error);
set_excp_vector(EXCEP_ILLDATACC, data_acc_error);
set_excp_vector(EXCEP_IOINSACC, insn_acc_error);
set_excp_vector(EXCEP_PRIVINSACC, insn_acc_error);
set_excp_vector(EXCEP_PRIVDATACC, data_acc_error);
set_excp_vector(EXCEP_DATINSACC, insn_acc_error);
set_excp_vector(EXCEP_FPU_UNIMPINS, fpu_invalid_op);
set_excp_vector(EXCEP_BUSERROR, handle_exception);
set_excp_vector(EXCEP_ILLINSACC, handle_exception);
set_excp_vector(EXCEP_ILLDATACC, handle_exception);
set_excp_vector(EXCEP_IOINSACC, handle_exception);
set_excp_vector(EXCEP_PRIVINSACC, handle_exception);
set_excp_vector(EXCEP_PRIVDATACC, handle_exception);
set_excp_vector(EXCEP_DATINSACC, handle_exception);
set_excp_vector(EXCEP_FPU_UNIMPINS, handle_exception);
set_excp_vector(EXCEP_FPU_OPERATION, fpu_exception);
set_excp_vector(EXCEP_NMI, nmi);
set_excp_vector(EXCEP_SYSCALL1, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL2, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL3, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL4, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL5, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL6, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL7, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL8, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL9, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL10, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL11, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL12, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL13, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL14, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL15, unsupported_syscall);
set_excp_vector(EXCEP_SYSCALL1, handle_exception);
set_excp_vector(EXCEP_SYSCALL2, handle_exception);
set_excp_vector(EXCEP_SYSCALL3, handle_exception);
set_excp_vector(EXCEP_SYSCALL4, handle_exception);
set_excp_vector(EXCEP_SYSCALL5, handle_exception);
set_excp_vector(EXCEP_SYSCALL6, handle_exception);
set_excp_vector(EXCEP_SYSCALL7, handle_exception);
set_excp_vector(EXCEP_SYSCALL8, handle_exception);
set_excp_vector(EXCEP_SYSCALL9, handle_exception);
set_excp_vector(EXCEP_SYSCALL10, handle_exception);
set_excp_vector(EXCEP_SYSCALL11, handle_exception);
set_excp_vector(EXCEP_SYSCALL12, handle_exception);
set_excp_vector(EXCEP_SYSCALL13, handle_exception);
set_excp_vector(EXCEP_SYSCALL14, handle_exception);
set_excp_vector(EXCEP_SYSCALL15, handle_exception);
}
/*


@ -99,3 +99,49 @@ config MN10300_CACHE_INV_ICACHE
help
Set if we need the icache to be invalidated, even if the dcache is in
write-through mode and doesn't need flushing.
#
# The kernel debugger gets its own separate cache flushing functions
#
config MN10300_DEBUGGER_CACHE_FLUSH_BY_TAG
def_bool y if KERNEL_DEBUGGER && \
MN10300_CACHE_WBACK && \
!MN10300_CACHE_SNOOP && \
MN10300_CACHE_MANAGE_BY_TAG
help
Set if the debugger needs to flush the dcache and invalidate the
icache using the cache tag registers to make breakpoints work.
config MN10300_DEBUGGER_CACHE_FLUSH_BY_REG
def_bool y if KERNEL_DEBUGGER && \
MN10300_CACHE_WBACK && \
!MN10300_CACHE_SNOOP && \
MN10300_CACHE_MANAGE_BY_REG
help
Set if the debugger needs to flush the dcache and invalidate the
icache using automatic purge registers to make breakpoints work.
config MN10300_DEBUGGER_CACHE_INV_BY_TAG
def_bool y if KERNEL_DEBUGGER && \
MN10300_CACHE_WTHRU && \
!MN10300_CACHE_SNOOP && \
MN10300_CACHE_MANAGE_BY_TAG
help
Set if the debugger needs to invalidate the icache using the cache
tag registers to make breakpoints work.
config MN10300_DEBUGGER_CACHE_INV_BY_REG
def_bool y if KERNEL_DEBUGGER && \
MN10300_CACHE_WTHRU && \
!MN10300_CACHE_SNOOP && \
MN10300_CACHE_MANAGE_BY_REG
help
Set if the debugger needs to invalidate the icache using automatic
purge registers to make breakpoints work.
config MN10300_DEBUGGER_CACHE_NO_FLUSH
def_bool y if KERNEL_DEBUGGER && \
(MN10300_CACHE_DISABLED || MN10300_CACHE_SNOOP)
help
Set if the debugger does not need to flush the dcache and/or
invalidate the icache to make breakpoints work.


@ -13,6 +13,15 @@ cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o
cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_FLUSH_BY_TAG) += \
cache-dbg-flush-by-tag.o cache-dbg-inv-by-tag.o
cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_FLUSH_BY_REG) += \
cache-dbg-flush-by-reg.o
cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_INV_BY_TAG) += \
cache-dbg-inv-by-tag.o cache-dbg-inv.o
cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_INV_BY_REG) += \
cache-dbg-inv-by-reg.o cache-dbg-inv.o
cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
obj-y := \


@ -0,0 +1,160 @@
/* MN10300 CPU cache invalidation routines, using automatic purge registers
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
###############################################################################
#
# void debugger_local_cache_flushinv(void)
# Flush the entire data cache back to RAM and invalidate the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv
.type debugger_local_cache_flushinv,@function
debugger_local_cache_flushinv:
#
# firstly flush the dcache
#
movhu (CHCTR),d0
btst CHCTR_DCEN|CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_end
mov DCPGCR,a0
mov epsw,d1
and ~EPSW_IE,epsw
or EPSW_NMID,epsw
nop
btst CHCTR_DCEN,d0
beq debugger_local_cache_flushinv_no_dcache
# wait for busy bit of area purge
setlb
mov (a0),d0
btst DCPGCR_DCPGBSY,d0
lne
# set mask
clr d0
mov d0,(DCPGMR)
# area purge
#
# DCPGCR = DCPGCR_DCP
#
mov DCPGCR_DCP,d0
mov d0,(a0)
# wait for busy bit of area purge
setlb
mov (a0),d0
btst DCPGCR_DCPGBSY,d0
lne
debugger_local_cache_flushinv_no_dcache:
#
# secondly, invalidate the icache if it is enabled
#
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_done
invalidate_icache 0
debugger_local_cache_flushinv_done:
mov d1,epsw
debugger_local_cache_flushinv_end:
ret [],0
.size debugger_local_cache_flushinv,.-debugger_local_cache_flushinv
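In C terms, the dcache half of the routine above drives the automatic purge hardware: wait for any purge already in progress, clear the address mask so every line matches, kick off an area purge, then wait for completion. A rough sketch of just that handshake, assuming the CHCTR/DCPGCR/DCPGMR register and bit definitions from the MN10300 headers already included above (the interrupt/NMI masking and the icache invalidation are omitted):

    /* Illustrative C rendering of the purge-register handshake; not part of the patch. */
    static void dcache_purge_all_sketch(void)
    {
        if (!(*(volatile unsigned short *)CHCTR & CHCTR_DCEN))
            return;                                   /* dcache is off, nothing to flush */
        while (*(volatile unsigned long *)DCPGCR & DCPGCR_DCPGBSY)
            ;                                         /* wait for a purge in progress */
        *(volatile unsigned long *)DCPGMR = 0;        /* a mask of 0 matches every address */
        *(volatile unsigned long *)DCPGCR = DCPGCR_DCP; /* start the area purge */
        while (*(volatile unsigned long *)DCPGCR & DCPGCR_DCPGBSY)
            ;                                         /* and wait for it to finish */
    }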
###############################################################################
#
# void debugger_local_cache_flushinv_one(u8 *addr)
#
# Flush one particular cacheline back to RAM and invalidate any copy in the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv_one
.type debugger_local_cache_flushinv_one,@function
debugger_local_cache_flushinv_one:
movhu (CHCTR),d1
btst CHCTR_DCEN|CHCTR_ICEN,d1
beq debugger_local_cache_flushinv_one_end
btst CHCTR_DCEN,d1
beq debugger_local_cache_flushinv_one_no_dcache
# round cacheline addr down
and L1_CACHE_TAG_MASK,d0
mov d0,a1
mov d0,d1
# determine the dcache purge control reg address
mov DCACHE_PURGE(0,0),a0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0
# retain valid entries in the cache
or L1_CACHE_TAG_VALID,d1
# conditionally purge this line in all ways
mov d1,(L1_CACHE_WAYDISP*0,a0)
debugger_local_cache_flushinv_one_no_dcache:
#
# now try to flush the icache
#
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_one_end
LOCAL_CLI_SAVE(d1)
mov ICIVCR,a0
# wait for the invalidator to quiesce
setlb
mov (a0),d0
btst ICIVCR_ICIVBSY,d0
lne
# set the mask
mov L1_CACHE_TAG_MASK,d0
mov d0,(ICIVMR)
# invalidate the cache line at the given address
or ICIVCR_ICI,a1
mov a1,(a0)
# wait for the invalidator to quiesce again
setlb
mov (a0),d0
btst ICIVCR_ICIVBSY,d0
lne
LOCAL_IRQ_RESTORE(d1)
debugger_local_cache_flushinv_one_end:
ret [],0
.size debugger_local_cache_flushinv_one,.-debugger_local_cache_flushinv_one
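A typical consumer of debugger_local_cache_flushinv_one() is breakpoint insertion: after the debugger patches an instruction in memory it must push the modified line out of the dcache and drop any stale copy from the icache. A hypothetical sketch (install_bkpt() is an illustrative name, not part of this patch):

    /* Not part of the patch: shows where the single-line flush fits in. */
    static void install_bkpt(u8 *addr, u8 insn)
    {
        *addr = insn;                            /* patch in the breakpoint instruction */
        debugger_local_cache_flushinv_one(addr); /* make the change visible to the CPU */
    }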


@ -0,0 +1,114 @@
/* MN10300 CPU cache invalidation routines, using direct tag flushing
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
###############################################################################
#
# void debugger_local_cache_flushinv(void)
#
# Flush the entire data cache back to RAM and invalidate the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv
.type debugger_local_cache_flushinv,@function
debugger_local_cache_flushinv:
#
# firstly flush the dcache
#
movhu (CHCTR),d0
btst CHCTR_DCEN|CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_end
btst CHCTR_DCEN,d0
beq debugger_local_cache_flushinv_no_dcache
# read the addresses tagged in the cache's tag RAM and attempt to flush
# those addresses specifically
# - we rely on the hardware to filter out invalid tag entry addresses
mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
mov DCACHE_PURGE(0,0),a1 # dcache purge request address
mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,e0 # total number of entries
mn10300_local_dcache_flush_loop:
mov (a0),d0
and L1_CACHE_TAG_MASK,d0
or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
# cache
mov d0,(a1) # conditional purge
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
add -1,e0
bne mn10300_local_dcache_flush_loop
debugger_local_cache_flushinv_no_dcache:
#
# secondly, invalidate the icache if it is enabled
#
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_end
invalidate_icache 1
debugger_local_cache_flushinv_end:
ret [],0
.size debugger_local_cache_flushinv,.-debugger_local_cache_flushinv
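The loop above is the "by tag" counterpart of the area purge: instead of asking the purge hardware to scan for lines, the code walks every tag RAM slot itself and writes each tag back to the purge port with the valid bit set, which flushes the line only if it is actually resident. An illustrative C rendering, reusing the constants from the assembly:

    /* Sketch only; the real work is done by the assembly loop above. */
    static void dcache_flush_by_tag_sketch(void)
    {
        unsigned long tag  = DCACHE_TAG(0, 0);     /* tag RAM access window */
        unsigned long purg = DCACHE_PURGE(0, 0);   /* purge request window */
        int n;

        for (n = 0; n < L1_CACHE_NWAYS * L1_CACHE_NENTRIES; n++) {
            unsigned long t = *(volatile unsigned long *)tag;

            t &= L1_CACHE_TAG_MASK;                /* keep the line address */
            t |= L1_CACHE_TAG_VALID;               /* only purge if still valid */
            *(volatile unsigned long *)purg = t;   /* conditional purge request */
            tag  += L1_CACHE_BYTES;
            purg += L1_CACHE_BYTES;
        }
    }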
###############################################################################
#
# void debugger_local_cache_flushinv_one(u8 *addr)
#
# Flush one particular cacheline back to RAM and invalidate any copy in the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv_one
.type debugger_local_cache_flushinv_one,@function
debugger_local_cache_flushinv_one:
movhu (CHCTR),d1
btst CHCTR_DCEN|CHCTR_ICEN,d1
beq debugger_local_cache_flushinv_one_end
btst CHCTR_DCEN,d1
beq debugger_local_cache_flushinv_one_icache
# round cacheline addr down
and L1_CACHE_TAG_MASK,d0
mov d0,a1
# determine the dcache purge control reg address
mov DCACHE_PURGE(0,0),a0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0
# retain valid entries in the cache
or L1_CACHE_TAG_VALID,a1
# conditionally purge this line in all ways
mov a1,(L1_CACHE_WAYDISP*0,a0)
# now go and do the icache
bra debugger_local_cache_flushinv_one_icache
debugger_local_cache_flushinv_one_end:
ret [],0
.size debugger_local_cache_flushinv_one,.-debugger_local_cache_flushinv_one


@ -0,0 +1,69 @@
/* MN10300 CPU cache invalidation routines, using automatic purge registers
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
.globl debugger_local_cache_flushinv_one
###############################################################################
#
# void debugger_local_cache_flushinv_one(u8 *addr)
#
# Invalidate one particular cacheline if it's in the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv_one
.type debugger_local_cache_flushinv_one,@function
debugger_local_cache_flushinv_one:
mov d0,a1
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq mn10300_local_icache_inv_range_reg_end
LOCAL_CLI_SAVE(d1)
mov ICIVCR,a0
# wait for the invalidator to quiesce
setlb
mov (a0),d0
btst ICIVCR_ICIVBSY,d0
lne
# set the mask
mov ~L1_CACHE_TAG_MASK,d0
mov d0,(ICIVMR)
# invalidate the cache line at the given address
and ~L1_CACHE_TAG_MASK,a1
or ICIVCR_ICI,a1
mov a1,(a0)
# wait for the invalidator to quiesce again
setlb
mov (a0),d0
btst ICIVCR_ICIVBSY,d0
lne
LOCAL_IRQ_RESTORE(d1)
mn10300_local_icache_inv_range_reg_end:
ret [],0
.size debugger_local_cache_flushinv_one,.-debugger_local_cache_flushinv_one


@ -0,0 +1,120 @@
/* MN10300 CPU cache invalidation routines, using direct tag flushing
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
.globl debugger_local_cache_flushinv_one_icache
###############################################################################
#
# void debugger_local_cache_flushinv_one(u8 *addr)
#
# Invalidate one particular cacheline if it's in the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv_one_icache
.type debugger_local_cache_flushinv_one_icache,@function
debugger_local_cache_flushinv_one_icache:
movm [d3,a2],(sp)
# take a copy of the target address before D0 is overwritten by the CHCTR read
mov d0,a1
and L1_CACHE_TAG_MASK,a1
mov CHCTR,a2
movhu (a2),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_one_icache_end
# read the tags from the tag RAM, and if they indicate a matching valid
# cache line then we invalidate that line
mov ICACHE_TAG(0,0),a0
mov a1,d0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0 # starting icache tag RAM
# access address
and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
or L1_CACHE_TAG_VALID,a1
mov L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_VALID,d1
LOCAL_CLI_SAVE(d3)
# disable the icache
movhu (a2),d0
and ~CHCTR_ICEN,d0
movhu d0,(a2)
# and wait for it to calm down
setlb
movhu (a2),d0
btst CHCTR_ICBUSY,d0
lne
# check all the way tags for this cache entry
mov (a0),d0 # read the tag in the way 0 slot
xor a1,d0
and d1,d0
beq debugger_local_icache_kill # jump if matched
add L1_CACHE_WAYDISP,a0
mov (a0),d0 # read the tag in the way 1 slot
xor a1,d0
and d1,d0
beq debugger_local_icache_kill # jump if matched
add L1_CACHE_WAYDISP,a0
mov (a0),d0 # read the tag in the way 2 slot
xor a1,d0
and d1,d0
beq debugger_local_icache_kill # jump if matched
add L1_CACHE_WAYDISP,a0
mov (a0),d0 # read the tag in the way 3 slot
xor a1,d0
and d1,d0
bne debugger_local_icache_finish # jump if not matched
debugger_local_icache_kill:
mov d0,(a0) # kill the tag (D0 is 0 at this point)
debugger_local_icache_finish:
# wait for the cache to finish what it's doing
setlb
movhu (a2),d0
btst CHCTR_ICBUSY,d0
lne
# and reenable it
or CHCTR_ICEN,d0
movhu d0,(a2)
movhu (a2),d0
# re-enable interrupts
LOCAL_IRQ_RESTORE(d3)
debugger_local_cache_flushinv_one_icache_end:
ret [d3,a2],8
.size debugger_local_cache_flushinv_one_icache,.-debugger_local_cache_flushinv_one_icache
#ifdef CONFIG_MN10300_DEBUGGER_CACHE_INV_BY_TAG
.globl debugger_local_cache_flushinv_one
.type debugger_local_cache_flushinv_one,@function
debugger_local_cache_flushinv_one = debugger_local_cache_flushinv_one_icache
#endif
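Because this cache variant has no invalidation registers, the routine above has to do the way search by hand: with the icache temporarily disabled it reads the tag of each of the four ways for the target entry, compares it against the requested address, and zeroes a matching valid tag to drop that line. An illustrative C rendering of the comparison (the surrounding icache disable/re-enable and interrupt masking are omitted):

    /* Sketch only; mirrors the tag-matching logic in the assembly above. */
    static void icache_inv_one_by_tag_sketch(unsigned long addr)
    {
        unsigned long slot = ICACHE_TAG(0, 0) + (addr & L1_CACHE_TAG_ENTRY);
        unsigned long cmp  = (addr & ~(L1_CACHE_DISPARITY - 1)) | L1_CACHE_TAG_VALID;
        unsigned long mask = L1_CACHE_TAG_ADDRESS | L1_CACHE_TAG_VALID;
        int way;

        for (way = 0; way < 4; way++) {
            volatile unsigned long *tag =
                (volatile unsigned long *)(slot + way * L1_CACHE_WAYDISP);

            if (((*tag ^ cmp) & mask) == 0) {
                *tag = 0;              /* kill the matching, valid tag */
                break;
            }
        }
    }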


@ -0,0 +1,47 @@
/* MN10300 CPU cache invalidation routines
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
.globl debugger_local_cache_flushinv
###############################################################################
#
# void debugger_local_cache_flushinv(void)
#
# Invalidate the entire icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv
.type debugger_local_cache_flushinv,@function
debugger_local_cache_flushinv:
#
# we only need to invalidate the icache in this cache mode
#
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_end
invalidate_icache 1
debugger_local_cache_flushinv_end:
ret [],0
.size debugger_local_cache_flushinv,.-debugger_local_cache_flushinv


@ -62,7 +62,7 @@ mn10300_local_dcache_flush:
mn10300_local_dcache_flush_loop:
mov (a0),d0
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
and L1_CACHE_TAG_MASK,d0
or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
# cache
mov d0,(a1) # conditional purge
@ -112,11 +112,11 @@ mn10300_local_dcache_flush_range:
1:
# round start addr down
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
and L1_CACHE_TAG_MASK,d0
mov d0,a1
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
and L1_CACHE_TAG_MASK,d1
# write a request to flush all instances of an address from the cache
mov DCACHE_PURGE(0,0),a0
@ -215,12 +215,11 @@ mn10300_local_dcache_flush_inv_range:
bra mn10300_local_dcache_flush_inv
1:
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
# addr down
and L1_CACHE_TAG_MASK,d0 # round start addr down
mov d0,a1
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_MASK,d1
# write a request to flush and invalidate all instances of an address
# from the cache


@ -15,6 +15,7 @@
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
#define mn10300_local_dcache_inv_range_intr_interval \
+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
@ -62,10 +63,7 @@ mn10300_local_icache_inv:
btst CHCTR_ICEN,d0
beq mn10300_local_icache_inv_end
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
movhu (a0),d0
invalidate_icache 1
mn10300_local_icache_inv_end:
ret [],0
@ -87,11 +85,8 @@ mn10300_local_dcache_inv:
btst CHCTR_DCEN,d0
beq mn10300_local_dcache_inv_end
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
movhu (a0),d0
invalidate_dcache 1
mn10300_local_dcache_inv_end:
ret [],0
.size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
@ -121,9 +116,9 @@ mn10300_local_dcache_inv_range:
# and if they're not cacheline-aligned, we must flush any bits outside
# the range that share cachelines with stuff inside the range
#ifdef CONFIG_MN10300_CACHE_WBACK
btst ~(L1_CACHE_BYTES-1),d0
btst ~L1_CACHE_TAG_MASK,d0
bne 1f
btst ~(L1_CACHE_BYTES-1),d1
btst ~L1_CACHE_TAG_MASK,d1
beq 2f
1:
bra mn10300_local_dcache_flush_inv_range
@ -141,12 +136,11 @@ mn10300_local_dcache_inv_range:
# writeback mode, in which case we would be in flush and invalidate by
# now
#ifndef CONFIG_MN10300_CACHE_WBACK
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
# addr down
and L1_CACHE_TAG_MASK,d0 # round start addr down
mov L1_CACHE_BYTES-1,d2
add d2,d1
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1 # round end addr up
and L1_CACHE_TAG_MASK,d1 # round end addr up
#endif /* !CONFIG_MN10300_CACHE_WBACK */
sub d0,d1,d2 # calculate the total size


@ -15,6 +15,7 @@
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
#define mn10300_local_dcache_inv_range_intr_interval \
+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
@ -70,43 +71,7 @@ mn10300_local_icache_inv:
btst CHCTR_ICEN,d0
beq mn10300_local_icache_inv_end
#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
LOCAL_CLI_SAVE(d1)
# disable the icache
and ~CHCTR_ICEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
# wait for the cache to finish
mov CHCTR,a0
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# and reenable it
and ~CHCTR_ICINV,d0
or CHCTR_ICEN,d0
movhu d0,(a0)
movhu (a0),d0
LOCAL_IRQ_RESTORE(d1)
#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
movhu (a0),d0
#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
invalidate_icache 1
mn10300_local_icache_inv_end:
ret [],0
@ -128,43 +93,7 @@ mn10300_local_dcache_inv:
btst CHCTR_DCEN,d0
beq mn10300_local_dcache_inv_end
#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
LOCAL_CLI_SAVE(d1)
# disable the dcache
and ~CHCTR_DCEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_DCBUSY,d0
lne
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
# wait for the cache to finish
mov CHCTR,a0
setlb
movhu (a0),d0
btst CHCTR_DCBUSY,d0
lne
# and reenable it
and ~CHCTR_DCINV,d0
or CHCTR_DCEN,d0
movhu d0,(a0)
movhu (a0),d0
LOCAL_IRQ_RESTORE(d1)
#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
movhu (a0),d0
#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
invalidate_dcache 1
mn10300_local_dcache_inv_end:
ret [],0
@ -195,9 +124,9 @@ mn10300_local_dcache_inv_range:
# and if they're not cacheline-aligned, we must flush any bits outside
# the range that share cachelines with stuff inside the range
#ifdef CONFIG_MN10300_CACHE_WBACK
btst ~(L1_CACHE_BYTES-1),d0
btst ~L1_CACHE_TAG_MASK,d0
bne 1f
btst ~(L1_CACHE_BYTES-1),d1
btst ~L1_CACHE_TAG_MASK,d1
beq 2f
1:
bra mn10300_local_dcache_flush_inv_range
@ -212,11 +141,10 @@ mn10300_local_dcache_inv_range:
beq mn10300_local_dcache_inv_range_end
#ifndef CONFIG_MN10300_CACHE_WBACK
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
# addr down
and L1_CACHE_TAG_MASK,d0 # round start addr down
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
and L1_CACHE_TAG_MASK,d1
#endif /* !CONFIG_MN10300_CACHE_WBACK */
mov d0,a1

arch/mn10300/mm/cache.inc Normal file

@ -0,0 +1,133 @@
/* MN10300 CPU core caching macros -*- asm -*-
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
###############################################################################
#
# Invalidate the instruction cache.
# A0: Should hold CHCTR
# D0: Should have been read from CHCTR
# D1: Will be clobbered
#
# On some cores it is necessary to disable the icache whilst we do this.
#
###############################################################################
.macro invalidate_icache,disable_irq
#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
.if \disable_irq
# don't want an interrupt routine seeing a disabled cache
mov epsw,d1
and ~EPSW_IE,epsw
or EPSW_NMID,epsw
nop
nop
.endif
# disable the icache
and ~CHCTR_ICEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
# wait for the cache to finish
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# and reenable it
or CHCTR_ICEN,d0
movhu d0,(a0)
movhu (a0),d0
.if \disable_irq
LOCAL_IRQ_RESTORE(d1)
.endif
#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
movhu (a0),d0
#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
.endm
###############################################################################
#
# Invalidate the data cache.
# A0: Should hold CHCTR
# D0: Should have been read from CHCTR
# D1: Will be clobbered
#
# On some cores it is necessary to disable the dcache whilst we do this.
#
###############################################################################
.macro invalidate_dcache,disable_irq
#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
.if \disable_irq
# don't want an interrupt routine seeing a disabled cache
mov epsw,d1
and ~EPSW_IE,epsw
or EPSW_NMID,epsw
nop
nop
.endif
# disable the dcache
and ~CHCTR_DCEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_DCBUSY,d0
lne
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
# wait for the cache to finish
setlb
movhu (a0),d0
btst CHCTR_DCBUSY,d0
lne
# and reenable it
or CHCTR_DCEN,d0
movhu d0,(a0)
movhu (a0),d0
.if \disable_irq
LOCAL_IRQ_RESTORE(d1)
.endif
#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
movhu (a0),d0
#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
.endm


@ -28,8 +28,9 @@
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/gdb-stub.h>
#include <asm/cpu-regs.h>
#include <asm/debugger.h>
#include <asm/gdb-stub.h>
/*
* Unlock any spinlocks which will prevent us from getting the
@ -306,10 +307,8 @@ no_context:
printk(" printing pc:\n");
printk(KERN_ALERT "%08lx\n", regs->pc);
#ifdef CONFIG_GDBSTUB
gdbstub_intercept(
regs, fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR);
#endif
debugger_intercept(fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR,
SIGSEGV, SEGV_ACCERR, regs);
page = PTBR;
page = ((unsigned long *) __va(page))[address >> 22];
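The call above replaces the GDB-stub-specific hook with the generic debugger one; the declaration comes from <asm/debugger.h>, which this hunk adds to the include list. Its assumed shape, inferred from the call site rather than copied from the header:

    /* Reports the exception to whichever kernel debugger back end is configured;
     * the return value is taken to indicate whether the debugger absorbed the fault. */
    extern int debugger_intercept(enum exception_code excep,
                                  int signo, int si_code, struct pt_regs *regs);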


@ -23,6 +23,7 @@
#define L1_CACHE_TAG_DIRTY 0x00000008 /* data cache tag dirty bit */
#define L1_CACHE_TAG_ENTRY 0x00000ff0 /* cache tag entry address mask */
#define L1_CACHE_TAG_ADDRESS 0xfffff000 /* cache tag line address mask */
#define L1_CACHE_TAG_MASK +(L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY)
/*
* specification of the interval between interrupt checking intervals whilst


@ -29,6 +29,7 @@
#define L1_CACHE_TAG_DIRTY 0x00000008 /* data cache tag dirty bit */
#define L1_CACHE_TAG_ENTRY 0x00000fe0 /* cache tag entry address mask */
#define L1_CACHE_TAG_ADDRESS 0xfffff000 /* cache tag line address mask */
#define L1_CACHE_TAG_MASK +(L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY)
/*
* specification of the interval between interrupt checking intervals whilst


@ -297,6 +297,7 @@ extern int
kgdb_handle_exception(int ex_vector, int signo, int err_code,
struct pt_regs *regs);
extern int kgdb_nmicallback(int cpu, void *regs);
extern void gdbstub_exit(int status);
extern int kgdb_single_step;
extern atomic_t kgdb_active;


@ -1093,3 +1093,33 @@ int gdbstub_state(struct kgdb_state *ks, char *cmd)
put_packet(remcom_out_buffer);
return 0;
}
/**
* gdbstub_exit - Send an exit message to GDB
* @status: The exit code to report.
*/
void gdbstub_exit(int status)
{
unsigned char checksum, ch, buffer[3];
int loop;
buffer[0] = 'W';
buffer[1] = hex_asc_hi(status);
buffer[2] = hex_asc_lo(status);
dbg_io_ops->write_char('$');
checksum = 0;
for (loop = 0; loop < 3; loop++) {
ch = buffer[loop];
checksum += ch;
dbg_io_ops->write_char(ch);
}
dbg_io_ops->write_char('#');
dbg_io_ops->write_char(hex_asc_hi(checksum));
dbg_io_ops->write_char(hex_asc_lo(checksum));
/* make sure the output is flushed, lest the bootloader clobber it */
dbg_io_ops->flush();
}
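gdbstub_exit() emits a remote-protocol 'W' (target exited) packet followed by its checksum, so the attached GDB detaches cleanly instead of timing out when the machine halts, reboots or powers off. A hypothetical caller, assuming the existing kgdb_connected flag from <linux/kgdb.h> (the hook name is illustrative, not part of this patch):

    /* Sketch of an arch halt/reboot notifier telling GDB the target is going away. */
    static void debugger_notify_halt(void)
    {
        if (kgdb_connected)
            gdbstub_exit(0);    /* report "exited with status 0" to the remote GDB */
    }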