forked from Minki/linux
Commit 4e14dfc722: Copy the stacktrace ops code from x86 and provide a central function for use by functions that need to dump a callstack.
Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
131 lines, 3.1 KiB, C
/*
 * SH specific backtracing code for oprofile
 *
 * Copyright 2007 STMicroelectronics Ltd.
 *
 * Author: Dave Peverley <dpeverley@mpc-data.co.uk>
 *
 * Based on ARM oprofile backtrace code by Richard Purdie and in turn, i386
 * oprofile backtrace code by John Levon, David Smith
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
|
|
#include <linux/oprofile.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/kallsyms.h>
|
|
#include <linux/mm.h>
|
|
#include <asm/ptrace.h>
|
|
#include <asm/uaccess.h>
|
|
#include <asm/sections.h>
|
|
#include <asm/stacktrace.h>
|
|
|
|
/*
 * dump_trace() callback: invoked for unwinder warnings that reference a
 * symbol address.  oprofile sampling has nowhere useful to report them,
 * so they are deliberately dropped.
 */
static void backtrace_warning_symbol(void *data, char *msg,
				     unsigned long symbol)
{
	/* Ignore warnings */
}
|
|
|
|
/*
 * dump_trace() callback: invoked for plain unwinder warnings.
 * Deliberately a no-op for the same reason as backtrace_warning_symbol().
 */
static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}
|
|
|
|
/*
 * dump_trace() stack-switch callback.  A zero return tells the unwinder
 * to continue walking; we never ask it to stop, whatever stack it is on.
 */
static int backtrace_stack(void *data, char *name)
{
	/* Walk every stack we are handed. */
	return 0;
}
|
|
|
|
/*
 * dump_trace() address callback: record one return address into the
 * oprofile trace, consuming one unit of the depth budget passed via
 * @data.  Once the budget reaches zero, further addresses are ignored.
 */
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	unsigned int *depth = data;

	/*
	 * Only decrement while the budget is non-zero.  The previous
	 * post-decrement test ((*depth)--) wrapped *depth around to
	 * UINT_MAX once it hit zero, which re-enabled tracing on any
	 * subsequent callback invocations.
	 */
	if (*depth) {
		(*depth)--;
		oprofile_add_trace(addr);
	}
}
|
|
|
|
/*
 * Callback table handed to dump_trace() when unwinding kernel-mode
 * samples; mirrors the x86 oprofile stacktrace_ops wiring.
 */
static struct stacktrace_ops backtrace_ops = {
	.warning = backtrace_warning,
	.warning_symbol = backtrace_warning_symbol,
	.stack = backtrace_stack,
	.address = backtrace_address,
};
|
|
|
|
/*
 * Limit to stop backtracing too far: caps the per-sample depth so a
 * corrupt or looping stack cannot keep us unwinding forever.
 */
static int backtrace_limit = 20;
|
|
|
|
static unsigned long *
|
|
user_backtrace(unsigned long *stackaddr, struct pt_regs *regs)
|
|
{
|
|
unsigned long buf_stack;
|
|
|
|
/* Also check accessibility of address */
|
|
if (!access_ok(VERIFY_READ, stackaddr, sizeof(unsigned long)))
|
|
return NULL;
|
|
|
|
if (__copy_from_user_inatomic(&buf_stack, stackaddr, sizeof(unsigned long)))
|
|
return NULL;
|
|
|
|
/* Quick paranoia check */
|
|
if (buf_stack & 3)
|
|
return NULL;
|
|
|
|
oprofile_add_trace(buf_stack);
|
|
|
|
stackaddr++;
|
|
|
|
return stackaddr;
|
|
}
|
|
|
|
/*
|
|
* | | /\ Higher addresses
|
|
* | |
|
|
* --------------- stack base (address of current_thread_info)
|
|
* | thread info |
|
|
* . .
|
|
* | stack |
|
|
* --------------- saved regs->regs[15] value if valid
|
|
* . .
|
|
* --------------- struct pt_regs stored on stack (struct pt_regs *)
|
|
* | |
|
|
* . .
|
|
* | |
|
|
* --------------- ???
|
|
* | |
|
|
* | | \/ Lower addresses
|
|
*
|
|
* Thus, &pt_regs <-> stack base restricts the valid(ish) fp values
|
|
*/
|
|
static int valid_kernel_stack(unsigned long *stackaddr, struct pt_regs *regs)
|
|
{
|
|
unsigned long stack = (unsigned long)regs;
|
|
unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE;
|
|
|
|
return ((unsigned long)stackaddr > stack) && ((unsigned long)stackaddr < stack_base);
|
|
}
|
|
|
|
void sh_backtrace(struct pt_regs * const regs, unsigned int depth)
|
|
{
|
|
unsigned long *stackaddr;
|
|
|
|
/*
|
|
* Paranoia - clip max depth as we could get lost in the weeds.
|
|
*/
|
|
if (depth > backtrace_limit)
|
|
depth = backtrace_limit;
|
|
|
|
stackaddr = (unsigned long *)regs->regs[15];
|
|
if (!user_mode(regs)) {
|
|
if (depth)
|
|
dump_trace(NULL, regs, stackaddr,
|
|
&backtrace_ops, &depth);
|
|
return;
|
|
}
|
|
|
|
while (depth-- && (stackaddr != NULL))
|
|
stackaddr = user_backtrace(stackaddr, regs);
|
|
}
|