linux/arch/powerpc/include/asm/ptrace.h

/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This struct defines the way the registers are stored on the
 * kernel stack during a system call or other kernel entry.
 *
 * This should only contain volatile regs, since we can keep the
 * non-volatile regs in the thread_struct; it should only be set up
 * when just the volatiles have been saved by the interrupt code.
 *
 * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure
 * that the overall structure is a multiple of 16 bytes in length.
 *
 * Note that the offsets of the fields in this struct correspond with
 * the PT_* values below. This simplifies arch/powerpc/kernel/ptrace.c.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
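/*
 * Illustrative sketch (not part of this header): the "multiple of 16 bytes"
 * requirement above could be asserted at build time from C code that has
 * the struct pt_regs definition in scope, e.g.:
 *
 *	BUILD_BUG_ON(sizeof(struct pt_regs) % 16 != 0);
 */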
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H
#include <uapi/asm/ptrace.h>
#ifdef __powerpc64__
#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265) /* "regshere" */
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \
STACK_FRAME_OVERHEAD + 288)
#define STACK_FRAME_MARKER 12
/* Size of dummy stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE 128
#define __SIGNAL_FRAMESIZE32 64
#else /* __powerpc64__ */
#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) /* "regs" */
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define STACK_FRAME_MARKER 2
/* Size of stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE 64
#endif /* __powerpc64__ */
#ifndef __ASSEMBLY__
#define GET_IP(regs) ((regs)->nip)
#define GET_USP(regs) ((regs)->gpr[1])
#define GET_FP(regs) (0)
#define SET_FP(regs, val)
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#define profile_pc profile_pc
#endif
#include <asm-generic/ptrace.h>
#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
	return !(regs->ccr & 0x10000000);
}

static inline long regs_return_value(struct pt_regs *regs)
{
	if (is_syscall_success(regs))
		return regs->gpr[3];
	else
		return -regs->gpr[3];
}
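/*
 * Illustrative sketch (not part of the kernel API): on powerpc a syscall
 * reports failure via the CR0 summary-overflow bit (the 0x10000000 bit of
 * CCR tested above) rather than by returning a negative value in r3, so
 * exit-path consumers such as audit should use these helpers instead of
 * reading gpr[3] directly.  syscall_exit_example() is a hypothetical
 * function, not something defined by this header:
 *
 *	void syscall_exit_example(struct pt_regs *regs)
 *	{
 *		long rc = regs_return_value(regs);
 *
 *		if (!is_syscall_success(regs))
 *			pr_debug("syscall failed: %ld\n", rc);
 *	}
 */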
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
#endif
#define force_successful_syscall_return() \
	do { \
		set_thread_flag(TIF_NOERROR); \
	} while(0)
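/*
 * Illustrative sketch: force_successful_syscall_return() is intended for
 * syscalls whose legitimate return value could be mistaken for a negative
 * errno by the exit path (a ptrace PEEK result or an mmap address, for
 * instance).  Setting TIF_NOERROR tells the slow exit path not to flag the
 * result as an error.  example_syscall() below is hypothetical:
 *
 *	long example_syscall(void)
 *	{
 *		long value = peek_some_word();	// hypothetical helper
 *
 *		force_successful_syscall_return();
 *		return value;	// may look negative, but is not an error
 *	}
 */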
struct task_struct;
extern int ptrace_get_reg(struct task_struct *task, int regno,
unsigned long *data);
extern int ptrace_put_reg(struct task_struct *task, int regno,
unsigned long data);
#define current_pt_regs() \
((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
/*
 * We use the least-significant bit of the trap field to indicate
 * whether we have saved the full set of registers, or only a
 * partial set.  A 1 there means the partial set.
 * On 4xx we use the next bit to indicate whether the exception
 * is a critical exception (1 means it is).
 */
#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
#ifndef __powerpc64__
#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
#define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0)
#define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0)
#endif /* ! __powerpc64__ */
#define TRAP(regs) ((regs)->trap & ~0xF)
#ifdef __powerpc64__
#define NV_REG_POISON 0xdeadbeefdeadbeefUL
#define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1)
#else
#define NV_REG_POISON 0xdeadbeef
#define CHECK_FULL_REGS(regs) \
do { \
	if ((regs)->trap & 1) \
		printk(KERN_CRIT "%s: partial register set\n", __func__); \
} while (0)
#endif /* __powerpc64__ */
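/*
 * Illustrative sketch: ptrace-style register accessors are expected to
 * check that the full register set was saved before reading non-volatile
 * registers out of the saved frame, roughly:
 *
 *	CHECK_FULL_REGS(task->thread.regs);
 *	val = task->thread.regs->gpr[regno];
 */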
#define arch_has_single_step() (1)
#define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601))
#define ARCH_HAS_USER_SINGLE_STEP_INFO
/*
 * kprobe-based event tracer support
 */
#include <linux/stddef.h>
#include <linux/thread_info.h>
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))
/**
 * regs_get_register() - get register value from its offset
 * @regs: pt_regs from which the register value is taken
 * @offset: offset of the register within struct pt_regs.
 *
 * regs_get_register() returns the value of the register whose offset from
 * @regs is @offset. If @offset is bigger than MAX_REG_OFFSET, this
 * returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
	return *(unsigned long *)((unsigned long)regs + offset);
}
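/*
 * Illustrative sketch (kprobe/event-tracer style usage): look a register up
 * by name, then read it through regs_get_register().  "nip" is just an
 * example register name:
 *
 *	int offs = regs_query_register_offset("nip");
 *
 *	if (offs >= 0)
 *		pr_debug("nip = %lx\n", regs_get_register(regs, offs));
 */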
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @addr: address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s). If @addr is within the kernel stack, it returns true.
 * If not, returns false.
 */
static inline bool regs_within_kernel_stack(struct pt_regs *regs,
					    unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT within the kernel
 * stack, this returns 0.
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
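/*
 * Illustrative sketch: the kprobe-based event tracer uses this to fetch
 * values spilled to the kernel stack, e.g. the word two slots above the
 * stack pointer:
 *
 *	unsigned long val = regs_get_kernel_stack_nth(regs, 2);
 *
 * A zero return can mean either "the value was zero" or "the slot is not
 * within the kernel stack", so callers must treat 0 with care.
 */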
#endif /* __ASSEMBLY__ */
#ifdef __powerpc64__
#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */
#define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */
#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
#define PT_VRSAVE_32 (PT_VR0 + 33*4)
#define PT_VSR0_32 300 /* each VSR reg occupies 4 slots in 32-bit */
#endif /* __powerpc64__ */
#endif /* _ASM_POWERPC_PTRACE_H */