#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H

#ifdef CONFIG_FUNCTION_TRACER

ftrace/x86: Add support for -mfentry to x86_64

If the kernel is compiled with gcc 4.6.0 or later, which supports -mfentry,
then use that instead of mcount.

With mcount, frame pointers are forced by the -pg option and we get
something like:

<can_vma_merge_before>:
55 push %rbp
48 89 e5 mov %rsp,%rbp
53 push %rbx
41 51 push %r9
e8 fe 6a 39 00 callq ffffffff81483d00 <mcount>
31 c0 xor %eax,%eax
48 89 fb mov %rdi,%rbx
48 89 d7 mov %rdx,%rdi
48 33 73 30 xor 0x30(%rbx),%rsi
48 f7 c6 ff ff ff f7 test $0xfffffffff7ffffff,%rsi
With -mfentry, frame pointers are no longer forced and the call looks
like this:
<can_vma_merge_before>:
e8 33 af 37 00 callq ffffffff81461b40 <__fentry__>
53 push %rbx
48 89 fb mov %rdi,%rbx
31 c0 xor %eax,%eax
48 89 d7 mov %rdx,%rdi
41 51 push %r9
48 33 73 30 xor 0x30(%rbx),%rsi
48 f7 c6 ff ff ff f7 test $0xfffffffff7ffffff,%rsi

This adds the ftrace hook at the very beginning of the function, before a
frame is set up, which allows the function callbacks to access the
function's parameters. As kprobes can now use function tracing (at least
on x86), this also speeds up kprobe hooks placed at the start of a
function.

Link: http://lkml.kernel.org/r/20120807194100.130477900@goodmis.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
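
The payoff described above can be sketched with a small module. This is only
an illustration, not code from this patch: it assumes the four-argument
ftrace_ops callback and the FTRACE_OPS_FL_SAVE_REGS flag of this era, and the
names my_ops, my_callback and the chosen filter function are arbitrary.
Because __fentry__ fires before the prologue, the callback can still see the
traced function's first argument in %rdi through the saved pt_regs.

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>

/* Entry callback; 'regs' is only valid because the ops below set
 * FTRACE_OPS_FL_SAVE_REGS.  Marked notrace so the callback itself
 * cannot recurse into the tracer. */
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* The hook runs before any frame is set up, so the first integer
	 * argument is still live in %rdi (regs->di on x86_64). */
	if (regs)
		pr_info("%ps(arg0=%#lx) called from %ps\n",
			(void *)ip, regs->di, (void *)parent_ip);
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};

static int __init my_init(void)
{
	/* Trace a single function; the name matches the disassembly above. */
	ftrace_set_filter(&my_ops, "can_vma_merge_before",
			  strlen("can_vma_merge_before"), 0);
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

ftrace_set_filter() limits tracing to the named function so the log is not
flooded by every traced call in the kernel.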

#ifdef CC_USING_FENTRY
# define MCOUNT_ADDR ((long)(__fentry__))
#else
# define MCOUNT_ADDR ((long)(mcount))
#endif
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */

#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif

#ifndef __ASSEMBLY__
extern void mcount(void);
extern atomic_t modifying_ftrace_code;
extern void __fentry__(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * addr is the address of the mcount call instruction.
	 * recordmcount does the necessary offset calculation.
	 */
	return addr;
}

#ifdef CONFIG_DYNAMIC_FTRACE

struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};

int ftrace_int3_handler(struct pt_regs *regs);

#define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */

tracing/syscalls: Allow archs to ignore tracing compat syscalls

The tracing of ia32 compat system calls has been a bit of a pain, as they
use different system call numbers than their 64-bit equivalents.

I wrote a simple 'lls' program that lists files. I compiled it as an i686
ELF binary and ran it on an x86_64 box. This is the result:

echo 0 > /debug/tracing/tracing_on
echo 1 > /debug/tracing/events/syscalls/enable
echo 1 > /debug/tracing/tracing_on ; ./lls ; echo 0 > /debug/tracing/tracing_on

grep lls /debug/tracing/trace
[.. skipping calls before TS_COMPAT is set ...]
lls-1127 [005] d... 936.409188: sys_recvfrom(fd: 0, ubuf: 4d560fc4, size: 0, flags: 8048034, addr: 8, addr_len: f7700420)
lls-1127 [005] d... 936.409190: sys_recvfrom -> 0x8a77000
lls-1127 [005] d... 936.409211: sys_lgetxattr(pathname: 0, name: 1000, value: 3, size: 22)
lls-1127 [005] d... 936.409215: sys_lgetxattr -> 0xf76ff000
lls-1127 [005] d... 936.409223: sys_dup2(oldfd: 4d55ae9b, newfd: 4)
lls-1127 [005] d... 936.409228: sys_dup2 -> 0xfffffffffffffffe
lls-1127 [005] d... 936.409236: sys_newfstat(fd: 4d55b085, statbuf: 80000)
lls-1127 [005] d... 936.409242: sys_newfstat -> 0x3
lls-1127 [005] d... 936.409243: sys_removexattr(pathname: 3, name: ffcd0060)
lls-1127 [005] d... 936.409244: sys_removexattr -> 0x0
lls-1127 [005] d... 936.409245: sys_lgetxattr(pathname: 0, name: 19614, value: 1, size: 2)
lls-1127 [005] d... 936.409248: sys_lgetxattr -> 0xf76e5000
lls-1127 [005] d... 936.409248: sys_newlstat(filename: 3, statbuf: 19614)
lls-1127 [005] d... 936.409249: sys_newlstat -> 0x0
lls-1127 [005] d... 936.409262: sys_newfstat(fd: f76fb588, statbuf: 80000)
lls-1127 [005] d... 936.409279: sys_newfstat -> 0x3
lls-1127 [005] d... 936.409279: sys_close(fd: 3)
lls-1127 [005] d... 936.421550: sys_close -> 0x200
lls-1127 [005] d... 936.421558: sys_removexattr(pathname: 3, name: ffcd00d0)
lls-1127 [005] d... 936.421560: sys_removexattr -> 0x0
lls-1127 [005] d... 936.421569: sys_lgetxattr(pathname: 4d564000, name: 1b1abc, value: 5, size: 802)
lls-1127 [005] d... 936.421574: sys_lgetxattr -> 0x4d564000
lls-1127 [005] d... 936.421575: sys_capget(header: 4d70f000, dataptr: 1000)
lls-1127 [005] d... 936.421580: sys_capget -> 0x0
lls-1127 [005] d... 936.421580: sys_lgetxattr(pathname: 4d710000, name: 3000, value: 3, size: 812)
lls-1127 [005] d... 936.421589: sys_lgetxattr -> 0x4d710000
lls-1127 [005] d... 936.426130: sys_lgetxattr(pathname: 4d713000, name: 2abc, value: 3, size: 32)
lls-1127 [005] d... 936.426141: sys_lgetxattr -> 0x4d713000
lls-1127 [005] d... 936.426145: sys_newlstat(filename: 3, statbuf: f76ff3f0)
lls-1127 [005] d... 936.426146: sys_newlstat -> 0x0
lls-1127 [005] d... 936.431748: sys_lgetxattr(pathname: 0, name: 1000, value: 3, size: 22)

Obviously I am not calling newfstat with an fd of 4d55b085. The reported
calls are clearly incorrect and confusing.

Other efforts have been made to fix this:

https://lkml.org/lkml/2012/3/26/367

But the real solution is to rewrite the syscall internals and come up with
a proper fix, one that doesn't require all the kludge of the current
approach. Thus for now, instead of outputting incorrect data, simply
ignore these syscalls. With this patch the same test now gives:

#> grep lls /debug/tracing/trace
#>

Compat system calls are simply not traced. If users need compat syscalls,
they should use the raw syscall tracepoints instead.

For an architecture to have its compat syscalls ignored, it must define
ARCH_TRACE_IGNORE_COMPAT_SYSCALLS (done in asm/ftrace.h) and also define an
arch_trace_is_compat_syscall() function that returns true if tracing of the
syscall should be skipped for the current task.

I want to stress that this change does not affect actual syscalls in any
way, shape or form. It is only used within the tracing system and does not
interfere with the syscall logic at all. The changes are consolidated
nicely into trace_syscalls.c and asm/ftrace.h.

I also had to make one small modification to asm/thread_info.h: removing
its include of asm/ftrace.h. Because asm/ftrace.h required
current_thread_info(), it was causing include hell. That include was added
back in 2008 when the function graph tracer was introduced:

commit caf4b323 "tracing, x86: add low level support for ftrace return tracing"

It does not need to be included there.

Link: http://lkml.kernel.org/r/1360703939.21867.99.camel@gandalf.local.home
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
#include <asm/compat.h>

/*
 * Because ia32 syscalls do not map to x86_64 syscall numbers,
 * this screws up the trace output when tracing an ia32 task.
 * Instead of reporting bogus syscalls, just do not trace them.
 *
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		return true;
	return false;
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */
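
For reference, this is roughly how a tracer is expected to honor the hook
above. It is a hedged sketch, not the actual code in
kernel/trace/trace_syscalls.c: the function name and the surrounding
plumbing are illustrative only.

/* Illustrative syscall-entry handler: bail out early for compat tasks
 * so ia32 syscall numbers are never matched against 64-bit metadata. */
static void sketch_syscall_trace_enter(struct pt_regs *regs, long id)
{
#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
	if (arch_trace_is_compat_syscall(regs))
		return;
#endif
	/* ... look up the metadata for syscall 'id' and emit the event ... */
}

Architectures that do not define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS compile
the check away and keep their existing behavior.
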
#endif /* _ASM_X86_FTRACE_H */