Mirror of https://github.com/torvalds/linux.git
commit d57c5d51a3

If the kernel is compiled with gcc 4.6.0, which supports -mfentry, then
use that instead of mcount. With mcount, frame pointers are forced by the
-pg option and we get something like:

<can_vma_merge_before>:
       55                      push   %rbp
       48 89 e5                mov    %rsp,%rbp
       53                      push   %rbx
       41 51                   push   %r9
       e8 fe 6a 39 00          callq  ffffffff81483d00 <mcount>
       31 c0                   xor    %eax,%eax
       48 89 fb                mov    %rdi,%rbx
       48 89 d7                mov    %rdx,%rdi
       48 33 73 30             xor    0x30(%rbx),%rsi
       48 f7 c6 ff ff ff f7    test   $0xfffffffff7ffffff,%rsi

With -mfentry, frame pointers are no longer forced and the call looks
like this:

<can_vma_merge_before>:
       e8 33 af 37 00          callq  ffffffff81461b40 <__fentry__>
       53                      push   %rbx
       48 89 fb                mov    %rdi,%rbx
       31 c0                   xor    %eax,%eax
       48 89 d7                mov    %rdx,%rdi
       41 51                   push   %r9
       48 33 73 30             xor    0x30(%rbx),%rsi
       48 f7 c6 ff ff ff f7    test   $0xfffffffff7ffffff,%rsi

This places the ftrace hook at the beginning of the function, before a
stack frame is set up, which allows the function callbacks to access the
function's parameters. Since kprobes can now use function tracing (at
least on x86), this speeds up kprobe hooks placed at the beginning of a
function.

Link: http://lkml.kernel.org/r/20120807194100.130477900@goodmis.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
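The difference is easy to reproduce outside the kernel. Below is a
minimal sketch (the file name demo.c and the function demo_add are made
up for illustration): compile any small C function with and without
-mfentry and compare where the profiling call lands. Since -S stops
after assembly generation, no mcount or __fentry__ implementation is
needed to link.

/* demo.c - compare the two profiling prologues:
 *
 *   gcc -O2 -pg -S demo.c -o demo-mcount.s
 *   gcc -O2 -pg -mfentry -S demo.c -o demo-fentry.s
 *
 * demo-mcount.s calls mcount after the function prologue (and, with
 * older gcc, -pg alone also forces the frame-pointer setup shown in
 * the dump above); demo-fentry.s makes "call __fentry__" the very
 * first instruction, before any prologue.
 */
long demo_add(long a, long b)
{
        return a + b;
}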
77 lines · 1.6 KiB · C
#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H

#ifdef __ASSEMBLY__

        /* skip is set if the stack was already partially adjusted */
        .macro MCOUNT_SAVE_FRAME skip=0
         /*
          * We add enough stack to save all regs.
          */
        subq $(SS+8-\skip), %rsp
        movq %rax, RAX(%rsp)
        movq %rcx, RCX(%rsp)
        movq %rdx, RDX(%rsp)
        movq %rsi, RSI(%rsp)
        movq %rdi, RDI(%rsp)
        movq %r8, R8(%rsp)
        movq %r9, R9(%rsp)
        /* Move RIP to its proper location */
        movq SS+8(%rsp), %rdx
        movq %rdx, RIP(%rsp)
        .endm

        .macro MCOUNT_RESTORE_FRAME skip=0
        movq R9(%rsp), %r9
        movq R8(%rsp), %r8
        movq RDI(%rsp), %rdi
        movq RSI(%rsp), %rsi
        movq RDX(%rsp), %rdx
        movq RCX(%rsp), %rcx
        movq RAX(%rsp), %rax
        addq $(SS+8-\skip), %rsp
        .endm

#endif

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CC_USING_FENTRY
# define MCOUNT_ADDR            ((long)(__fentry__))
#else
# define MCOUNT_ADDR            ((long)(mcount))
#endif
#define MCOUNT_INSN_SIZE        5 /* sizeof mcount call */

#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
#endif

#ifndef __ASSEMBLY__
extern void mcount(void);
extern atomic_t modifying_ftrace_code;
extern void __fentry__(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
        /*
         * addr is the address of the mcount call instruction.
         * recordmcount does the necessary offset calculation.
         */
        return addr;
}

#ifdef CONFIG_DYNAMIC_FTRACE

struct dyn_arch_ftrace {
        /* No extra data needed for x86 */
};

int ftrace_int3_handler(struct pt_regs *regs);

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */

#endif /* _ASM_X86_FTRACE_H */
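Because the __fentry__ call is the first instruction of a traced
function, a callback registered with FTRACE_OPS_FL_SAVE_REGS
(advertised above via ARCH_SUPPORTS_FTRACE_SAVE_REGS) receives the
pt_regs snapshot taken by MCOUNT_SAVE_FRAME while the argument
registers are still live. Here is a minimal sketch of such a callback
as a kernel module, assuming the ftrace_ops API of this era (later
kernels pass struct ftrace_regs instead of pt_regs); demo_callback and
demo_ops are illustrative names, not kernel symbols:

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/ptrace.h>

/* Called at function entry, before the traced function's prologue.
 * With FTRACE_OPS_FL_SAVE_REGS set, MCOUNT_SAVE_FRAME has filled in
 * pt_regs, so the argument registers have not been clobbered yet.
 */
static void demo_callback(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *regs)
{
        /* First integer argument of the traced function (x86_64 ABI) */
        unsigned long arg0 = regs->di;

        pr_info("func %ps called from %ps, arg0=%lx\n",
                (void *)ip, (void *)parent_ip, arg0);
}

static struct ftrace_ops demo_ops = {
        .func  = demo_callback,
        .flags = FTRACE_OPS_FL_SAVE_REGS,
};

static int __init demo_init(void)
{
        /* Filtering is omitted for brevity; a real module would call
         * ftrace_set_filter() first to avoid hooking every function. */
        return register_ftrace_function(&demo_ops);
}

static void __exit demo_exit(void)
{
        unregister_ftrace_function(&demo_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");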