x86/ftrace: Move the ftrace specific code out of entry_32.S
The function tracing hook code for ftrace is not an entry point from userspace and does not belong in the entry_*.S files. It has already been moved out of entry_64.S. Move it out of entry_32.S into its own ftrace_32.S file.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20170323143445.645218946@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 3d82c59c6e
parent db65d7b6dc
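
Background sketch, not part of the commit itself: the "function tracing hook" being moved is the mcount call that the compiler inserts at every function entry when the kernel is built with -pg. Roughly, and assuming the classic i386 mcount ABI rather than -mfentry, a traced function looks like the illustration below (hypothetical function name). The frame pointer is already set up when the hook runs, which is why the trampolines in this diff read the parent's return address from 0x4(%ebp), and the 5-byte call is what MCOUNT_INSN_SIZE refers to.

# Illustration only: approximately what "gcc -pg" emits on 32-bit x86.
my_traced_function:
	pushl	%ebp
	movl	%esp, %ebp	# frame set up first, so 4(%ebp) is the
				# caller's (parent's) return address
	call	mcount		# the ftrace hook; 0(%esp) is the return
				# address back into my_traced_function
	# ... normal function body ...
	popl	%ebp
	ret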
arch/x86/entry/entry_32.S
@@ -35,16 +35,13 @@
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
#include <asm/frame.h>

.section .entry.text, "ax"
@@ -886,172 +883,6 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */
	movl	4*4(%esp), %eax
	movl	0x4(%ebp), %edx
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf	/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */
	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx

	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	popl	%ds
	popl	%es
	popl	%fs
	popl	%gs
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
	jmp	.Lftrace_ret

	popf
	jmp	ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	.Ltrace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
.Ltrace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	movl	0x4(%ebp), %edx
	subl	$MCOUNT_INSN_SIZE, %eax

	call	*ftrace_trace_function

	popl	%edx
	popl	%ecx
	popl	%eax
	jmp	ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
EXPORT_SYMBOL(mcount)
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	lea	0x4(%ebp), %edx
	movl	(%ebp), %ecx
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl	%eax
	pushl	%edx
	movl	%ebp, %eax
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	jmp	*%ecx
#endif

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	ASM_CLAC
arch/x86/kernel/Makefile
@@ -47,6 +47,7 @@ obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
obj-$(CONFIG_IRQ_WORK)		+= irq_work.o
obj-y				+= probe_roms.o
obj-$(CONFIG_X86_64)		+= sys_x86_64.o ftrace_64.o
obj-$(CONFIG_X86_32)		+= ftrace_32.o
obj-$(CONFIG_X86_ESPFIX64)	+= espfix_64.o
obj-$(CONFIG_SYSFS)		+= ksysfs.o
obj-y				+= bootflag.o e820.o
arch/x86/kernel/ftrace_32.S (new file, 175 lines)
@@ -0,0 +1,175 @@
/*
 * Copyright (C) 2017 Steven Rostedt, VMware Inc.
 */

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/segment.h>
#include <asm/export.h>
#include <asm/ftrace.h>

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)
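
/*
 * Context note, not part of the original commit: with
 * CONFIG_DYNAMIC_FTRACE the "call mcount" sites emitted by gcc -pg are
 * rewritten by ftrace at boot into 5-byte NOPs, and rewritten again to
 * call ftrace_caller (or ftrace_regs_caller) when a tracer is attached,
 * roughly:
 *
 *	call	mcount		# as compiled
 *	<5-byte nop>		# after boot-time patching
 *	call	ftrace_caller	# while the function is being traced
 *
 * so the bare "ret" above only covers calls made before patching.
 */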
ENTRY(ftrace_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */
	movl	4*4(%esp), %eax
	movl	0x4(%ebp), %edx
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf	/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */
	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
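
	/*
	 * Explanatory sketch, not in the original commit: after the
	 * pushes above the stack mirrors struct pt_regs, in 4-byte
	 * words from %esp:
	 *
	 *    0-6   ebx, ecx, edx, esi, edi, ebp, eax
	 *    7-10  ds, es, fs, gs
	 *    11    orig_ax  (the 0 pushed above)
	 *    12    ip       (copy of the return address into the traced
	 *                    function, i.e. regs->ip)
	 *    13    cs slot  (currently holds the pushf'd flags)
	 *    14    flags    (currently still the original return address)
	 *
	 * The moves below shuffle the flags from word 13 into word 14
	 * and write __KERNEL_CS into word 13, so the frame looks like a
	 * normal kernel pt_regs to the ftrace_ops callbacks.
	 */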
	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	popl	%ds
	popl	%es
	popl	%fs
	popl	%gs
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
	jmp	.Lftrace_ret

	popf
	jmp	ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	.Ltrace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
.Ltrace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	movl	0x4(%ebp), %edx
	subl	$MCOUNT_INSN_SIZE, %eax

	call	*ftrace_trace_function

	popl	%edx
	popl	%ecx
	popl	%eax
	jmp	ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
EXPORT_SYMBOL(mcount)
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	lea	0x4(%ebp), %edx
	movl	(%ebp), %ecx
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
END(ftrace_graph_caller)
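
/*
 * Note added for this write-up, not in the original commit: the 32-bit
 * kernel is built with -mregparm=3, so the three values loaded in
 * ftrace_graph_caller above reach prepare_ftrace_return() as its first
 * three arguments in %eax, %edx and %ecx: the address of the mcount
 * call site in the traced function (its return address minus
 * MCOUNT_INSN_SIZE), the location of the parent return address
 * (0x4(%ebp)), and the saved frame pointer ((%ebp)).
 * prepare_ftrace_return() hooks the function's return by replacing that
 * parent return address with return_to_handler.
 */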
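
/*
 * Context sketch, not part of the original commit: once
 * prepare_ftrace_return() has redirected a function's return address,
 * the traced function "returns" into return_to_handler below.  %eax and
 * %edx are saved first because they may hold the function's return
 * value; ftrace_return_to_handler() (passed the current frame pointer
 * in %eax under -mregparm=3) hands back the original return address,
 * which is jumped to after the return value is restored.
 */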
.globl return_to_handler
return_to_handler:
	pushl	%eax
	pushl	%edx
	movl	%ebp, %eax
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	jmp	*%ecx
#endif