ARM: 8678/1: ftrace: Adds support for CONFIG_DYNAMIC_FTRACE_WITH_REGS

The DYNAMIC_FTRACE_WITH_REGS configuration makes it possible for a
ftrace operation to specify if registers need to be saved/restored by
the ftrace handler. This is needed by kgraft and possibly other
ftrace-based tools, and the ARM architecture is currently lacking
this feature. It would also be the first step to support the
"Kprobes-on-ftrace" optimization on ARM.

This patch introduces a new ftrace handler that stores the registers
on the stack before calling the next stage. The registers are restored
from the stack before going back to the instrumented function.

A side-effect of this patch is to activate the support for
ftrace_modify_call() as it defines ARCH_SUPPORTS_FTRACE_OPS for the
ARM architecture.

Signed-off-by: Abel Vesa <abelvesa@linux.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
This commit is contained in:
Abel Vesa 2017-05-26 21:49:47 +01:00 committed by Russell King
parent 1515b186c2
commit 620176f335
4 changed files with 142 additions and 0 deletions

View File

@ -56,6 +56,7 @@ config ARM
select HAVE_DMA_API_DEBUG select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS if MMU select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_EXIT_THREAD select HAVE_EXIT_THREAD
select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL) select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)

View File

@ -1,6 +1,10 @@
#ifndef _ASM_ARM_FTRACE #ifndef _ASM_ARM_FTRACE
#define _ASM_ARM_FTRACE #define _ASM_ARM_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
 * Advertise to the ftrace core that this arch's trampolines pass the
 * full handler arguments (ip, parent ip, ftrace_ops, pt_regs) — see the
 * r0-r3 setup in __ftrace_regs_caller — enabling regs-aware ftrace_ops.
 */
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(__gnu_mcount_nc)) #define MCOUNT_ADDR ((unsigned long)(__gnu_mcount_nc))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */

View File

@ -92,12 +92,95 @@
2: mcount_exit 2: mcount_exit
.endm .endm
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
.macro __ftrace_regs_caller

@ Regs-saving tracer entry.  On entry (per the mcount mechanism noted
@ below): [sp] holds the instrumented function's original LR, and lr
@ holds the return address into the instrumented function.  Build a
@ struct pt_regs on the stack, call the ftrace handler with it, then
@ restore every register and resume the instrumented function.

sub sp, sp, #8 @ space for PC and CPSR OLD_R0,
@ OLD_R0 will overwrite previous LR

add ip, sp, #12 @ move in IP the value of SP as it was
@ before the push {lr} of the mcount mechanism

str lr, [sp, #0] @ store LR instead of PC

ldr lr, [sp, #8] @ get previous LR

str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR

stmdb sp!, {ip, lr}
stmdb sp!, {r0-r11, lr}

@ stack content at this point (byte offsets into the pt_regs frame):
@ 0 4 48 52 56 60 64 68 72
@ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |

mov r3, sp @ struct pt_regs*

ldr r2, =function_trace_op
ldr r2, [r2] @ pointer to the current
@ function tracing op

ldr r1, [sp, #S_LR] @ lr of instrumented func

ldr lr, [sp, #S_PC] @ get LR

mcount_adjust_addr r0, lr @ instrumented function

@ Patched call site: ftrace_update_ftrace_func() rewrites this to call
@ the active tracer; ftrace_stub is the disabled default.
.globl ftrace_regs_call
ftrace_regs_call:
bl ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@ nop placeholder, patched to branch to ftrace_graph_regs_caller when
@ graph tracing is enabled (see ftrace_modify_graph_caller).
.globl ftrace_graph_regs_call
ftrace_graph_regs_call:
mov r0, r0
#endif

@ pop saved regs
ldmia sp!, {r0-r12} @ restore r0 through r12
ldr ip, [sp, #8] @ restore PC
ldr lr, [sp, #4] @ restore LR
ldr sp, [sp, #0] @ restore SP
mov pc, ip @ return
.endm
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.macro __ftrace_graph_regs_caller

@ Graph-tracer entry for the regs path.  Reached via the patched
@ ftrace_graph_regs_call site inside __ftrace_regs_caller, so the
@ pt_regs frame built there is still live on the stack; hand parent
@ LR slot, adjusted function address and frame pointer to
@ prepare_ftrace_return, then unwind the frame exactly as
@ __ftrace_regs_caller does.

sub r0, fp, #4 @ lr of instrumented routine (parent)
@ called from __ftrace_regs_caller

ldr r1, [sp, #S_PC] @ instrumented routine (func)
mcount_adjust_addr r1, r1

mov r2, fp @ frame pointer
bl prepare_ftrace_return

@ pop registers saved in ftrace_regs_caller
ldmia sp!, {r0-r12} @ restore r0 through r12
ldr ip, [sp, #8] @ restore PC
ldr lr, [sp, #4] @ restore LR
ldr sp, [sp, #0] @ restore SP
mov pc, ip @ return
.endm
#endif
#endif
.macro __ftrace_caller suffix .macro __ftrace_caller suffix
mcount_enter mcount_enter
mcount_get_lr r1 @ lr of instrumented func mcount_get_lr r1 @ lr of instrumented func
mcount_adjust_addr r0, lr @ instrumented function mcount_adjust_addr r0, lr @ instrumented function
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ldr r2, =function_trace_op
ldr r2, [r2] @ pointer to the current
@ function tracing op
mov r3, #0 @ regs is NULL
#endif
.globl ftrace_call\suffix .globl ftrace_call\suffix
ftrace_call\suffix: ftrace_call\suffix:
bl ftrace_stub bl ftrace_stub
@ -212,6 +295,15 @@ UNWIND(.fnstart)
__ftrace_caller __ftrace_caller
UNWIND(.fnend) UNWIND(.fnend)
ENDPROC(ftrace_caller) ENDPROC(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_regs_caller)
UNWIND(.fnstart)
__ftrace_regs_caller
UNWIND(.fnend)
ENDPROC(ftrace_regs_caller)
#endif
#endif #endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@ -220,6 +312,14 @@ UNWIND(.fnstart)
__ftrace_graph_caller __ftrace_graph_caller
UNWIND(.fnend) UNWIND(.fnend)
ENDPROC(ftrace_graph_caller) ENDPROC(ftrace_graph_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_graph_regs_caller)
UNWIND(.fnstart)
__ftrace_graph_regs_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_regs_caller)
#endif
#endif #endif
.purgem mcount_enter .purgem mcount_enter

View File

@ -141,6 +141,15 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
ret = ftrace_modify_code(pc, 0, new, false); ret = ftrace_modify_code(pc, 0, new, false);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
if (!ret) {
pc = (unsigned long)&ftrace_regs_call;
new = ftrace_call_replace(pc, (unsigned long)func);
ret = ftrace_modify_code(pc, 0, new, false);
}
#endif
#ifdef CONFIG_OLD_MCOUNT #ifdef CONFIG_OLD_MCOUNT
if (!ret) { if (!ret) {
pc = (unsigned long)&ftrace_call_old; pc = (unsigned long)&ftrace_call_old;
@ -159,11 +168,29 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
unsigned long ip = rec->ip; unsigned long ip = rec->ip;
old = ftrace_nop_replace(rec); old = ftrace_nop_replace(rec);
new = ftrace_call_replace(ip, adjust_address(rec, addr)); new = ftrace_call_replace(ip, adjust_address(rec, addr));
return ftrace_modify_code(rec->ip, old, new, true); return ftrace_modify_code(rec->ip, old, new, true);
} }
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
 * Redirect the patched call at rec->ip from old_addr to addr.  Both the
 * expected current instruction and its replacement are encoded first so
 * ftrace_modify_code() can validate the site before rewriting it.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long pc = rec->ip;
	unsigned long expected = ftrace_call_replace(pc, adjust_address(rec, old_addr));
	unsigned long replacement = ftrace_call_replace(pc, adjust_address(rec, addr));

	return ftrace_modify_code(pc, expected, replacement, true);
}
#endif
int ftrace_make_nop(struct module *mod, int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr) struct dyn_ftrace *rec, unsigned long addr)
{ {
@ -231,6 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
extern unsigned long ftrace_graph_call; extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old; extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void); extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);
static int __ftrace_modify_caller(unsigned long *callsite, static int __ftrace_modify_caller(unsigned long *callsite,
void (*func) (void), bool enable) void (*func) (void), bool enable)
@ -253,6 +282,14 @@ static int ftrace_modify_graph_caller(bool enable)
ftrace_graph_caller, ftrace_graph_caller,
enable); enable);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
if (!ret)
ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
ftrace_graph_regs_caller,
enable);
#endif
#ifdef CONFIG_OLD_MCOUNT #ifdef CONFIG_OLD_MCOUNT
if (!ret) if (!ret)
ret = __ftrace_modify_caller(&ftrace_graph_call_old, ret = __ftrace_modify_caller(&ftrace_graph_call_old,