linux/arch/x86/kernel/ftrace.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea;
 * Mathieu Desnoyers, for suggesting postponing the modifications;
 * and Arjan van de Ven, for keeping me straight and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <trace/syscall.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE
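
/*
 * Set non-zero while text_mutex is held between
 * ftrace_arch_code_modify_prepare() and _post_process(); it makes
 * ftrace_modify_code_direct() queue changes via text_poke_queue()
 * instead of writing them immediately with text_poke_early().
 */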
static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
        /*
         * Need to grab text_mutex to prevent a race from module loading
         * and live kernel patching from changing the text permissions while
         * ftrace has it set to "read/write".
         */
        mutex_lock(&text_mutex);
        ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
        /*
         * ftrace_make_{call,nop}() may be called during
         * module load, and we need to finish the text_poke_queue()
         * that they do, here.
         */
        text_poke_finish();
        ftrace_poke_late = 0;
        mutex_unlock(&text_mutex);
}
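
/*
 * Both the NOP and the call patched in at an mcount/fentry site are
 * MCOUNT_INSN_SIZE (5) bytes on x86: x86_nops[5] is the 5-byte NOP, and
 * the call is "e8 <rel32>" with rel32 = addr - (ip + 5).
 */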
static const char *ftrace_nop_replace(void)
{
        return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        /*
         * No need to translate into a callthunk. The trampoline does
         * the depth accounting itself.
         */
        return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
        char cur_code[MCOUNT_INSN_SIZE];

        /*
         * Note:
         * We are paranoid about modifying text, as if a bug were to happen, it
         * could cause us to read or write to someplace that could cause harm.
         * Carefully read and modify the code with copy_from_kernel_nofault(),
         * and make sure what we read is what we expected it to be before
         * modifying it.
         */

        /* read the text we want to modify */
        if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
                WARN_ON(1);
                return -EFAULT;
        }

        /* Make sure it is what we expect it to be */
        if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
                ftrace_expected = old_code;
                WARN_ON(1);
                return -EINVAL;
        }

        return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
                          const char *new_code)
{
        int ret = ftrace_verify_code(ip, old_code);
        if (ret)
                return ret;

        /* replace the text with the new text */
        if (ftrace_poke_late)
                text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
        else
                text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
        return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        const char *new, *old;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();

        /*
         * On boot up, and when modules are loaded, the MCOUNT_ADDR
         * is converted to a nop, and will never become MCOUNT_ADDR
         * again. This code is either running before SMP (on boot up)
         * or before the code will ever be executed (module load).
         * We do not want to use the breakpoint version in this case,
         * just modify the code directly.
         */
        if (addr == MCOUNT_ADDR)
                return ftrace_modify_code_direct(ip, old, new);

        /*
         * x86 overrides ftrace_replace_code -- this function will never be used
         * in this case.
         */
        WARN_ONCE(1, "invalid use of ftrace_make_nop");
        return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        const char *new, *old;

        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);

        /* Should only be called when a module is loaded */
        return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 * It is only called by __ftrace_replace_code(), which is called by
 * ftrace_replace_code() (which x86 overrides) and by ftrace_update_code(),
 * which is called to turn mcount sites into nops or nops into function
 * calls, but not to convert a function from not using regs to one that
 * uses regs, which is what ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
{
        WARN_ON(1);
        return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip;
        const char *new;

        ip = (unsigned long)(&ftrace_call);
        new = ftrace_call_replace(ip, (unsigned long)func);
        text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

        ip = (unsigned long)(&ftrace_regs_call);
        new = ftrace_call_replace(ip, (unsigned long)func);
        text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

        return 0;
}
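
/*
 * ftrace_replace_code() works in two passes: first every record is
 * verified against the expected old instruction (any mismatch aborts the
 * whole update), then all sites are patched in a batch via
 * text_poke_queue() and flushed once with text_poke_finish().
 */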
void ftrace_replace_code(int enable)
{
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;
        const char *new, *old;
        int ret;

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                switch (ftrace_test_record(rec, enable)) {
                case FTRACE_UPDATE_IGNORE:
                default:
                        continue;

                case FTRACE_UPDATE_MAKE_CALL:
                        old = ftrace_nop_replace();
                        break;

                case FTRACE_UPDATE_MODIFY_CALL:
                case FTRACE_UPDATE_MAKE_NOP:
                        old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
                        break;
                }

                ret = ftrace_verify_code(rec->ip, old);
                if (ret) {
                        ftrace_expected = old;
                        ftrace_bug(ret, rec);
                        ftrace_expected = NULL;
                        return;
                }
        }

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                switch (ftrace_test_record(rec, enable)) {
                case FTRACE_UPDATE_IGNORE:
                default:
                        continue;

                case FTRACE_UPDATE_MAKE_CALL:
                case FTRACE_UPDATE_MODIFY_CALL:
                        new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
                        break;

                case FTRACE_UPDATE_MAKE_NOP:
                        new = ftrace_nop_replace();
                        break;
                }

                text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
                ftrace_update_record(rec, enable);
        }
        text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
        ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
        return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
        module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
        return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif
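
/*
 * module_alloc() hands back memory from the module mapping area, which on
 * x86_64 lies within +/-2G of kernel text, so the trampoline can reach
 * kernel functions with ordinary rel32 call/jmp displacements.
 */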

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
        char code[OP_REF_SIZE];
        struct {
                char op[3];
                int offset;
        } __attribute__((packed));
};
#define RET_SIZE (IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))
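
/*
 * Rough layout of an allocated trampoline (for orientation):
 *
 *   trampoline:                    copy of ftrace_caller (or
 *                                  ftrace_regs_caller)
 *   trampoline + size:             return instruction (or jmp to the
 *                                  return thunk)
 *   trampoline + size + RET_SIZE:  the ftrace_ops pointer that stands in
 *                                  for the global function_trace_op
 */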
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
        unsigned long start_offset;
        unsigned long end_offset;
        unsigned long op_offset;
        unsigned long call_offset;
        unsigned long jmp_offset;
        unsigned long offset;
        unsigned long npages;
        unsigned long size;
        unsigned long *ptr;
        void *trampoline;
        void *ip, *dest;
        /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
        unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
        unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
        union ftrace_op_code_union op_ptr;
        int ret;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                start_offset = (unsigned long)ftrace_regs_caller;
                end_offset = (unsigned long)ftrace_regs_caller_end;
                op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
                call_offset = (unsigned long)ftrace_regs_call;
                jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
        } else {
                start_offset = (unsigned long)ftrace_caller;
                end_offset = (unsigned long)ftrace_caller_end;
                op_offset = (unsigned long)ftrace_caller_op_ptr;
                call_offset = (unsigned long)ftrace_call;
                jmp_offset = 0;
        }

        size = end_offset - start_offset;
        /*
         * Allocate enough space to store the ftrace_caller code, the
         * return instruction, as well as the address of the ftrace_ops
         * this trampoline is used for.
         */
        trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
        if (!trampoline)
                return 0;

        *tramp_size = size + RET_SIZE + sizeof(void *);
        npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

        /* Copy ftrace_caller onto the trampoline memory */
        ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
        if (WARN_ON(ret < 0))
                goto fail;

        ip = trampoline + size;
        if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
                __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
        else
                memcpy(ip, retq, sizeof(retq));

        /* No need to test direct calls on created trampolines */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                /* NOP the jnz 1f; but make sure it's a 2 byte jnz */
                ip = trampoline + (jmp_offset - start_offset);
                if (WARN_ON(*(char *)ip != 0x75))
                        goto fail;
                ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
                if (ret < 0)
                        goto fail;
        }

        /*
         * The address of the ftrace_ops that is used for this trampoline
         * is stored at the end of the trampoline. This will be used to
         * load the third parameter for the callback. Basically, that
         * location at the end of the trampoline takes the place of
         * the global function_trace_op variable.
         */
        ptr = (unsigned long *)(trampoline + size + RET_SIZE);
        *ptr = (unsigned long)ops;

        op_offset -= start_offset;
        memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

        /* Are we pointing to the reference? */
        if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
                goto fail;
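
        /*
         * Rewrite the movq's RIP-relative displacement so it loads the ops
         * pointer stored at the end of this trampoline rather than the
         * global function_trace_op. RIP-relative addressing is computed
         * from the end of the instruction, hence the OP_REF_SIZE term in
         * the subtraction below.
         */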
        /* Load the contents of ptr into the callback parameter */
        offset = (unsigned long)ptr;
        offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

        op_ptr.offset = offset;

        /* put in the new offset to the ftrace_ops */
        memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

        /* put in the call to the function */
        mutex_lock(&text_mutex);
        call_offset -= start_offset;
        /*
         * No need to translate into a callthunk. The trampoline does
         * the depth accounting before the call already.
         */
        dest = ftrace_ops_get_func(ops);
        memcpy(trampoline + call_offset,
               text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
               CALL_INSN_SIZE);
        mutex_unlock(&text_mutex);

        /* The ALLOC_TRAMP flag lets us know we created it */
        ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

        set_memory_rox((unsigned long)trampoline, npages);
        return (unsigned long)trampoline;
fail:
        tramp_free(trampoline);
        return 0;
}

void set_ftrace_ops_ro(void)
{
        struct ftrace_ops *ops;
        unsigned long start_offset;
        unsigned long end_offset;
        unsigned long npages;
        unsigned long size;

        do_for_each_ftrace_op(ops, ftrace_ops_list) {
                if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                        continue;

                if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                        start_offset = (unsigned long)ftrace_regs_caller;
                        end_offset = (unsigned long)ftrace_regs_caller_end;
                } else {
                        start_offset = (unsigned long)ftrace_caller;
                        end_offset = (unsigned long)ftrace_caller_end;
                }
                size = end_offset - start_offset;
                size = size + RET_SIZE + sizeof(void *);
                npages = DIV_ROUND_UP(size, PAGE_SIZE);
                set_memory_ro((unsigned long)ops->trampoline, npages);
        } while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
        unsigned long start_offset;
        unsigned long call_offset;

        if (save_regs) {
                start_offset = (unsigned long)ftrace_regs_caller;
                call_offset = (unsigned long)ftrace_regs_call;
        } else {
                start_offset = (unsigned long)ftrace_caller;
                call_offset = (unsigned long)ftrace_call;
        }

        return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
        ftrace_func_t func;
        unsigned long offset;
        unsigned long ip;
        unsigned int size;
        const char *new;

        if (!ops->trampoline) {
                ops->trampoline = create_trampoline(ops, &size);
                if (!ops->trampoline)
                        return;
                ops->trampoline_size = size;
                return;
        }

        /*
         * The ftrace_ops caller may set up its own trampoline.
         * In such a case, this code must not modify it.
         */
        if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return;

        offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
        ip = ops->trampoline + offset;
        func = ftrace_ops_get_func(ops);

        mutex_lock(&text_mutex);
        /* Do a safe modify in case the trampoline is executing */
        new = ftrace_call_replace(ip, (unsigned long)func);
        text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
        mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
        union text_poke_insn call;
        int ret;

        ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
        if (WARN_ON_ONCE(ret < 0))
                return NULL;

        /* Make sure this is a call */
        if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
                pr_warn("Expected E8, got %x\n", call.opcode);
                return NULL;
        }
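
        /*
         * A rel32 call targets the address of the next instruction plus
         * the signed 32-bit displacement; e.g. "e8 00 00 00 00" at ip
         * simply calls ip + 5.
         */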
        return ptr + CALL_INSN_SIZE + call.disp;
}

void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
                           unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
        unsigned long offset;
        bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
        void *ptr;

        if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
        defined(CONFIG_FUNCTION_GRAPH_TRACER)
                /*
                 * We only know about function graph tracer setting as static
                 * trampoline.
                 */
                if (ops->trampoline == FTRACE_GRAPH_ADDR)
                        return (void *)prepare_ftrace_return;
#endif
                return NULL;
        }

        offset = calc_trampoline_call_offset(save_regs);

        if (save_regs)
                ptr = (void *)FTRACE_REGS_ADDR + offset;
        else
                ptr = (void *)FTRACE_ADDR + offset;

        return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
        unsigned long offset;

        /* If we didn't allocate this trampoline, consider it static */
        if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return static_tramp_func(ops, rec);

        offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
        return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
        if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return;

        tramp_free((void *)ops->trampoline);
        ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
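
/*
 * Like ftrace_call_replace() above, but generates "jmp rel32" (e9)
 * instead of a call; both are MCOUNT_INSN_SIZE bytes, so the same site
 * can be toggled between the graph caller and ftrace_stub.
 */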
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
        return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
        const char *new;

        new = ftrace_jmp_replace(ip, (unsigned long)func);
        text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
        return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);

        return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);

        return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
                           unsigned long frame_pointer)
{
        unsigned long return_hooker = (unsigned long)&return_to_handler;
        int bit;

        /*
         * When resuming from suspend-to-ram, this function can be indirectly
         * called from early CPU startup code while the CPU is in real mode,
         * which would fail miserably. Make sure the stack pointer is a
         * virtual address.
         *
         * This check isn't as accurate as virt_addr_valid(), but it should be
         * good enough for this purpose, and it's fast.
         */
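
        /*
         * Kernel virtual addresses occupy the top half of the address
         * space, so they are negative when viewed as a signed long; a
         * non-negative frame address therefore means we are not on a
         * kernel stack.
         */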
        if (unlikely((long)__builtin_frame_address(0) >= 0))
                return;

        if (unlikely(ftrace_graph_is_dead()))
                return;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        bit = ftrace_test_recursion_trylock(ip, *parent);
        if (bit < 0)
                return;

        if (!function_graph_enter(*parent, ip, frame_pointer, parent))
                *parent = return_hooker;

        ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct pt_regs *regs = &fregs->regs;
        unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

        prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */