mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 20:22:09 +00:00
This contains 3 changes:
- Removal of code I accidentally applied when doing a minor fix up to a patch, and then using "git commit -a --amend", which pulled in some other changes I was playing with. - Remove an unused variable in trace_events_inject code - Fix to function graph tracer when it traces a ftrace direct function. It will now ignore tracing a function that has a ftrace direct trampoline attached. This is needed for eBPF to use the ftrace direct code. -----BEGIN PGP SIGNATURE----- iIoEABYIADIWIQRRSw7ePDh/lE+zeZMp5XQQmuv6qgUCXfD/thQccm9zdGVkdEBn b29kbWlzLm9yZwAKCRAp5XQQmuv6qoo2AP4j7ONw7BTmMyo+GdYqPPntBeDnClHK vfMKrgK1j5BxYgEA7LgkwuUT9bcyLjfJVcyfeW67rB2PtmovKTWnKihFOwI= =DZ6N -----END PGP SIGNATURE----- Merge tag 'trace-v5.5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace Pull tracing fixes from Steven Rostedt: - Remove code I accidentally applied when doing a minor fix up to a patch, and then using "git commit -a --amend", which pulled in some other changes I was playing with. - Remove an unused variable in trace_events_inject code - Fix function graph tracer when it traces a ftrace direct function. It will now ignore tracing a function that has a ftrace direct trampoline attached. This is needed for eBPF to use the ftrace direct code. * tag 'trace-v5.5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: ftrace: Fix function_graph tracer interaction with BPF trampoline tracing: remove set but not used variable 'buffer' module: Remove accidental change of module_enable_x()
This commit is contained in:
commit
6674fdb25a
@ -1042,20 +1042,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
|
||||
if (unlikely(atomic_read(&current->tracing_graph_pause)))
|
||||
return;
|
||||
|
||||
/*
|
||||
* If the return location is actually pointing directly to
|
||||
* the start of a direct trampoline (if we trace the trampoline
|
||||
* it will still be offset by MCOUNT_INSN_SIZE), then the
|
||||
* return address is actually off by one word, and we
|
||||
* need to adjust for that.
|
||||
*/
|
||||
if (ftrace_direct_func_count) {
|
||||
if (ftrace_find_direct_func(self_addr + MCOUNT_INSN_SIZE)) {
|
||||
self_addr = *parent;
|
||||
parent++;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Protect against fault, even if it shouldn't
|
||||
* happen. This tool is too much intrusive to
|
||||
|
@ -264,6 +264,7 @@ int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
|
||||
struct dyn_ftrace *rec,
|
||||
unsigned long old_addr,
|
||||
unsigned long new_addr);
|
||||
unsigned long ftrace_find_rec_direct(unsigned long ip);
|
||||
#else
|
||||
# define ftrace_direct_func_count 0
|
||||
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
|
||||
@ -290,6 +291,10 @@ static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
|
||||
|
||||
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
|
||||
|
@ -3730,6 +3730,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
|
||||
|
||||
module_enable_ro(mod, false);
|
||||
module_enable_nx(mod);
|
||||
module_enable_x(mod);
|
||||
|
||||
/* Mark state as coming so strong_try_module_get() ignores us,
|
||||
* but kallsyms etc. can see us. */
|
||||
@ -3752,11 +3753,6 @@ static int prepare_coming_module(struct module *mod)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Make module executable after ftrace is enabled */
|
||||
mutex_lock(&module_mutex);
|
||||
module_enable_x(mod);
|
||||
mutex_unlock(&module_mutex);
|
||||
|
||||
blocking_notifier_call_chain(&module_notify_list,
|
||||
MODULE_STATE_COMING, mod);
|
||||
return 0;
|
||||
|
@ -101,6 +101,15 @@ int function_graph_enter(unsigned long ret, unsigned long func,
|
||||
{
|
||||
struct ftrace_graph_ent trace;
|
||||
|
||||
/*
|
||||
* Skip graph tracing if the return location is served by direct trampoline,
|
||||
* since call sequence and return addresses are unpredictable anymore.
|
||||
* Ex: BPF trampoline may call original function and may skip frame
|
||||
* depending on type of BPF programs attached.
|
||||
*/
|
||||
if (ftrace_direct_func_count &&
|
||||
ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
|
||||
return -EBUSY;
|
||||
trace.func = func;
|
||||
trace.depth = ++current->curr_ret_depth;
|
||||
|
||||
|
@ -2364,7 +2364,7 @@ int ftrace_direct_func_count;
|
||||
* Search the direct_functions hash to see if the given instruction pointer
|
||||
* has a direct caller attached to it.
|
||||
*/
|
||||
static unsigned long find_rec_direct(unsigned long ip)
|
||||
unsigned long ftrace_find_rec_direct(unsigned long ip)
|
||||
{
|
||||
struct ftrace_func_entry *entry;
|
||||
|
||||
@ -2380,7 +2380,7 @@ static void call_direct_funcs(unsigned long ip, unsigned long pip,
|
||||
{
|
||||
unsigned long addr;
|
||||
|
||||
addr = find_rec_direct(ip);
|
||||
addr = ftrace_find_rec_direct(ip);
|
||||
if (!addr)
|
||||
return;
|
||||
|
||||
@ -2393,11 +2393,6 @@ struct ftrace_ops direct_ops = {
|
||||
| FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
|
||||
| FTRACE_OPS_FL_PERMANENT,
|
||||
};
|
||||
#else
|
||||
static inline unsigned long find_rec_direct(unsigned long ip)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
|
||||
|
||||
/**
|
||||
@ -2417,7 +2412,7 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
|
||||
|
||||
if ((rec->flags & FTRACE_FL_DIRECT) &&
|
||||
(ftrace_rec_count(rec) == 1)) {
|
||||
addr = find_rec_direct(rec->ip);
|
||||
addr = ftrace_find_rec_direct(rec->ip);
|
||||
if (addr)
|
||||
return addr;
|
||||
WARN_ON_ONCE(1);
|
||||
@ -2458,7 +2453,7 @@ unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
|
||||
|
||||
/* Direct calls take precedence over trampolines */
|
||||
if (rec->flags & FTRACE_FL_DIRECT_EN) {
|
||||
addr = find_rec_direct(rec->ip);
|
||||
addr = ftrace_find_rec_direct(rec->ip);
|
||||
if (addr)
|
||||
return addr;
|
||||
WARN_ON_ONCE(1);
|
||||
@ -3604,7 +3599,7 @@ static int t_show(struct seq_file *m, void *v)
|
||||
if (rec->flags & FTRACE_FL_DIRECT) {
|
||||
unsigned long direct;
|
||||
|
||||
direct = find_rec_direct(rec->ip);
|
||||
direct = ftrace_find_rec_direct(rec->ip);
|
||||
if (direct)
|
||||
seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
|
||||
}
|
||||
@ -5008,7 +5003,7 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
|
||||
mutex_lock(&direct_mutex);
|
||||
|
||||
/* See if there's a direct function at @ip already */
|
||||
if (find_rec_direct(ip))
|
||||
if (ftrace_find_rec_direct(ip))
|
||||
goto out_unlock;
|
||||
|
||||
ret = -ENODEV;
|
||||
@ -5027,7 +5022,7 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
|
||||
if (ip != rec->ip) {
|
||||
ip = rec->ip;
|
||||
/* Need to check this ip for a direct. */
|
||||
if (find_rec_direct(ip))
|
||||
if (ftrace_find_rec_direct(ip))
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
|
@ -17,12 +17,10 @@ static int
|
||||
trace_inject_entry(struct trace_event_file *file, void *rec, int len)
|
||||
{
|
||||
struct trace_event_buffer fbuffer;
|
||||
struct ring_buffer *buffer;
|
||||
int written = 0;
|
||||
void *entry;
|
||||
|
||||
rcu_read_lock_sched();
|
||||
buffer = file->tr->trace_buffer.buffer;
|
||||
entry = trace_event_buffer_reserve(&fbuffer, file, len);
|
||||
if (entry) {
|
||||
memcpy(entry, rec, len);
|
||||
|
Loading…
Reference in New Issue
Block a user