ftrace: build fix
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 2e0f576185
parent 0fd9e0dac9
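The commit message has no body; judging from the hunks below, the fix renames the old ftrace() record helper to __ftrace(), adds a thin ftrace() wrapper that skips recording while the per-CPU data->disabled count is raised, and moves the CONFIG_FTRACE callback block (function_trace_call() plus the trace_ops registration helpers) below that helper so the callback can call __ftrace() directly. What follows is only a hedged, self-contained sketch of that guarded-wrapper split; the toy_atomic_t type, the toy_atomic_read() helper and the main() driver are inventions of the sketch, not kernel code.

/* Illustration only: toy stand-ins, not the kernel's types. */
#include <stdio.h>

typedef struct { int counter; } toy_atomic_t;     /* stands in for atomic_t */

static int toy_atomic_read(toy_atomic_t *v)
{
        return v->counter;
}

struct trace_array_cpu { toy_atomic_t disabled; };        /* per-CPU buffer state */
struct trace_array     { struct trace_array_cpu *data; };

/* __ftrace(): does the actual recording, with no guard of its own. */
static void __ftrace(struct trace_array *tr, struct trace_array_cpu *data,
                     unsigned long ip, unsigned long parent_ip,
                     unsigned long flags)
{
        (void)tr; (void)data; (void)flags;
        printf("entry: ip=%#lx parent_ip=%#lx\n", ip, parent_ip);
}

/* ftrace(): the externally visible helper becomes a guarded wrapper. */
static void ftrace(struct trace_array *tr, struct trace_array_cpu *data,
                   unsigned long ip, unsigned long parent_ip,
                   unsigned long flags)
{
        if (!toy_atomic_read(&data->disabled))
                __ftrace(tr, data, ip, parent_ip, flags);
}

int main(void)
{
        struct trace_array_cpu cpu0 = { .disabled = { 0 } };
        struct trace_array tr = { .data = &cpu0 };

        ftrace(&tr, &cpu0, 0x1000, 0x2000, 0);  /* recorded */
        cpu0.disabled.counter = 1;              /* a reader marks the buffer busy */
        ftrace(&tr, &cpu0, 0x3000, 0x4000, 0);  /* silently skipped */
        return 0;
}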
@@ -432,47 +432,6 @@ notrace void tracing_reset(struct trace_array_cpu *data)
         data->trace_tail_idx = 0;
 }
 
-#ifdef CONFIG_FTRACE
-static notrace void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
-{
-        struct trace_array *tr = &global_trace;
-        struct trace_array_cpu *data;
-        unsigned long flags;
-        long disabled;
-        int cpu;
-
-        if (unlikely(!tracer_enabled))
-                return;
-
-        local_irq_save(flags);
-        cpu = raw_smp_processor_id();
-        data = tr->data[cpu];
-        disabled = atomic_inc_return(&data->disabled);
-
-        if (likely(disabled == 1))
-                ftrace(tr, data, ip, parent_ip, flags);
-
-        atomic_dec(&data->disabled);
-        local_irq_restore(flags);
-}
-
-static struct ftrace_ops trace_ops __read_mostly =
-{
-        .func = function_trace_call,
-};
-#endif
-
-notrace void tracing_start_function_trace(void)
-{
-        register_ftrace_function(&trace_ops);
-}
-
-notrace void tracing_stop_function_trace(void)
-{
-        unregister_ftrace_function(&trace_ops);
-}
-
 #define SAVED_CMDLINES 128
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
@@ -635,7 +594,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 }
 
 notrace void
-ftrace(struct trace_array *tr, struct trace_array_cpu *data,
+__ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
         struct trace_entry *entry;
@@ -650,6 +609,14 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
         spin_unlock_irqrestore(&data->lock, irq_flags);
 }
 
+notrace void
+ftrace(struct trace_array *tr, struct trace_array_cpu *data,
+       unsigned long ip, unsigned long parent_ip, unsigned long flags)
+{
+        if (likely(!atomic_read(&data->disabled)))
+                __ftrace(tr, data, ip, parent_ip, flags);
+}
+
 notrace void
 trace_special(struct trace_array *tr, struct trace_array_cpu *data,
               unsigned long arg1, unsigned long arg2, unsigned long arg3)
@@ -688,6 +655,47 @@ tracing_sched_switch_trace(struct trace_array *tr,
         spin_unlock_irqrestore(&data->lock, irq_flags);
 }
 
+#ifdef CONFIG_FTRACE
+static notrace void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+        struct trace_array *tr = &global_trace;
+        struct trace_array_cpu *data;
+        unsigned long flags;
+        long disabled;
+        int cpu;
+
+        if (unlikely(!tracer_enabled))
+                return;
+
+        local_irq_save(flags);
+        cpu = raw_smp_processor_id();
+        data = tr->data[cpu];
+        disabled = atomic_inc_return(&data->disabled);
+
+        if (likely(disabled == 1))
+                __ftrace(tr, data, ip, parent_ip, flags);
+
+        atomic_dec(&data->disabled);
+        local_irq_restore(flags);
+}
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+        .func = function_trace_call,
+};
+
+notrace void tracing_start_function_trace(void)
+{
+        register_ftrace_function(&trace_ops);
+}
+
+notrace void tracing_stop_function_trace(void)
+{
+        unregister_ftrace_function(&trace_ops);
+}
+#endif
+
 enum trace_file_type {
         TRACE_FILE_LAT_FMT = 1,
 };
@@ -722,7 +730,7 @@ trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
         return &array[iter->next_page_idx[cpu]];
 }
 
-static struct notrace trace_entry *
+static struct trace_entry * notrace
 find_next_entry(struct trace_iterator *iter, int *ent_cpu)
 {
         struct trace_array *tr = iter->tr;
@@ -1866,6 +1874,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
         static cpumask_t mask;
         static int start;
         unsigned long flags;
+        int ftrace_save;
         int read = 0;
         int cpu;
         int len;
@@ -1944,6 +1953,9 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
         cpus_clear(mask);
         local_irq_save(flags);
+        ftrace_save = ftrace_enabled;
+        ftrace_enabled = 0;
+        smp_wmb();
         for_each_possible_cpu(cpu) {
                 data = iter->tr->data[cpu];
 
@@ -1951,10 +1963,14 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
                         continue;
 
                 atomic_inc(&data->disabled);
-                spin_lock(&data->lock);
                 cpu_set(cpu, mask);
         }
 
+        for_each_cpu_mask(cpu, mask) {
+                data = iter->tr->data[cpu];
+                spin_lock(&data->lock);
+        }
+
         while (find_next_entry_inc(iter) != NULL) {
                 int len = iter->seq.len;
 
@@ -1974,8 +1990,13 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
         for_each_cpu_mask(cpu, mask) {
                 data = iter->tr->data[cpu];
                 spin_unlock(&data->lock);
+        }
+
+        for_each_cpu_mask(cpu, mask) {
+                data = iter->tr->data[cpu];
                 atomic_dec(&data->disabled);
         }
+        ftrace_enabled = ftrace_save;
         local_irq_restore(flags);
 
         /* Now copy what we have to the user */
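A note on the tracing_read_pipe() hunks above: the read path now saves ftrace_enabled, clears it behind an smp_wmb() for the duration of the read, and restores it afterwards, and the single per-CPU loop that used to bump data->disabled and take data->lock together is split into two passes, so every CPU's disabled count is raised before any buffer lock is taken, and every lock is released before any count is dropped again. The snippet below is a hedged userspace analogue of that two-pass pattern only; the cpu_data array and the freeze_all()/thaw_all() helpers are invented for the sketch, and C11 atomics plus pthread mutexes stand in for the kernel's atomic_t and spinlocks.

/*
 * Toy analogue of the two-pass pattern: pass 1 marks every per-CPU
 * buffer disabled, pass 2 takes every lock; on the way out, pass 1
 * drops every lock, pass 2 re-enables.
 */
#include <pthread.h>
#include <stdatomic.h>

#define NR_CPUS 4

struct cpu_data {
        atomic_int      disabled;       /* stands in for data->disabled */
        pthread_mutex_t lock;           /* stands in for data->lock     */
};

static struct cpu_data cpu_data[NR_CPUS];

static void freeze_all(void)
{
        int cpu;

        /* Pass 1: raise every disabled counter before touching any lock. */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                atomic_fetch_add(&cpu_data[cpu].disabled, 1);

        /* Pass 2: with writers already backing off, take the locks. */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                pthread_mutex_lock(&cpu_data[cpu].lock);
}

static void thaw_all(void)
{
        int cpu;

        /* Mirror image: drop every lock first ... */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                pthread_mutex_unlock(&cpu_data[cpu].lock);

        /* ... then let the writers back in. */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                atomic_fetch_sub(&cpu_data[cpu].disabled, 1);
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                pthread_mutex_init(&cpu_data[cpu].lock, NULL);

        freeze_all();
        /* ... consume the buffers here, as tracing_read_pipe() does ... */
        thaw_all();
        return 0;
}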
@@ -9,10 +9,10 @@
  * Copyright (C) 2004-2006 Ingo Molnar
  * Copyright (C) 2004 William Lee Irwin III
  */
-#include <linux/fs.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/fs.h>
 
 #include "trace.h"
 