Merge branch 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull minor tracing updates and fixes from Steven Rostedt:
 "It seems that one of my old pull requests has slipped through. The
  changes are contained to just the files that I maintain, and are
  changes from others that I said I would get into this merge window.
  They have already been in linux-next for several weeks, and should be
  well tested."

* 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Remove unnecessary WARN_ONCE's from tracing_buffers_splice_read
  tracing: Remove unneeded checks from the stack tracer
  tracing: Add a resize function to make one buffer equivalent to another buffer
commit 758338e960
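The heart of the trace.c change below is the new resize_buffer_duplicate_size() helper, which replaces two open-coded per-CPU resize loops. The following is a minimal user-space sketch of that pattern only; the toy types, the stubbed resize call, and the ALL_CPUS constant are stand-ins, not the kernel's actual API:

/*
 * Hypothetical illustration of "resize one trace array's per-CPU buffers
 * to match another's"; mirrors the shape of resize_buffer_duplicate_size()
 * with made-up types.
 */
#include <stdio.h>

#define NR_CPUS   4
#define ALL_CPUS  (-1)                  /* stand-in for RING_BUFFER_ALL_CPUS */

struct toy_trace_array {
        unsigned long entries[NR_CPUS]; /* per-CPU buffer sizes */
};

/* Stub: pretend to resize one CPU's buffer; 0 on success, <0 on error. */
static int toy_buffer_resize(struct toy_trace_array *tr,
                             unsigned long size, int cpu)
{
        tr->entries[cpu] = size;
        return 0;
}

/* Walk all CPUs (or handle a single one) and stop at the first failure. */
static int toy_duplicate_size(struct toy_trace_array *tr,
                              const struct toy_trace_array *size_tr,
                              int cpu_id)
{
        int cpu, ret = 0;

        if (cpu_id == ALL_CPUS) {
                for (cpu = 0; cpu < NR_CPUS; cpu++) {
                        ret = toy_buffer_resize(tr, size_tr->entries[cpu], cpu);
                        if (ret < 0)
                                break;
                }
        } else {
                ret = toy_buffer_resize(tr, size_tr->entries[cpu_id], cpu_id);
        }

        return ret;
}

int main(void)
{
        struct toy_trace_array global = { .entries = { 4096, 8192, 4096, 2048 } };
        struct toy_trace_array max = { { 0 } };

        if (toy_duplicate_size(&max, &global, ALL_CPUS) == 0) {
                int cpu;
                for (cpu = 0; cpu < NR_CPUS; cpu++)
                        printf("cpu %d: %lu entries\n", cpu, max.entries[cpu]);
        }
        return 0;
}

In the diff itself, the same helper is used in two places: to roll global_trace back after a failed resize of max_tr, and to bring max_tr's per-CPU sizes in line with global_trace when switching to a tracer that uses the max buffer.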
kernel/trace/trace.c
@@ -3034,6 +3034,31 @@ static void set_buffer_entries(struct trace_array *tr, unsigned long val)
 		tr->data[cpu]->entries = val;
 }
 
+/* resize @tr's buffer to the size of @size_tr's entries */
+static int resize_buffer_duplicate_size(struct trace_array *tr,
+					struct trace_array *size_tr, int cpu_id)
+{
+	int cpu, ret = 0;
+
+	if (cpu_id == RING_BUFFER_ALL_CPUS) {
+		for_each_tracing_cpu(cpu) {
+			ret = ring_buffer_resize(tr->buffer,
+					size_tr->data[cpu]->entries, cpu);
+			if (ret < 0)
+				break;
+			tr->data[cpu]->entries = size_tr->data[cpu]->entries;
+		}
+	} else {
+		ret = ring_buffer_resize(tr->buffer,
+				size_tr->data[cpu_id]->entries, cpu_id);
+		if (ret == 0)
+			tr->data[cpu_id]->entries =
+				size_tr->data[cpu_id]->entries;
+	}
+
+	return ret;
+}
+
 static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 {
 	int ret;
@@ -3058,23 +3083,8 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 
 	ret = ring_buffer_resize(max_tr.buffer, size, cpu);
 	if (ret < 0) {
-		int r = 0;
-
-		if (cpu == RING_BUFFER_ALL_CPUS) {
-			int i;
-			for_each_tracing_cpu(i) {
-				r = ring_buffer_resize(global_trace.buffer,
-						global_trace.data[i]->entries,
-						i);
-				if (r < 0)
-					break;
-			}
-		} else {
-			r = ring_buffer_resize(global_trace.buffer,
-					global_trace.data[cpu]->entries,
-					cpu);
-		}
-
+		int r = resize_buffer_duplicate_size(&global_trace,
+						     &global_trace, cpu);
 		if (r < 0) {
 			/*
 			 * AARGH! We are left with different
@@ -3212,17 +3222,11 @@ static int tracing_set_tracer(const char *buf)
 
 	topts = create_trace_option_files(t);
 	if (t->use_max_tr) {
-		int cpu;
 		/* we need to make per cpu buffer sizes equivalent */
-		for_each_tracing_cpu(cpu) {
-			ret = ring_buffer_resize(max_tr.buffer,
-						global_trace.data[cpu]->entries,
-						cpu);
-			if (ret < 0)
-				goto out;
-			max_tr.data[cpu]->entries =
-					global_trace.data[cpu]->entries;
-		}
+		ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
+						   RING_BUFFER_ALL_CPUS);
+		if (ret < 0)
+			goto out;
 	}
 
 	if (t->init) {
@@ -4271,13 +4275,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		return -ENOMEM;
 
 	if (*ppos & (PAGE_SIZE - 1)) {
-		WARN_ONCE(1, "Ftrace: previous read must page-align\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (len & (PAGE_SIZE - 1)) {
-		WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
 		if (len < PAGE_SIZE) {
 			ret = -EINVAL;
 			goto out;
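The tracing_buffers_splice_read() hunk above only drops the WARN_ONCE() noise; the page-alignment checks stay and still fail with -EINVAL. The test relies on PAGE_SIZE being a power of two, so v & (PAGE_SIZE - 1) is the offset of v within a page and is zero exactly when v is page-aligned. A small stand-alone illustration (the PAGE_SIZE value here is an assumption):

/*
 * Demonstrates the page-alignment test kept in tracing_buffers_splice_read().
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* power of two, as on most architectures */

static int page_aligned(unsigned long v)
{
	/*
	 * With a power-of-two PAGE_SIZE, the low bits of v are its offset
	 * within a page; they are all zero iff v is page-aligned.
	 */
	return (v & (PAGE_SIZE - 1)) == 0;
}

int main(void)
{
	unsigned long samples[] = { 0, 4096, 4097, 8192, 12288 + 17 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%5lu -> %s\n", samples[i],
		       page_aligned(samples[i]) ? "page-aligned" : "not aligned");
	return 0;
}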
kernel/trace/trace_stack.c
@@ -33,7 +33,6 @@ static unsigned long max_stack_size;
 static arch_spinlock_t max_stack_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
-static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
 static DEFINE_MUTEX(stack_sysctl_mutex);
 
@@ -116,9 +115,6 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 {
 	int cpu;
 
-	if (unlikely(!ftrace_enabled || stack_trace_disabled))
-		return;
-
 	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();
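In the two trace_stack.c hunks above, the removed stack_trace_disabled flag has no writer anywhere in the file, and stack_trace_call() is only entered through ftrace while the tracer is registered, so the explicit checks added nothing. What actually protects the callback is the preempt-disabled section together with the per-CPU trace_active counter, which acts as a re-entrancy guard. A hypothetical user-space analogue of that guard (a thread-local counter in place of the per-CPU one, not the kernel code):

/*
 * Sketch of a re-entrancy guard in the style of stack_trace_call():
 * a per-context counter keeps the callback from nesting into itself.
 */
#include <stdio.h>

static _Thread_local int trace_active;	/* stand-in for the per-CPU counter */

static void trace_callback(int depth)
{
	/* Already running on this context: bail out immediately. */
	if (trace_active++ != 0)
		goto out;

	printf("checking stack (call depth %d)\n", depth);
	if (depth < 3)
		trace_callback(depth + 1);	/* nested call is suppressed */
out:
	trace_active--;
}

int main(void)
{
	trace_callback(0);	/* prints exactly one "checking stack" line */
	trace_callback(0);	/* guard was released, so this prints again */
	return 0;
}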