Merge branch 'tip/tracing/ftrace-4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace

Ingo Molnar 2009-06-05 16:50:29 +02:00
commit 918143e8b7
4 changed files with 83 additions and 60 deletions


@@ -104,6 +104,7 @@
* field = (typeof(field))entry;
*
* p = get_cpu_var(ftrace_event_seq);
* trace_seq_init(p);
* ret = trace_seq_printf(s, <TP_printk> "\n");
* put_cpu();
* if (!ret)
@@ -167,6 +168,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
field = (typeof(field))entry; \
\
p = &get_cpu_var(ftrace_event_seq); \
trace_seq_init(p); \
ret = trace_seq_printf(s, #call ": " print); \
put_cpu(); \
if (!ret) \
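For reference, the trace_seq_init() call that this change adds to the generated ftrace_raw_output_##call() was, at the time, just a tiny reset helper along these lines (a sketch from memory, not part of this patch):

static inline void trace_seq_init(struct trace_seq *s)
{
	s->len = 0;	/* forget whatever the previous event left in the seq */
	s->readpos = 0;
}

Since ftrace_event_seq is a per-cpu scratch buffer reused for every event printed on that CPU, resetting it here keeps one event's output from leaking into the next.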


@@ -370,6 +370,9 @@ static inline int test_time_stamp(u64 delta)
/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
/* Max number of timestamps that can fit on a page */
#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
int ring_buffer_print_page_header(struct trace_seq *s)
{
struct buffer_data_page field;
@@ -1335,6 +1338,38 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
return event;
}
static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
unsigned long new_index, old_index;
struct buffer_page *bpage;
unsigned long index;
unsigned long addr;
new_index = rb_event_index(event);
old_index = new_index + rb_event_length(event);
addr = (unsigned long)event;
addr &= PAGE_MASK;
bpage = cpu_buffer->tail_page;
if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
/*
* This is on the tail page. It is possible that
* a write could come in and move the tail page
* and write to the next page. That is fine
* because we just shorten what is on this page.
*/
index = local_cmpxchg(&bpage->write, old_index, new_index);
if (index == old_index)
return 1;
}
/* could not discard */
return 0;
}
static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
u64 *ts, u64 *delta)
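The local_cmpxchg() in rb_try_to_discard() above is the whole trick: the event being discarded ends at old_index, so if the page's write counter still equals old_index nothing was reserved after it and the counter can be rewound atomically to new_index; otherwise the event has to stay. A minimal user-space sketch of the same idea, with the buffer_page bookkeeping and PAGE_MASK check stripped out and GCC's __sync_val_compare_and_swap() standing in for local_cmpxchg():

#include <stdio.h>

static unsigned long page_write;	/* stands in for bpage->write */

/* try to give back 'len' bytes reserved at offset 'start' on the page */
static int try_to_discard(unsigned long start, unsigned long len)
{
	unsigned long old_index = start + len;	/* where the cursor should still be */
	unsigned long prev;

	prev = __sync_val_compare_and_swap(&page_write, old_index, start);
	return prev == old_index;		/* success only if nobody wrote after us */
}

int main(void)
{
	page_write = 32;	/* event A reserved at offset 0, length 32 */
	page_write += 16;	/* a nested writer reserves event B at offset 32 */

	/* A is no longer the last reservation: the cursor moved, so keep it */
	printf("discard A: %s\n", try_to_discard(0, 32) ? "ok" : "failed, cursor moved");

	/* B is still the last reservation: the cursor rewinds from 48 to 32 */
	int ok = try_to_discard(32, 16);
	printf("discard B: %s, cursor=%lu\n", ok ? "ok" : "failed", page_write);
	return 0;
}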
@@ -1377,17 +1412,24 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
event->array[0] = *delta >> TS_SHIFT;
} else {
cpu_buffer->commit_page->page->time_stamp = *ts;
event->time_delta = 0;
event->array[0] = 0;
/* try to discard, since we do not need this */
if (!rb_try_to_discard(cpu_buffer, event)) {
/* nope, just zero it */
event->time_delta = 0;
event->array[0] = 0;
}
}
cpu_buffer->write_stamp = *ts;
/* let the caller know this was the commit */
ret = 1;
} else {
/* Darn, this is just wasted space */
event->time_delta = 0;
event->array[0] = 0;
ret = 0;
/* Try to discard the event */
if (!rb_try_to_discard(cpu_buffer, event)) {
/* Darn, this is just wasted space */
event->time_delta = 0;
event->array[0] = 0;
ret = 0;
}
}
*delta = 0;
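As background for the branch above: when a delta does not fit in the event's time_delta bit-field, rb_add_time_stamp() emits an extended time-stamp event, with the low bits kept in time_delta and the upper bits in array[0], as the *delta >> TS_SHIFT line shows. A rough sketch of that split, assuming TS_SHIFT is 27 as in ring_buffer.c:

#include <stdint.h>
#include <stdio.h>

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)

int main(void)
{
	uint64_t delta = (3ULL << TS_SHIFT) + 12345;	/* too large for 27 bits */

	uint32_t time_delta = delta & TS_MASK;		/* -> event->time_delta */
	uint32_t high       = delta >> TS_SHIFT;	/* -> event->array[0] */

	/* a reader reassembles the full delta from the two halves */
	uint64_t again = ((uint64_t)high << TS_SHIFT) | time_delta;
	printf("delta=%llu reassembled=%llu\n",
	       (unsigned long long)delta, (unsigned long long)again);
	return 0;
}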
@@ -1682,10 +1724,6 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long new_index, old_index;
struct buffer_page *bpage;
unsigned long index;
unsigned long addr;
int cpu;
/* The event is discarded regardless */
@@ -1701,24 +1739,8 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
cpu = smp_processor_id();
cpu_buffer = buffer->buffers[cpu];
new_index = rb_event_index(event);
old_index = new_index + rb_event_length(event);
addr = (unsigned long)event;
addr &= PAGE_MASK;
bpage = cpu_buffer->tail_page;
if (bpage == (void *)addr && rb_page_write(bpage) == old_index) {
/*
* This is on the tail page. It is possible that
* a write could come in and move the tail page
* and write to the next page. That is fine
* because we just shorten what is on this page.
*/
index = local_cmpxchg(&bpage->write, old_index, new_index);
if (index == old_index)
goto out;
}
if (!rb_try_to_discard(cpu_buffer, event))
goto out;
/*
* The commit is still visible by the reader, so we
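ring_buffer_discard_commit(), which the hunk above simplifies to reuse rb_try_to_discard(), exists so a caller can reserve and fill an event and then change its mind, which is what soft event filtering does. A hedged sketch of that calling pattern; struct my_entry and my_filter_match() are made up, while the ring buffer calls are the real API of the time:

#include <linux/types.h>
#include <linux/ring_buffer.h>

struct my_entry {
	unsigned long value;
};

static bool my_filter_match(struct my_entry *entry)
{
	return entry->value != 0;	/* stand-in for the real filter predicate */
}

static void my_record(struct ring_buffer *buffer, unsigned long value)
{
	struct ring_buffer_event *event;
	struct my_entry *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->value = value;

	if (!my_filter_match(entry))
		/* give the space back if we can; otherwise the event
		 * is turned into padding that readers will skip */
		ring_buffer_discard_commit(buffer, event);
	else
		ring_buffer_unlock_commit(buffer, event);
}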
@@ -2253,8 +2275,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
* Check if we are at the end of the buffer.
*/
if (iter->head >= rb_page_size(iter->head_page)) {
if (RB_WARN_ON(buffer,
iter->head_page == cpu_buffer->commit_page))
/* discarded commits can make the page empty */
if (iter->head_page == cpu_buffer->commit_page)
return;
rb_inc_iter(iter);
return;
@@ -2297,12 +2319,10 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
/*
* We repeat when a timestamp is encountered. It is possible
* to get multiple timestamps from an interrupt entering just
* as one timestamp is about to be written. The max times
* that this can happen is the number of nested interrupts we
* can have. Nesting 10 deep of interrupts is clearly
* an anomaly.
* as one timestamp is about to be written, or from discarded
* commits. The most that we can have is the number on a single page.
*/
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
return NULL;
reader = rb_get_reader_page(cpu_buffer);
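For a rough feel of the new loop bound, a back-of-the-envelope sketch, assuming a 4 KiB page and the 64-bit layout of struct buffer_data_page (the real numbers come from the constants in ring_buffer.c):

#include <stdio.h>

int main(void)
{
	unsigned page_size = 4096;
	unsigned bpage_header = 16;	/* u64 time_stamp + local_t commit on 64-bit */
	unsigned len_time_stamp = 16;	/* RB_LEN_TIME_STAMP: size of a time-stamp event */

	unsigned buf_page_size = page_size - bpage_header;
	/* roughly 255 iterations allowed, versus the old hard-coded limit of 10 */
	printf("RB_TIMESTAMPS_PER_PAGE ~= %u\n", buf_page_size / len_time_stamp);
	return 0;
}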
@@ -2368,14 +2388,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
again:
/*
* We repeat when a timestamp is encountered. It is possible
* to get multiple timestamps from an interrupt entering just
* as one timestamp is about to be written. The max times
* that this can happen is the number of nested interrupts we
* can have. Nesting 10 deep of interrupts is clearly
* an anomaly.
* We repeat when a timestamp is encountered.
* We can get multiple timestamps by nested interrupts or also
* if filtering is on (discarding commits). Since discarding
* commits can be frequent we can get a lot of timestamps.
* But we limit them by not adding timestamps if they begin
* at the start of a page.
*/
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
return NULL;
if (rb_per_cpu_empty(cpu_buffer))


@@ -223,10 +223,9 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
{
unsigned long mask;
const char *str;
const char *ret = p->buffer + p->len;
int i;
trace_seq_init(p);
for (i = 0; flag_array[i].name && flags; i++) {
mask = flag_array[i].mask;
@@ -249,7 +248,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
trace_seq_putc(p, 0);
return p->buffer;
return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);
@@ -258,8 +257,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
const struct trace_print_flags *symbol_array)
{
int i;
trace_seq_init(p);
const char *ret = p->buffer + p->len;
for (i = 0; symbol_array[i].name; i++) {
@@ -275,7 +273,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
trace_seq_putc(p, 0);
return p->buffer;
return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);
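Both helpers now capture ret = p->buffer + p->len instead of calling trace_seq_init() themselves because the init moved into the generated output function (the ftrace.h hunk above): one TP_printk() line may use __print_flags()/__print_symbolic() more than once against the same per-cpu scratch seq, and each call has to append and return a pointer to just its own chunk. A sketch of that usage with made-up flag tables (it assumes the tracing headers are already included; only the two helpers and struct trace_print_flags are the real API):

/* hypothetical print helper; the tables and field names are invented */
static int my_print(struct trace_seq *s, struct trace_seq *p,
		    unsigned long gfp, unsigned long prio)
{
	static const struct trace_print_flags gfp_names[] = {
		{ 0x01, "WAIT" }, { 0x02, "IO" }, { 0, NULL }
	};
	static const struct trace_print_flags prio_names[] = {
		{ 0, "normal" }, { 1, "high" }, { 0, NULL }
	};

	/* both calls append to the scratch seq p; because each returns the
	 * offset it started at, the second cannot clobber the first */
	return trace_seq_printf(s, "gfp=%s prio=%s\n",
				ftrace_print_flags_seq(p, "|", gfp, gfp_names),
				ftrace_print_symbols_seq(p, prio, prio_names));
}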
@@ -389,17 +387,20 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
if (ip == ULONG_MAX || !ret)
break;
if (i && ret)
ret = trace_seq_puts(s, " <- ");
if (ret)
ret = trace_seq_puts(s, " => ");
if (!ip) {
if (ret)
ret = trace_seq_puts(s, "??");
if (ret)
ret = trace_seq_puts(s, "\n");
continue;
}
if (!ret)
break;
if (ret)
ret = seq_print_user_ip(s, mm, ip, sym_flags);
ret = trace_seq_puts(s, "\n");
}
if (mm)
@@ -975,16 +976,16 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
trace_assign_type(field, iter->ent);
if (!trace_seq_puts(s, "<stack trace>\n"))
goto partial;
for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
if (!field->caller[i])
if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
break;
if (i) {
if (!trace_seq_puts(s, " <= "))
goto partial;
if (!trace_seq_puts(s, " => "))
goto partial;
if (!seq_print_ip_sym(s, field->caller[i], flags))
goto partial;
}
if (!seq_print_ip_sym(s, field->caller[i], flags))
goto partial;
if (!trace_seq_puts(s, "\n"))
goto partial;
}
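With the reordered puts calls and the " => " separator, a kernel stack-trace event now renders roughly like this (the symbols are invented for illustration, and seq_print_ip_sym() may append offsets depending on the flags):

<stack trace>
 => schedule_timeout
 => do_sys_poll
 => sys_poll

This matches the arrow style the user-stack path switches to in the seq_print_userip_objs() hunk above.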
@@ -1012,10 +1013,10 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
trace_assign_type(field, iter->ent);
if (!seq_print_userip_objs(field, s, flags))
if (!trace_seq_puts(s, "<user stack trace>\n"))
goto partial;
if (!trace_seq_putc(s, '\n'))
if (!seq_print_userip_objs(field, s, flags))
goto partial;
return TRACE_TYPE_HANDLED;


@@ -265,7 +265,7 @@ static int t_show(struct seq_file *m, void *v)
seq_printf(m, " Depth Size Location"
" (%d entries)\n"
" ----- ---- --------\n",
max_stack_trace.nr_entries);
max_stack_trace.nr_entries - 1);
if (!stack_tracer_enabled && !max_stack_size)
print_disabled(m);