Merge branch 'tip/perf/urgent-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/urgent

Pull ftrace fixes from Steve Rostedt.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 08cd2a6960
kernel/trace/ftrace.c

@@ -2437,7 +2437,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
 {
        iter->pos = 0;
        iter->func_pos = 0;
-       iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
+       iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
 }

 static void *t_start(struct seq_file *m, loff_t *pos)
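The one-character change above matters because FTRACE_ITER_PRINTALL and FTRACE_ITER_HASH are distinct bits: ~(PRINTALL & HASH) evaluates to an all-ones mask, so the &= cleared nothing, while ~(PRINTALL | HASH) masks off both flags. A minimal user-space sketch of the difference, using made-up flag values rather than the real FTRACE_ITER_* definitions:

#include <stdio.h>

#define ITER_PRINTALL 0x1   /* made-up bit, for illustration only */
#define ITER_HASH     0x2   /* made-up bit, for illustration only */

int main(void)
{
        unsigned int flags = ITER_PRINTALL | ITER_HASH;

        /* Old expression: PRINTALL & HASH is 0, ~0 is all ones,
         * so the &= clears nothing. */
        unsigned int old_way = flags & ~(ITER_PRINTALL & ITER_HASH);

        /* Fixed expression: the mask covers both bits, so both are cleared. */
        unsigned int new_way = flags & ~(ITER_PRINTALL | ITER_HASH);

        printf("old way: %#x (both flags still set)\n", old_way);   /* 0x3 */
        printf("new way: %#x (both flags cleared)\n", new_way);     /* 0 */
        return 0;
}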
kernel/trace/ring_buffer.c

@@ -1396,6 +1396,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
                        struct list_head *head_page_with_bit;

                        head_page = &rb_set_head_page(cpu_buffer)->list;
+                       if (!head_page)
+                               break;
                        prev_page = head_page->prev;

                        first_page = pages->next;
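If rb_set_head_page() cannot locate a head page, head_page ends up NULL and the old code would have dereferenced it through head_page->prev; the added check breaks out of the resize retry loop instead. The same defensive pattern appears in the hunks for ring_buffer_oldest_event_ts() and rb_get_reader_page() below. A self-contained sketch of the pattern, with hypothetical names (find_head(), insert_with_retries()) standing in for the ring buffer internals:

#include <stdio.h>
#include <stddef.h>

struct node { struct node *prev, *next; };

static struct node ring = { &ring, &ring };
static int head_visible;        /* flip to 1 to simulate a locatable head */

/* Stand-in for rb_set_head_page(): may legitimately return NULL. */
static struct node *find_head(void)
{
        return head_visible ? &ring : NULL;
}

static int insert_with_retries(void)
{
        int retries = 10;

        while (retries--) {
                struct node *head = find_head();

                if (!head)      /* the added check: bail out, don't dereference */
                        break;

                /* ... splice the new pages in around 'head' here ... */
                return 0;       /* success */
        }
        return -1;              /* no head found (or too much contention) */
}

int main(void)
{
        printf("%d\n", insert_with_retries());  /* -1: bailed out safely */
        head_visible = 1;
        printf("%d\n", insert_with_retries());  /* 0: head located */
        return 0;
}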
@@ -2934,7 +2936,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
        unsigned long flags;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_page *bpage;
-       unsigned long ret;
+       unsigned long ret = 0;

        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return 0;
@@ -2949,6 +2951,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
                bpage = cpu_buffer->reader_page;
        else
                bpage = rb_set_head_page(cpu_buffer);
-       ret = bpage->page->time_stamp;
+       if (bpage)
+               ret = bpage->page->time_stamp;
        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
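Initializing ret to 0 in the previous hunk pairs with the new if (bpage) guard here: when neither the reader page nor a head page yields a timestamp, the function now returns 0 rather than whatever happened to be on the stack. The same shape in isolation, with hypothetical names (lookup(), oldest_ts()) rather than the real ring buffer helpers:

#include <stdio.h>

struct page_stub { unsigned long time_stamp; };

/* Stand-in for the reader-page/head-page lookup; may find nothing. */
static struct page_stub *lookup(int found)
{
        static struct page_stub p = { .time_stamp = 12345 };
        return found ? &p : NULL;
}

static unsigned long oldest_ts(int found)
{
        unsigned long ret = 0;          /* safe default when nothing is found */
        struct page_stub *bpage = lookup(found);

        if (bpage)                      /* guard matching the fix above */
                ret = bpage->time_stamp;
        return ret;
}

int main(void)
{
        printf("%lu %lu\n", oldest_ts(0), oldest_ts(1));        /* 0 12345 */
        return 0;
}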
@@ -3260,6 +3263,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
         * Splice the empty reader page into the list around the head.
         */
        reader = rb_set_head_page(cpu_buffer);
+       if (!reader)
+               goto out;
        cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
        cpu_buffer->reader_page->list.prev = reader->list.prev;
@@ -3778,12 +3783,17 @@ void
 ring_buffer_read_finish(struct ring_buffer_iter *iter)
 {
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+       unsigned long flags;

        /*
         * Ring buffer is disabled from recording, here's a good place
         * to check the integrity of the ring buffer.
+        * Must prevent readers from trying to read, as the check
+        * clears the HEAD page and readers require it.
         */
+       raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        rb_check_pages(cpu_buffer);
+       raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        atomic_dec(&cpu_buffer->record_disabled);
        atomic_dec(&cpu_buffer->buffer->resize_disabled);
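As the new comment in the hunk says, the integrity check clears the HEAD page marker that readers require, so readers must be excluded while it runs; the fix holds reader_lock with interrupts disabled around rb_check_pages(). A user-space analogue of that idea, using a pthread mutex purely for illustration in place of raw_spin_lock_irqsave():

#include <pthread.h>
#include <assert.h>

static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;
static int head_marker = 1;     /* stands in for the HEAD bit readers rely on */

/* Readers only proceed while the head marker is present. */
static int reader_sees_head(void)
{
        int ok;

        pthread_mutex_lock(&reader_lock);
        ok = head_marker;
        pthread_mutex_unlock(&reader_lock);
        return ok;
}

/* The integrity check clears and restores the marker, so it must hold the
 * same lock for its whole duration. */
static void check_pages(void)
{
        pthread_mutex_lock(&reader_lock);
        head_marker = 0;        /* check temporarily removes the HEAD marker */
        /* ... walk the page list and verify prev/next consistency ... */
        head_marker = 1;        /* restore before readers are let back in */
        pthread_mutex_unlock(&reader_lock);
}

int main(void)
{
        check_pages();
        assert(reader_sees_head());     /* readers never see the cleared state */
        return 0;
}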