ring-buffer: Fix bytes info in per_cpu buffer stats
The 'bytes' info in the file 'per_cpu/cpu<X>/stats' reports the number of
bytes in the cpu buffer that have not yet been consumed. However, after
consuming data by reading the file 'trace_pipe', the 'bytes' value
currently does not change as expected:
# cat per_cpu/cpu0/stats
entries: 0
overrun: 0
commit overrun: 0
bytes: 568 <--- 'bytes' is problematic !!!
oldest event ts: 8651.371479
now ts: 8653.912224
dropped events: 0
read events: 8
The root cause is that cpu_buffer->read_bytes is accounted incorrectly
(see the sketch below the tags). To fix it:
1. When updating 'read_bytes', account for the consumed event in
   rb_advance_reader();
2. When updating 'entries_bytes', exclude the discarded padding event
   that is smaller than the minimum event size, because it is invisible
   to the reader. Then use rb_page_commit() instead of BUF_PAGE_SIZE
   wherever page-based read/remove/overrun is accounted.

Also correct the comment of ring_buffer_bytes_cpu() in this patch.
Link: https://lore.kernel.org/linux-trace-kernel/20230921125425.1708423-1-zhengyejian1@huawei.com
Cc: stable@vger.kernel.org
Fixes: c64e148a3b ("trace: Add ring buffer stats to measure rate of events")
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
commit 45d99ea451
parent ce9ecca023
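Before the diff, it may help to see the accounting in miniature. The sketch below is a userspace model, not kernel code: the counter names mirror the fields of struct ring_buffer_per_cpu, and the eight event sizes are invented so that they total the 568 bytes shown in the stats dump above. It reproduces the stale 'bytes' value and shows how the rb_advance_reader() fix drains it.

/*
 * Minimal userspace model of the per-cpu byte accounting; this is
 * not kernel code. The stats file reports
 * bytes = entries_bytes - read_bytes.
 */
#include <stdio.h>

struct cpu_buffer_model {
	long entries_bytes;	/* bytes produced into the buffer */
	long read_bytes;	/* bytes consumed by readers */
};

/* Producer side: committing an event accounts its length. */
static void write_event(struct cpu_buffer_model *cb, long len)
{
	cb->entries_bytes += len;
}

/* Consumer side before the fix: advancing the reader event by
 * event (the trace_pipe path) never touched read_bytes. */
static void advance_reader_buggy(struct cpu_buffer_model *cb, long len)
{
	(void)cb;
	(void)len;
}

/* Consumer side after the fix: rb_advance_reader() accounts the
 * consumed event's length. */
static void advance_reader_fixed(struct cpu_buffer_model *cb, long len)
{
	cb->read_bytes += len;
}

int main(void)
{
	struct cpu_buffer_model buggy = {0, 0}, fixed = {0, 0};
	long lens[8] = {72, 72, 72, 72, 72, 72, 72, 64};	/* 568 bytes total */

	for (int i = 0; i < 8; i++) {
		write_event(&buggy, lens[i]);
		write_event(&fixed, lens[i]);
	}
	for (int i = 0; i < 8; i++) {	/* drain all 8 events */
		advance_reader_buggy(&buggy, lens[i]);
		advance_reader_fixed(&fixed, lens[i]);
	}
	printf("bytes (buggy): %ld\n", buggy.entries_bytes - buggy.read_bytes); /* 568 */
	printf("bytes (fixed): %ld\n", fixed.entries_bytes - fixed.read_bytes); /* 0 */
	return 0;
}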
@@ -354,6 +354,11 @@ static void rb_init_page(struct buffer_data_page *bpage)
 	local_set(&bpage->commit, 0);
 }
 
+static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
+{
+	return local_read(&bpage->page->commit);
+}
+
 static void free_buffer_page(struct buffer_page *bpage)
 {
 	free_page((unsigned long)bpage->page);
@@ -2003,7 +2008,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 			 * Increment overrun to account for the lost events.
 			 */
 			local_add(page_entries, &cpu_buffer->overrun);
-			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+			local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
 			local_inc(&cpu_buffer->pages_lost);
 		}
 
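This hunk and the matching ones in rb_handle_head_page() and ring_buffer_read_page() below share one idea: a page leaving the buffer may be only partially filled, so only its committed bytes (rb_page_commit()) should be moved out of the byte counters, not a full BUF_PAGE_SIZE. A minimal sketch of the difference, with a made-up page struct:

/* Minimal sketch, not kernel code: subtracting a full page
 * over-decrements the counter whenever the page is partial. */
#include <stdio.h>

#define BUF_PAGE_SIZE 4096

struct page_model {
	unsigned int commit;	/* bytes actually committed on the page */
};

int main(void)
{
	struct page_model to_remove = { .commit = 568 };	/* partial page */
	long entries_bytes = 568;

	/* Old accounting: goes negative for a partial page. */
	printf("old: %ld\n", entries_bytes - BUF_PAGE_SIZE);	/* -3528 */

	/* Fixed accounting: subtract only what the page held. */
	printf("new: %ld\n", entries_bytes - (long)to_remove.commit);	/* 0 */
	return 0;
}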
@@ -2367,11 +2372,6 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
 			       cpu_buffer->reader_page->read);
 }
 
-static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
-{
-	return local_read(&bpage->page->commit);
-}
-
 static struct ring_buffer_event *
 rb_iter_head_event(struct ring_buffer_iter *iter)
 {
@@ -2517,7 +2517,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 		 * the counters.
 		 */
 		local_add(entries, &cpu_buffer->overrun);
-		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+		local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
 		local_inc(&cpu_buffer->pages_lost);
 
 		/*
@@ -2660,9 +2660,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 
 	event = __rb_page_index(tail_page, tail);
 
-	/* account for padding bytes */
-	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
-
 	/*
 	 * Save the original length to the meta data.
 	 * This will be used by the reader to add lost event
@@ -2676,7 +2673,8 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	 * write counter enough to allow another writer to slip
 	 * in on this page.
 	 * We put in a discarded commit instead, to make sure
-	 * that this space is not used again.
+	 * that this space is not used again, and this space will
+	 * not be accounted into 'entries_bytes'.
 	 *
 	 * If we are less than the minimum size, we don't need to
 	 * worry about it.
@@ -2701,6 +2699,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	/* time delta must be non zero */
 	event->time_delta = 1;
 
+	/* account for padding bytes */
+	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+
 	/* Make sure the padding is visible before the tail_page->write update */
 	smp_wmb();
 
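The moved local_add() now runs only in the branch that writes a reader-visible padding event. When the space left at the end of the page is smaller than the minimum event size, the writer leaves a discarded sliver the reader never walks over, so those bytes must stay out of 'entries_bytes'. A rough model of that decision, assuming the kernel's 8-byte minimum event size (RB_EVNT_MIN_SIZE):

/* Rough model of the padding accounting in rb_reset_tail(); not
 * kernel code. Assumes an 8-byte minimum event size. */
#include <stdio.h>

#define BUF_PAGE_SIZE	 4096
#define RB_EVNT_MIN_SIZE 8U

static long account_padding(unsigned long tail, long entries_bytes)
{
	unsigned long left = BUF_PAGE_SIZE - tail;

	if (left < RB_EVNT_MIN_SIZE) {
		/* Sliver too small for an event: it becomes a
		 * discarded commit, invisible to the reader, so it
		 * is not accounted into entries_bytes. */
		return entries_bytes;
	}
	/* A real padding event is written; the reader will consume
	 * these bytes, so they count as written. */
	return entries_bytes + (long)left;
}

int main(void)
{
	printf("%ld\n", account_padding(4092, 0));	/* 0: 4-byte sliver */
	printf("%ld\n", account_padding(4000, 0));	/* 96: visible padding */
	return 0;
}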
@@ -4215,7 +4216,7 @@ u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
 
 /**
- * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
+ * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to read from.
  */
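The reworded kerneldoc matches what the helper reports: the difference between bytes produced and bytes consumed, i.e. what is still waiting in the buffer. A paraphrased model of that computation (the function body itself is not part of this diff):

/* Paraphrased model of what ring_buffer_bytes_cpu() reports; the
 * real function body is not touched by this patch. */
#include <stdio.h>

struct cpu_buffer_model {
	long entries_bytes;	/* bytes produced */
	long read_bytes;	/* bytes consumed */
};

static long bytes_unconsumed(const struct cpu_buffer_model *cb)
{
	return cb->entries_bytes - cb->read_bytes;
}

int main(void)
{
	/* With both fixes applied, a fully drained buffer reports 0. */
	struct cpu_buffer_model cb = { .entries_bytes = 568, .read_bytes = 568 };

	printf("bytes: %ld\n", bytes_unconsumed(&cb));
	return 0;
}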
@@ -4723,6 +4724,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 
 	length = rb_event_length(event);
 	cpu_buffer->reader_page->read += length;
+	cpu_buffer->read_bytes += length;
 }
 
 static void rb_advance_iter(struct ring_buffer_iter *iter)
@@ -5816,7 +5818,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
 	} else {
 		/* update the entry counter */
 		cpu_buffer->read += rb_page_entries(reader);
-		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
+		cpu_buffer->read_bytes += rb_page_commit(reader);
 
 		/* swap the pages */
 		rb_init_page(bpage);