Skip to content

Commit 45d99ea

Browse files
Zheng Yejian authored and rostedt committed
ring-buffer: Fix bytes info in per_cpu buffer stats
The 'bytes' info in file 'per_cpu/cpu<X>/stats' means the number of bytes
in cpu buffer that have not been consumed. However, currently after
consuming data by reading file 'trace_pipe', the 'bytes' info was not
changed as expected.

  # cat per_cpu/cpu0/stats
  entries: 0
  overrun: 0
  commit overrun: 0
  bytes: 568             <--- 'bytes' is problematical !!!
  oldest event ts:  8651.371479
  now ts:  8653.912224
  dropped events: 0
  read events: 8

The root cause is incorrect stat on cpu_buffer->read_bytes. To fix it:
  1. When stat 'read_bytes', account consumed event in rb_advance_reader();
  2. When stat 'entries_bytes', exclude the discarded padding event which
     is smaller than minimum size because it is invisible to reader. Then
     use rb_page_commit() instead of BUF_PAGE_SIZE at where accounting for
     page-based read/remove/overrun.

Also correct the comments of ring_buffer_bytes_cpu() in this patch.

Link: https://lore.kernel.org/linux-trace-kernel/[email protected]
Cc: [email protected]
Fixes: c64e148 ("trace: Add ring buffer stats to measure rate of events")
Signed-off-by: Zheng Yejian <[email protected]>
Signed-off-by: Steven Rostedt (Google) <[email protected]>
1 parent ce9ecca commit 45d99ea

File tree

1 file changed

+15
-13
lines changed

1 file changed

+15
-13
lines changed

kernel/trace/ring_buffer.c

Lines changed: 15 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -354,6 +354,11 @@ static void rb_init_page(struct buffer_data_page *bpage)
354354
local_set(&bpage->commit, 0);
355355
}
356356

357+
static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
358+
{
359+
return local_read(&bpage->page->commit);
360+
}
361+
357362
static void free_buffer_page(struct buffer_page *bpage)
358363
{
359364
free_page((unsigned long)bpage->page);
@@ -2003,7 +2008,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
20032008
* Increment overrun to account for the lost events.
20042009
*/
20052010
local_add(page_entries, &cpu_buffer->overrun);
2006-
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2011+
local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
20072012
local_inc(&cpu_buffer->pages_lost);
20082013
}
20092014

@@ -2367,11 +2372,6 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
23672372
cpu_buffer->reader_page->read);
23682373
}
23692374

2370-
static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
2371-
{
2372-
return local_read(&bpage->page->commit);
2373-
}
2374-
23752375
static struct ring_buffer_event *
23762376
rb_iter_head_event(struct ring_buffer_iter *iter)
23772377
{
@@ -2517,7 +2517,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
25172517
* the counters.
25182518
*/
25192519
local_add(entries, &cpu_buffer->overrun);
2520-
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2520+
local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
25212521
local_inc(&cpu_buffer->pages_lost);
25222522

25232523
/*
@@ -2660,9 +2660,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
26602660

26612661
event = __rb_page_index(tail_page, tail);
26622662

2663-
/* account for padding bytes */
2664-
local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2665-
26662663
/*
26672664
* Save the original length to the meta data.
26682665
* This will be used by the reader to add lost event
@@ -2676,7 +2673,8 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
26762673
* write counter enough to allow another writer to slip
26772674
* in on this page.
26782675
* We put in a discarded commit instead, to make sure
2679-
* that this space is not used again.
2676+
* that this space is not used again, and this space will
2677+
* not be accounted into 'entries_bytes'.
26802678
*
26812679
* If we are less than the minimum size, we don't need to
26822680
* worry about it.
@@ -2701,6 +2699,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
27012699
/* time delta must be non zero */
27022700
event->time_delta = 1;
27032701

2702+
/* account for padding bytes */
2703+
local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2704+
27042705
/* Make sure the padding is visible before the tail_page->write update */
27052706
smp_wmb();
27062707

@@ -4215,7 +4216,7 @@ u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
42154216
EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
42164217

42174218
/**
4218-
* ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
4219+
* ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
42194220
* @buffer: The ring buffer
42204221
* @cpu: The per CPU buffer to read from.
42214222
*/
@@ -4723,6 +4724,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
47234724

47244725
length = rb_event_length(event);
47254726
cpu_buffer->reader_page->read += length;
4727+
cpu_buffer->read_bytes += length;
47264728
}
47274729

47284730
static void rb_advance_iter(struct ring_buffer_iter *iter)
@@ -5816,7 +5818,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
58165818
} else {
58175819
/* update the entry counter */
58185820
cpu_buffer->read += rb_page_entries(reader);
5819-
cpu_buffer->read_bytes += BUF_PAGE_SIZE;
5821+
cpu_buffer->read_bytes += rb_page_commit(reader);
58205822

58215823
/* swap the pages */
58225824
rb_init_page(bpage);

0 commit comments

Comments (0)