Skip to content

Commit c64e148

Browse files
vnagarnaik authored and
rostedt committed
trace: Add ring buffer stats to measure rate of events
The stats file under per_cpu folder provides the number of entries,
overruns and other statistics about the CPU ring buffer. However, the
numbers do not provide any indication of how full the ring buffer is in
bytes compared to the overall size in bytes. Also, it is helpful to know
the rate at which the cpu buffer is filling up.

This patch adds an entry "bytes: " in printed stats for per_cpu ring
buffer which provides the actual bytes consumed in the ring buffer. This
field includes the number of bytes used by recorded events and the
padding bytes added when moving the tail pointer to next page.

It also adds the following time stamps:
"oldest event ts:" - the oldest timestamp in the ring buffer
"now ts:" - the timestamp at the time of reading

The field "now ts" provides a consistent time snapshot to the userspace
when being read. This is read from the same trace clock used by tracing
event timestamps.

Together, these values provide the rate at which the buffer is filling
up, from the formula:
bytes / (now_ts - oldest_event_ts)

Signed-off-by: Vaibhav Nagarnaik <[email protected]>
Cc: Michael Rubin <[email protected]>
Cc: David Sharp <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Steven Rostedt <[email protected]>
1 parent f81ab07 commit c64e148

File tree

3 files changed

+84
-1
lines changed

3 files changed

+84
-1
lines changed

include/linux/ring_buffer.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -154,6 +154,8 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
154154
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
155155
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
156156

157+
unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
158+
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
157159
unsigned long ring_buffer_entries(struct ring_buffer *buffer);
158160
unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
159161
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);

kernel/trace/ring_buffer.c

Lines changed: 69 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -488,12 +488,14 @@ struct ring_buffer_per_cpu {
488488
struct buffer_page *reader_page;
489489
unsigned long lost_events;
490490
unsigned long last_overrun;
491+
local_t entries_bytes;
491492
local_t commit_overrun;
492493
local_t overrun;
493494
local_t entries;
494495
local_t committing;
495496
local_t commits;
496497
unsigned long read;
498+
unsigned long read_bytes;
497499
u64 write_stamp;
498500
u64 read_stamp;
499501
};
@@ -1708,6 +1710,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
17081710
* the counters.
17091711
*/
17101712
local_add(entries, &cpu_buffer->overrun);
1713+
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
17111714

17121715
/*
17131716
* The entries will be zeroed out when we move the
@@ -1863,6 +1866,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
18631866
event = __rb_page_index(tail_page, tail);
18641867
kmemcheck_annotate_bitfield(event, bitfield);
18651868

1869+
/* account for padding bytes */
1870+
local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
1871+
18661872
/*
18671873
* Save the original length to the meta data.
18681874
* This will be used by the reader to add lost event
@@ -2054,6 +2060,9 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
20542060
if (!tail)
20552061
tail_page->page->time_stamp = ts;
20562062

2063+
/* account for these added bytes */
2064+
local_add(length, &cpu_buffer->entries_bytes);
2065+
20572066
return event;
20582067
}
20592068

@@ -2076,6 +2085,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
20762085
if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
20772086
unsigned long write_mask =
20782087
local_read(&bpage->write) & ~RB_WRITE_MASK;
2088+
unsigned long event_length = rb_event_length(event);
20792089
/*
20802090
* This is on the tail page. It is possible that
20812091
* a write could come in and move the tail page
@@ -2085,8 +2095,11 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
20852095
old_index += write_mask;
20862096
new_index += write_mask;
20872097
index = local_cmpxchg(&bpage->write, old_index, new_index);
2088-
if (index == old_index)
2098+
if (index == old_index) {
2099+
/* update counters */
2100+
local_sub(event_length, &cpu_buffer->entries_bytes);
20892101
return 1;
2102+
}
20902103
}
20912104

20922105
/* could not discard */
@@ -2660,6 +2673,58 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
26602673
(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
26612674
}
26622675

2676+
/**
2677+
* ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
2678+
* @buffer: The ring buffer
2679+
* @cpu: The per CPU buffer to read from.
2680+
*/
2681+
unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
2682+
{
2683+
unsigned long flags;
2684+
struct ring_buffer_per_cpu *cpu_buffer;
2685+
struct buffer_page *bpage;
2686+
unsigned long ret;
2687+
2688+
if (!cpumask_test_cpu(cpu, buffer->cpumask))
2689+
return 0;
2690+
2691+
cpu_buffer = buffer->buffers[cpu];
2692+
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2693+
/*
2694+
* if the tail is on reader_page, oldest time stamp is on the reader
2695+
* page
2696+
*/
2697+
if (cpu_buffer->tail_page == cpu_buffer->reader_page)
2698+
bpage = cpu_buffer->reader_page;
2699+
else
2700+
bpage = rb_set_head_page(cpu_buffer);
2701+
ret = bpage->page->time_stamp;
2702+
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2703+
2704+
return ret;
2705+
}
2706+
EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
2707+
2708+
/**
2709+
* ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
2710+
* @buffer: The ring buffer
2711+
* @cpu: The per CPU buffer to read from.
2712+
*/
2713+
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
2714+
{
2715+
struct ring_buffer_per_cpu *cpu_buffer;
2716+
unsigned long ret;
2717+
2718+
if (!cpumask_test_cpu(cpu, buffer->cpumask))
2719+
return 0;
2720+
2721+
cpu_buffer = buffer->buffers[cpu];
2722+
ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
2723+
2724+
return ret;
2725+
}
2726+
EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
2727+
26632728
/**
26642729
* ring_buffer_entries_cpu - get the number of entries in a cpu buffer
26652730
* @buffer: The ring buffer
@@ -3527,11 +3592,13 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
35273592
cpu_buffer->reader_page->read = 0;
35283593

35293594
local_set(&cpu_buffer->commit_overrun, 0);
3595+
local_set(&cpu_buffer->entries_bytes, 0);
35303596
local_set(&cpu_buffer->overrun, 0);
35313597
local_set(&cpu_buffer->entries, 0);
35323598
local_set(&cpu_buffer->committing, 0);
35333599
local_set(&cpu_buffer->commits, 0);
35343600
cpu_buffer->read = 0;
3601+
cpu_buffer->read_bytes = 0;
35353602

35363603
cpu_buffer->write_stamp = 0;
35373604
cpu_buffer->read_stamp = 0;
@@ -3918,6 +3985,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
39183985
} else {
39193986
/* update the entry counter */
39203987
cpu_buffer->read += rb_page_entries(reader);
3988+
cpu_buffer->read_bytes += BUF_PAGE_SIZE;
39213989

39223990
/* swap the pages */
39233991
rb_init_page(bpage);

kernel/trace/trace.c

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4056,6 +4056,8 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
40564056
struct trace_array *tr = &global_trace;
40574057
struct trace_seq *s;
40584058
unsigned long cnt;
4059+
unsigned long long t;
4060+
unsigned long usec_rem;
40594061

40604062
s = kmalloc(sizeof(*s), GFP_KERNEL);
40614063
if (!s)
@@ -4072,6 +4074,17 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
40724074
cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
40734075
trace_seq_printf(s, "commit overrun: %ld\n", cnt);
40744076

4077+
cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
4078+
trace_seq_printf(s, "bytes: %ld\n", cnt);
4079+
4080+
t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
4081+
usec_rem = do_div(t, USEC_PER_SEC);
4082+
trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
4083+
4084+
t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
4085+
usec_rem = do_div(t, USEC_PER_SEC);
4086+
trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
4087+
40754088
count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
40764089

40774090
kfree(s);

0 commit comments

Comments
 (0)