@@ -374,11 +374,6 @@ static inline bool test_time_stamp(u64 delta)
 	return !!(delta & TS_DELTA_TEST);
 }
 
-#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
-
-/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
-#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
-
 struct rb_irq_work {
 	struct irq_work			work;
 	wait_queue_head_t		waiters;
@@ -510,6 +505,9 @@ struct trace_buffer {
 
 	struct rb_irq_work		irq_work;
 	bool				time_stamp_abs;
+
+	unsigned int			subbuf_size;
+	unsigned int			max_data_size;
 };
 
 struct ring_buffer_iter {
@@ -523,10 +521,11 @@ struct ring_buffer_iter {
 	u64				read_stamp;
 	u64				page_stamp;
 	struct ring_buffer_event	*event;
+	size_t				event_size;
 	int				missed_events;
 };
 
-int ring_buffer_print_page_header(struct trace_seq *s)
+int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
 {
 	struct buffer_data_page field;
@@ -550,7 +549,7 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 	trace_seq_printf(s, "\tfield: char data;\t"
 			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			 (unsigned int)offsetof(typeof(field), data),
-			 (unsigned int)BUF_PAGE_SIZE,
+			 (unsigned int)buffer->subbuf_size,
 			 (unsigned int)is_signed_type(char));
 
 	return !trace_seq_has_overflowed(s);
@@ -1625,7 +1624,13 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
 		goto fail_free_buffer;
 
-	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+	/* Default buffer page size - one system page */
+	buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
+
+	/* Max payload is buffer page size - header (8bytes) */
+	buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
+
+	nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
 	buffer->flags = flags;
 	buffer->clock = trace_clock_local;
 	buffer->reader_lock_key = key;
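
The two fields initialized above are just the old macro expansions, captured per buffer at allocation time instead of fixed at compile time. A minimal userspace model of the arithmetic, assuming a 4096-byte page and a 16-byte buffer_data_page header on a 64-bit build (stand-ins for the kernel's PAGE_SIZE and BUF_PAGE_HDR_SIZE):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page_size     = 4096;	/* stand-in for PAGE_SIZE */
	uint32_t page_hdr_size = 16;	/* stand-in for BUF_PAGE_HDR_SIZE */

	/* usable bytes per sub-buffer, after the page header */
	uint32_t subbuf_size = page_size - page_hdr_size;

	/* largest payload: sub-buffer minus the 8-byte event header */
	uint32_t max_data_size = subbuf_size - (uint32_t)(sizeof(uint32_t) * 2);

	/* prints: subbuf_size=4080 max_data_size=4072 */
	printf("subbuf_size=%u max_data_size=%u\n", subbuf_size, max_data_size);
	return 0;
}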
@@ -1944,7 +1949,7 @@ static void update_pages_handler(struct work_struct *work)
  * @size: the new size.
  * @cpu_id: the cpu buffer to resize
  *
- * Minimum size is 2 * BUF_PAGE_SIZE.
+ * Minimum size is 2 * buffer->subbuf_size.
  *
  * Returns 0 on success and < 0 on failure.
  */
@@ -1966,7 +1971,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
 		return 0;
 
-	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+	nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
 
 	/* we need a minimum of two pages */
 	if (nr_pages < 2)
@@ -2213,7 +2218,7 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
 	 */
 	barrier();
 
-	if ((iter->head + length) > commit || length > BUF_PAGE_SIZE)
+	if ((iter->head + length) > commit || length > iter->event_size)
 		/* Writer corrupted the read? */
 		goto reset;
 
@@ -2446,6 +2451,7 @@ static inline void
 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	      unsigned long tail, struct rb_event_info *info)
 {
+	unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
 	struct buffer_page *tail_page = info->tail_page;
 	struct ring_buffer_event *event;
 	unsigned long length = info->length;
@@ -2454,13 +2460,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	 * Only the event that crossed the page boundary
 	 * must fill the old tail_page with padding.
 	 */
-	if (tail >= BUF_PAGE_SIZE) {
+	if (tail >= bsize) {
 		/*
 		 * If the page was filled, then we still need
 		 * to update the real_end. Reset it to zero
 		 * and the reader will ignore it.
 		 */
-		if (tail == BUF_PAGE_SIZE)
+		if (tail == bsize)
 			tail_page->real_end = 0;
 
 		local_sub(length, &tail_page->write);
@@ -2488,7 +2494,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	 * If we are less than the minimum size, we don't need to
 	 * worry about it.
 	 */
-	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
+	if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
 		/* No room for any events */
 
 		/* Mark the rest of the page with padding */
@@ -2503,19 +2509,19 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	}
 
 	/* Put in a discarded event */
-	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
+	event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
 	event->type_len = RINGBUF_TYPE_PADDING;
 	/* time delta must be non zero */
 	event->time_delta = 1;
 
 	/* account for padding bytes */
-	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+	local_add(bsize - tail, &cpu_buffer->entries_bytes);
 
 	/* Make sure the padding is visible before the tail_page->write update */
 	smp_wmb();
 
 	/* Set write to end of buffer */
-	length = (tail + length) - BUF_PAGE_SIZE;
+	length = (tail + length) - bsize;
 	local_sub(length, &tail_page->write);
 }
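
The invariant rb_reset_tail() maintains above: a padding event placed at `tail` must account for every byte up to the end of the sub-buffer, so its recorded payload is the remaining space minus the event's own header. A small single-threaded model, assuming the kernel's 8-byte event header (RB_EVNT_HDR_SIZE):

#include <assert.h>

/* payload length of a padding event at `tail` in a `bsize`-byte
 * sub-buffer; header + payload together cover [tail, bsize) exactly */
static unsigned long padding_payload(unsigned long bsize, unsigned long tail)
{
	const unsigned long evnt_hdr_size = 8;	/* stand-in for RB_EVNT_HDR_SIZE */

	return (bsize - tail) - evnt_hdr_size;
}

int main(void)
{
	/* 4080-byte sub-buffer with 4000 bytes written: 80 bytes remain,
	 * 8 go to the padding header and 72 are reported as payload */
	assert(padding_payload(4080, 4000) == 72);
	return 0;
}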
@@ -3469,7 +3475,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	tail = write - info->length;
 
 	/* See if we shot pass the end of this buffer page */
-	if (unlikely(write > BUF_PAGE_SIZE)) {
+	if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
 		check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
 		return rb_move_tail(cpu_buffer, tail, info);
 	}
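
The fast path above adds the event length to the page's write counter first and only then checks whether the end position spilled past the sub-buffer; only the event that crosses the boundary takes the rb_move_tail() slow path. A single-threaded sketch of that check (the real code uses local_add_return() and masks the write counter):

#include <stdbool.h>
#include <stdio.h>

/* reserve `length` bytes at `*write`; false means the event crossed
 * the end of the sub-buffer and the caller must move to a new page */
static bool try_reserve(unsigned long *write, unsigned long length,
			unsigned long subbuf_size)
{
	*write += length;		/* models local_add_return() */
	return *write <= subbuf_size;
}

int main(void)
{
	unsigned long write = 4000;

	printf("%d\n", try_reserve(&write, 64, 4080));	/* 1: fits */
	printf("%d\n", try_reserve(&write, 100, 4080));	/* 0: crosses */
	return 0;
}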
@@ -3600,7 +3606,7 @@ rb_reserve_next_event(struct trace_buffer *buffer,
 	if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
 		add_ts_default = RB_ADD_STAMP_ABSOLUTE;
 		info.length += RB_LEN_TIME_EXTEND;
-		if (info.length > BUF_MAX_DATA_SIZE)
+		if (info.length > cpu_buffer->buffer->max_data_size)
 			goto out_fail;
 	} else {
 		add_ts_default = RB_ADD_STAMP_NONE;
@@ -3675,7 +3681,7 @@ ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
 	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
 		goto out;
 
-	if (unlikely(length > BUF_MAX_DATA_SIZE))
+	if (unlikely(length > buffer->max_data_size))
 		goto out;
 
 	if (unlikely(trace_recursive_lock(cpu_buffer)))
@@ -3825,7 +3831,7 @@ int ring_buffer_write(struct trace_buffer *buffer,
 	if (atomic_read(&cpu_buffer->record_disabled))
 		goto out;
 
-	if (length > BUF_MAX_DATA_SIZE)
+	if (length > buffer->max_data_size)
 		goto out;
 
 	if (unlikely(trace_recursive_lock(cpu_buffer)))
@@ -4405,6 +4411,7 @@ static struct buffer_page *
 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct buffer_page *reader = NULL;
+	unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
 	unsigned long overwrite;
 	unsigned long flags;
 	int nr_loops = 0;
@@ -4540,7 +4547,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 #define USECS_WAIT	1000000
 	for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
 		/* If the write is past the end of page, a writer is still updating it */
-		if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE))
+		if (likely(!reader || rb_page_write(reader) <= bsize))
 			break;
 
 		udelay(1);
@@ -4984,7 +4991,8 @@ ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
 		return NULL;
 
 	/* Holds the entire event: data and meta data */
-	iter->event = kmalloc(BUF_PAGE_SIZE, flags);
+	iter->event_size = buffer->subbuf_size;
+	iter->event = kmalloc(iter->event_size, flags);
 	if (!iter->event) {
 		kfree(iter);
 		return NULL;
@@ -5102,14 +5110,14 @@ unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
 {
 	/*
 	 * Earlier, this method returned
-	 *	BUF_PAGE_SIZE * buffer->nr_pages
+	 *	buffer->subbuf_size * buffer->nr_pages
 	 * Since the nr_pages field is now removed, we have converted this to
 	 * return the per cpu buffer value.
 	 */
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
-	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
+	return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_size);
@@ -5123,8 +5131,8 @@ unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
 {
 	/* If abs timestamp is requested, events have a timestamp too */
 	if (ring_buffer_time_stamp_abs(buffer))
-		return BUF_MAX_DATA_SIZE - RB_LEN_TIME_EXTEND;
-	return BUF_MAX_DATA_SIZE;
+		return buffer->max_data_size - RB_LEN_TIME_EXTEND;
+	return buffer->max_data_size;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
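
With the limit now a per-buffer value, writers can no longer size events against a compile-time constant and should query it at run time. A hypothetical caller sketch (write_blob() is illustrative, not part of the patch), using the exported helper before reserving:

/* hypothetical kernel-context caller, not from this patch */
static int write_blob(struct trace_buffer *buffer, void *data,
		      unsigned long len)
{
	/* per-buffer limit replaces the old BUF_MAX_DATA_SIZE constant */
	if (len > ring_buffer_max_event_size(buffer))
		return -E2BIG;

	return ring_buffer_write(buffer, len, data);
}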
@@ -5730,7 +5738,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
 		/* If there is room at the end of the page to save the
 		 * missed events, then record it there.
 		 */
-		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
+		if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
 			memcpy(&bpage->data[commit], &missed_events,
 			       sizeof(missed_events));
 			local_add(RB_MISSED_STORED, &bpage->commit);
@@ -5742,8 +5750,8 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
 	/*
 	 * This page may be off to user land. Zero it out here.
 	 */
-	if (commit < BUF_PAGE_SIZE)
-		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
+	if (commit < buffer->subbuf_size)
+		memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
 
  out_unlock:
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);