@@ -332,6 +332,7 @@ struct buffer_page {
 	unsigned	 read;		/* index for next read */
 	local_t		 entries;	/* entries on this page */
 	unsigned long	 real_end;	/* real end of data */
+	unsigned	 order;		/* order of the page */
 	struct buffer_data_page *page;	/* Actual data page */
 };
 
@@ -362,7 +363,7 @@ static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
 
 static void free_buffer_page(struct buffer_page *bpage)
 {
-	free_page((unsigned long)bpage->page);
+	free_pages((unsigned long)bpage->page, bpage->order);
 	kfree(bpage);
 }
 
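Background note (not part of the commit): free_page() only releases a single order-0 page, so once a sub-buffer can span 2^order contiguous pages the release path must pass the same order to free_pages(), which is why the page order is now recorded in struct buffer_page. A minimal sketch of the alloc/free pairing this relies on; the helper names are made up for illustration:

	/* Illustrative only: the order must match between allocation and free. */
	static void *subbuf_alloc(int cpu, unsigned int order)
	{
		struct page *page;

		page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, order);
		if (!page)
			return NULL;
		return page_address(page);	/* 2^order physically contiguous pages */
	}

	static void subbuf_free(void *addr, unsigned int order)
	{
		free_pages((unsigned long)addr, order);
	}
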
@@ -1460,10 +1461,12 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
 		list_add(&bpage->list, pages);
 
-		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
+		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags,
+					cpu_buffer->buffer->subbuf_order);
 		if (!page)
 			goto free_pages;
 		bpage->page = page_address(page);
+		bpage->order = cpu_buffer->buffer->subbuf_order;
 		rb_init_page(bpage->page);
 
 		if (user_thread && fatal_signal_pending(current))
@@ -1542,7 +1545,8 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
 	rb_check_bpage(cpu_buffer, bpage);
 
 	cpu_buffer->reader_page = bpage;
-	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+
+	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, cpu_buffer->buffer->subbuf_order);
 	if (!page)
 		goto fail_free_reader;
 	bpage->page = page_address(page);
@@ -1626,6 +1630,7 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 		goto fail_free_buffer;
 
 	/* Default buffer page size - one system page */
+	buffer->subbuf_order = 0;
 	buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
 
 	/* Max payload is buffer page size - header (8bytes) */
@@ -5503,8 +5508,8 @@ void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
 	if (bpage)
 		goto out;
 
-	page = alloc_pages_node(cpu_to_node(cpu),
-				GFP_KERNEL | __GFP_NORETRY, 0);
+	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY,
+				cpu_buffer->buffer->subbuf_order);
 	if (!page)
 		return ERR_PTR(-ENOMEM);
 
@@ -5553,7 +5558,7 @@ void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data
 	local_irq_restore(flags);
 
  out:
-	free_page((unsigned long)bpage);
+	free_pages((unsigned long)bpage, buffer->subbuf_order);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
 
@@ -5813,7 +5818,13 @@ EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
  */
 int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
 {
+	struct ring_buffer_per_cpu **cpu_buffers;
+	int old_order, old_size;
+	int nr_pages;
 	int psize;
+	int bsize;
+	int err;
+	int cpu;
 
 	if (!buffer || order < 0)
 		return -EINVAL;
@@ -5825,12 +5836,67 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
 	if (psize <= BUF_PAGE_HDR_SIZE)
 		return -EINVAL;
 
+	bsize = sizeof(void *) * buffer->cpus;
+	cpu_buffers = kzalloc(bsize, GFP_KERNEL);
+	if (!cpu_buffers)
+		return -ENOMEM;
+
+	old_order = buffer->subbuf_order;
+	old_size = buffer->subbuf_size;
+
+	/* prevent another thread from changing buffer sizes */
+	mutex_lock(&buffer->mutex);
+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all commits have finished */
+	synchronize_rcu();
+
 	buffer->subbuf_order = order;
 	buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
 
-	/* Todo: reset the buffer with the new page size */
+	/* Make sure all new buffers are allocated, before deleting the old ones */
+	for_each_buffer_cpu(buffer, cpu) {
+		if (!cpumask_test_cpu(cpu, buffer->cpumask))
+			continue;
+
+		nr_pages = buffer->buffers[cpu]->nr_pages;
+		cpu_buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
+		if (!cpu_buffers[cpu]) {
+			err = -ENOMEM;
+			goto error;
+		}
+	}
+
+	for_each_buffer_cpu(buffer, cpu) {
+		if (!cpumask_test_cpu(cpu, buffer->cpumask))
+			continue;
+
+		rb_free_cpu_buffer(buffer->buffers[cpu]);
+		buffer->buffers[cpu] = cpu_buffers[cpu];
+	}
+
+	atomic_dec(&buffer->record_disabled);
+	mutex_unlock(&buffer->mutex);
+
+	kfree(cpu_buffers);
 
 	return 0;
+
+error:
+	buffer->subbuf_order = old_order;
+	buffer->subbuf_size = old_size;
+
+	atomic_dec(&buffer->record_disabled);
+	mutex_unlock(&buffer->mutex);
+
+	for_each_buffer_cpu(buffer, cpu) {
+		if (!cpu_buffers[cpu])
+			continue;
+		kfree(cpu_buffers[cpu]);
+	}
+	kfree(cpu_buffers);
+
+	return err;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
 
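Usage sketch (not from this commit): ring_buffer_subbuf_order_set() first allocates a full replacement set of per-CPU buffers at the new order under buffer->mutex with recording disabled, and only then frees the old ones, so a caller just checks the return value; ring_buffer_subbuf_order_get() (visible in the hunk header above) reads the current order back. The caller below is hypothetical:

	/* Hypothetical caller: switch to 2^2 = 4 system pages per sub-buffer. */
	static int example_use_4_page_subbufs(struct trace_buffer *buffer)
	{
		int ret;

		ret = ring_buffer_subbuf_order_set(buffer, 2);
		if (ret)	/* -EINVAL for a bad order, -ENOMEM if reallocation failed */
			return ret;

		pr_info("ring buffer sub-buffer order is now %d\n",
			ring_buffer_subbuf_order_get(buffer));
		return 0;
	}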