@@ -115,16 +115,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
  * Thanks to Peter Zijlstra for suggesting this idea.
  */
 struct buffer_page {
-	union {
-		struct {
-			unsigned long	 flags;		/* mandatory */
-			atomic_t	 _count;	/* mandatory */
-			u64		 time_stamp;	/* page time stamp */
-			unsigned	 size;		/* size of page data */
-			struct list_head list;		/* list of free pages */
-		};
-		struct page page;
-	};
+	u64		 time_stamp;	/* page time stamp */
+	unsigned	 size;		/* size of page data */
+	struct list_head list;		/* list of free pages */
+	void		*page;		/* Actual data page */
 };

 /*
@@ -133,9 +127,9 @@ struct buffer_page {
  */
 static inline void free_buffer_page(struct buffer_page *bpage)
 {
-	reset_page_mapcount(&bpage->page);
-	bpage->page.mapping = NULL;
-	__free_page(&bpage->page);
+	if (bpage->page)
+		__free_page(bpage->page);
+	kfree(bpage);
 }

 /*
@@ -237,11 +231,16 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	unsigned i;

 	for (i = 0; i < nr_pages; i++) {
+		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+				    GFP_KERNEL, cpu_to_node(cpu));
+		if (!page)
+			goto free_pages;
+		list_add(&page->list, &pages);
+
 		addr = __get_free_page(GFP_KERNEL);
 		if (!addr)
 			goto free_pages;
-		page = (struct buffer_page *)virt_to_page(addr);
-		list_add(&page->list, &pages);
+		page->page = (void *)addr;
 	}

 	list_splice(&pages, head);
@@ -262,6 +261,7 @@ static struct ring_buffer_per_cpu *
 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	struct buffer_page *page;
 	unsigned long addr;
 	int ret;

@@ -275,10 +275,17 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	spin_lock_init(&cpu_buffer->lock);
 	INIT_LIST_HEAD(&cpu_buffer->pages);

+	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+			    GFP_KERNEL, cpu_to_node(cpu));
+	if (!page)
+		goto fail_free_buffer;
+
+	cpu_buffer->reader_page = page;
 	addr = __get_free_page(GFP_KERNEL);
 	if (!addr)
-		goto fail_free_buffer;
-	cpu_buffer->reader_page = (struct buffer_page *)virt_to_page(addr);
+		goto fail_free_reader;
+	page->page = (void *)addr;
+
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
 	cpu_buffer->reader_page->size = 0;

@@ -523,11 +530,16 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)

 	for_each_buffer_cpu(buffer, cpu) {
 		for (i = 0; i < new_pages; i++) {
+			page = kzalloc_node(ALIGN(sizeof(*page),
+						  cache_line_size()),
+					    GFP_KERNEL, cpu_to_node(cpu));
+			if (!page)
+				goto free_pages;
+			list_add(&page->list, &pages);
 			addr = __get_free_page(GFP_KERNEL);
 			if (!addr)
 				goto free_pages;
-			page = (struct buffer_page *)virt_to_page(addr);
-			list_add(&page->list, &pages);
+			page->page = (void *)addr;
 		}
 	}

@@ -567,9 +579,7 @@ static inline int rb_null_event(struct ring_buffer_event *event)

 static inline void *rb_page_index(struct buffer_page *page, unsigned index)
 {
-	void *addr = page_address(&page->page);
-
-	return addr + index;
+	return page->page + index;
 }

 static inline struct ring_buffer_event *
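
The net effect of this patch is that each ring-buffer page becomes two separate allocations: a small struct buffer_page holding the metadata (allocated with kzalloc_node(), cache-line aligned, on the owning CPU's node) and the actual data page from __get_free_page(), tied together through the new void *page member instead of overlaying the metadata on struct page. Below is a minimal sketch of that allocate path; the helper name rb_alloc_buffer_page() is hypothetical, since the patch open-codes this logic in rb_allocate_pages(), rb_allocate_cpu_buffer() and ring_buffer_resize().

/*
 * Illustrative sketch only, not part of the patch: the allocation
 * pairing implied by the new struct buffer_page layout.
 */
static struct buffer_page *rb_alloc_buffer_page(int cpu)
{
	struct buffer_page *page;
	unsigned long addr;

	/* Metadata gets its own small, cache-line aligned, node-local allocation. */
	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!page)
		return NULL;

	/* The data page is a separate full page; only a pointer to it is kept. */
	addr = __get_free_page(GFP_KERNEL);
	if (!addr) {
		kfree(page);
		return NULL;
	}
	page->page = (void *)addr;

	return page;
}

Freeing mirrors this split in free_buffer_page() above: __free_page() releases the data page and kfree() releases the metadata.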