
Commit e4c2ce8

rostedt authored and Ingo Molnar committed
ring_buffer: allocate buffer page pointer
The current method of overlaying the page frame as the buffer page pointer can be very dangerous and limits our ability to do other things with a page from the buffer, like send it off to disk.

This patch allocates the buffer_page instead of overlaying the page's page frame. The use of the buffer_page has hardly changed due to this.

Signed-off-by: Steven Rostedt <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
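For quick reference, the resulting per-page descriptor and its teardown path, condensed from the hunks below (kernel context; headers and surrounding code omitted), look like this:

struct buffer_page {
	u64		 time_stamp;	/* page time stamp */
	unsigned	 size;		/* size of page data */
	struct list_head list;		/* list of free pages */
	void		*page;		/* actual data page, from __get_free_page() */
};

static inline void free_buffer_page(struct buffer_page *bpage)
{
	/* the data page and the descriptor are now released separately */
	if (bpage->page)
		__free_page(bpage->page);
	kfree(bpage);
}

Each descriptor is allocated with kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), GFP_KERNEL, cpu_to_node(cpu)) and its data page with __get_free_page(GFP_KERNEL), as the allocation sites in the diff show.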
1 parent 7104f30 commit e4c2ce8

File tree

1 file changed: +32 -22 lines changed


kernel/trace/ring_buffer.c

Lines changed: 32 additions & 22 deletions
@@ -115,16 +115,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
  * Thanks to Peter Zijlstra for suggesting this idea.
  */
 struct buffer_page {
-	union {
-		struct {
-			unsigned long	 flags;		/* mandatory */
-			atomic_t	 _count;	/* mandatory */
-			u64		 time_stamp;	/* page time stamp */
-			unsigned	 size;		/* size of page data */
-			struct list_head list;		/* list of free pages */
-		};
-		struct page	 page;
-	};
+	u64		 time_stamp;	/* page time stamp */
+	unsigned	 size;		/* size of page data */
+	struct list_head list;		/* list of free pages */
+	void		*page;		/* Actual data page */
 };
 
 /*
@@ -133,9 +127,9 @@ struct buffer_page {
  */
 static inline void free_buffer_page(struct buffer_page *bpage)
 {
-	reset_page_mapcount(&bpage->page);
-	bpage->page.mapping = NULL;
-	__free_page(&bpage->page);
+	if (bpage->page)
+		__free_page(bpage->page);
+	kfree(bpage);
 }
 
 /*
@@ -237,11 +231,16 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	unsigned i;
 
 	for (i = 0; i < nr_pages; i++) {
+		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+				    GFP_KERNEL, cpu_to_node(cpu));
+		if (!page)
+			goto free_pages;
+		list_add(&page->list, &pages);
+
 		addr = __get_free_page(GFP_KERNEL);
 		if (!addr)
 			goto free_pages;
-		page = (struct buffer_page *)virt_to_page(addr);
-		list_add(&page->list, &pages);
+		page->page = (void *)addr;
 	}
 
 	list_splice(&pages, head);
@@ -262,6 +261,7 @@ static struct ring_buffer_per_cpu *
 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	struct buffer_page *page;
 	unsigned long addr;
 	int ret;
 
@@ -275,10 +275,17 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	spin_lock_init(&cpu_buffer->lock);
 	INIT_LIST_HEAD(&cpu_buffer->pages);
 
+	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+			    GFP_KERNEL, cpu_to_node(cpu));
+	if (!page)
+		goto fail_free_buffer;
+
+	cpu_buffer->reader_page = page;
 	addr = __get_free_page(GFP_KERNEL);
 	if (!addr)
-		goto fail_free_buffer;
-	cpu_buffer->reader_page = (struct buffer_page *)virt_to_page(addr);
+		goto fail_free_reader;
+	page->page = (void *)addr;
+
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
 	cpu_buffer->reader_page->size = 0;
 
@@ -523,11 +530,16 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 
 	for_each_buffer_cpu(buffer, cpu) {
 		for (i = 0; i < new_pages; i++) {
+			page = kzalloc_node(ALIGN(sizeof(*page),
+						  cache_line_size()),
+					    GFP_KERNEL, cpu_to_node(cpu));
+			if (!page)
+				goto free_pages;
+			list_add(&page->list, &pages);
 			addr = __get_free_page(GFP_KERNEL);
 			if (!addr)
 				goto free_pages;
-			page = (struct buffer_page *)virt_to_page(addr);
-			list_add(&page->list, &pages);
+			page->page = (void *)addr;
 		}
 	}
 
@@ -567,9 +579,7 @@ static inline int rb_null_event(struct ring_buffer_event *event)
 
 static inline void *rb_page_index(struct buffer_page *page, unsigned index)
 {
-	void *addr = page_address(&page->page);
-
-	return addr + index;
+	return page->page + index;
 }
 
 static inline struct ring_buffer_event *
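For readers outside the kernel tree, the split between a small descriptor and a separately allocated data page can be sketched in plain userspace C. The names, sizes, and malloc/free calls below are hypothetical stand-ins that only mirror the shape of the change, not the kernel API:

#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for struct buffer_page: a small descriptor
 * that points at a separately allocated data block */
struct demo_buffer_page {
	unsigned long	 time_stamp;
	unsigned	 size;
	void		*page;		/* data block allocated on its own */
};

static struct demo_buffer_page *demo_alloc_page(size_t page_size)
{
	struct demo_buffer_page *bpage = calloc(1, sizeof(*bpage));

	if (!bpage)
		return NULL;
	bpage->page = malloc(page_size);
	if (!bpage->page) {
		free(bpage);
		return NULL;
	}
	return bpage;
}

static void demo_free_page(struct demo_buffer_page *bpage)
{
	/* mirror free_buffer_page(): release the data page, then the descriptor */
	if (bpage->page)
		free(bpage->page);
	free(bpage);
}

int main(void)
{
	struct demo_buffer_page *bpage = demo_alloc_page(4096);

	if (!bpage)
		return 1;
	memcpy(bpage->page, "event data", 11);	/* descriptor and data live apart */
	bpage->size = 11;
	demo_free_page(bpage);
	return 0;
}

Because the descriptor no longer overlays struct page, the data page can be handed off to other consumers (for example, written out to disk, as the commit message notes) without disturbing the page frame's own bookkeeping.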
