
Commit 09661f7

petrpavlu authored and rostedt committed
ring-buffer: Fix reader locking when changing the sub buffer order
The function ring_buffer_subbuf_order_set() updates each ring_buffer_per_cpu and installs new sub buffers that match the requested page order. This operation may be invoked concurrently with readers that rely on some of the modified data, such as the head bit (RB_PAGE_HEAD), or the ring_buffer_per_cpu.pages and reader_page pointers. However, no exclusive access is acquired by ring_buffer_subbuf_order_set(). Modifying the mentioned data while a reader also operates on them can then result in incorrect memory access and various crashes.

Fix the problem by taking the reader_lock when updating a specific ring_buffer_per_cpu in ring_buffer_subbuf_order_set().

Link: https://lore.kernel.org/linux-trace-kernel/[email protected]/
Link: https://lore.kernel.org/linux-trace-kernel/[email protected]/
Link: https://lore.kernel.org/linux-trace-kernel/[email protected]/

Cc: Masami Hiramatsu <[email protected]>
Cc: Mathieu Desnoyers <[email protected]>
Link: https://lore.kernel.org/[email protected]
Fixes: 8e7b58c ("ring-buffer: Just update the subbuffers when changing their allocation order")
Signed-off-by: Petr Pavlu <[email protected]>
Signed-off-by: Steven Rostedt (Google) <[email protected]>
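For readers skimming the change below, the core pattern is: swap the per-CPU page lists while holding reader_lock, and only free the detached pages after the lock has been released, as the new in-code comment describes. The following is a minimal, hypothetical userspace C sketch of that pattern; the names node, pages_lock, and replace_pages are illustrative only and do not appear in the kernel, which uses list_head, reader_lock, and free_buffer_page() instead.

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-in for a sub buffer page. */
struct node {
	struct node *next;
};

static pthread_mutex_t pages_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pages;	/* shared list, protected by pages_lock */

/*
 * Replace the shared list with new_list. The swap happens under the
 * lock, so concurrent readers never observe a half-updated structure;
 * the old nodes are freed only after the lock is dropped, which keeps
 * the critical section short.
 */
static void replace_pages(struct node *new_list)
{
	struct node *old;

	pthread_mutex_lock(&pages_lock);
	old = pages;
	pages = new_list;
	pthread_mutex_unlock(&pages_lock);

	/* Free the detached nodes outside the critical section. */
	while (old) {
		struct node *next = old->next;

		free(old);
		old = next;
	}
}

int main(void)
{
	/* Install a one-node list, swap it for another, then tear down. */
	replace_pages(calloc(1, sizeof(struct node)));
	replace_pages(calloc(1, sizeof(struct node)));
	replace_pages(NULL);
	return 0;
}

The actual fix applies this shape to ring_buffer_subbuf_order_set(): the old pages list and reader_page are collected on a local old_pages list under reader_lock, and freed after raw_spin_unlock_irqrestore().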
1 parent 2cf9733 commit 09661f7

File tree

1 file changed: +26 -18


kernel/trace/ring_buffer.c

Lines changed: 26 additions & 18 deletions
@@ -6728,39 +6728,38 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
 	}
 
 	for_each_buffer_cpu(buffer, cpu) {
+		struct buffer_data_page *old_free_data_page;
+		struct list_head old_pages;
+		unsigned long flags;
 
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 			continue;
 
 		cpu_buffer = buffer->buffers[cpu];
 
+		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
 		/* Clear the head bit to make the link list normal to read */
 		rb_head_page_deactivate(cpu_buffer);
 
-		/* Now walk the list and free all the old sub buffers */
-		list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
-			list_del_init(&bpage->list);
-			free_buffer_page(bpage);
-		}
-		/* The above loop stopped an the last page needing to be freed */
-		bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
-		free_buffer_page(bpage);
-
-		/* Free the current reader page */
-		free_buffer_page(cpu_buffer->reader_page);
+		/*
+		 * Collect buffers from the cpu_buffer pages list and the
+		 * reader_page on old_pages, so they can be freed later when not
+		 * under a spinlock. The pages list is a linked list with no
+		 * head, adding old_pages turns it into a regular list with
+		 * old_pages being the head.
+		 */
+		list_add(&old_pages, cpu_buffer->pages);
+		list_add(&cpu_buffer->reader_page->list, &old_pages);
 
 		/* One page was allocated for the reader page */
 		cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
 						     struct buffer_page, list);
 		list_del_init(&cpu_buffer->reader_page->list);
 
-		/* The cpu_buffer pages are a link list with no head */
+		/* Install the new pages, remove the head from the list */
 		cpu_buffer->pages = cpu_buffer->new_pages.next;
-		cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
-		cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
-
-		/* Clear the new_pages list */
-		INIT_LIST_HEAD(&cpu_buffer->new_pages);
+		list_del_init(&cpu_buffer->new_pages);
 
 		cpu_buffer->head_page
 			= list_entry(cpu_buffer->pages, struct buffer_page, list);
@@ -6769,11 +6768,20 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
 		cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
 		cpu_buffer->nr_pages_to_update = 0;
 
-		free_pages((unsigned long)cpu_buffer->free_page, old_order);
+		old_free_data_page = cpu_buffer->free_page;
 		cpu_buffer->free_page = NULL;
 
 		rb_head_page_activate(cpu_buffer);
 
+		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+		/* Free old sub buffers */
+		list_for_each_entry_safe(bpage, tmp, &old_pages, list) {
+			list_del_init(&bpage->list);
+			free_buffer_page(bpage);
+		}
+		free_pages((unsigned long)old_free_data_page, old_order);
+
 		rb_check_pages(cpu_buffer);
 	}
 