Skip to content

Commit 2f87d09

Browse files
committed
Merge tag 'trace-ringbuffer-v6.12-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull ring-buffer fixes from Steven Rostedt: - Fix ref counter of buffers assigned at boot up A tracing instance can be created from the kernel command line. If it maps to memory, it is considered permanent and should not be deleted, or bad things can happen. If it is not mapped to memory, then the user is fine to delete it via rmdir from the instances directory. But the ref counts assumed 0 was free to remove and greater than zero was not. But this was not the case. When an instance is created, it should have the reference of 1, and if it should not be removed, it must be greater than 1. The boot up code set normal instances with a ref count of 0, which could get removed if something accessed it and then released it. And memory mapped instances had a ref count of 1 which meant it could be deleted, and bad things happen. Keep normal instances ref count as 1, and set memory mapped instances ref count to 2. - Protect sub buffer size (order) updates from other modifications When a ring buffer is changing the size of its sub-buffers, no other operations should be performed on the ring buffer. That includes reading it. But the locking only grabbed the buffer->mutex that keeps some operations from touching the ring buffer. It also must hold the cpu_buffer->reader_lock as well when updates happen as other paths use that to do some operations on the ring buffer. * tag 'trace-ringbuffer-v6.12-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: ring-buffer: Fix reader locking when changing the sub buffer order ring-buffer: Fix refcount setting of boot mapped buffers
2 parents bdc7276 + 09661f7 commit 2f87d09

File tree

2 files changed

+29
-21
lines changed

2 files changed

+29
-21
lines changed

kernel/trace/ring_buffer.c

Lines changed: 26 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -6728,39 +6728,38 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
67286728
}
67296729

67306730
for_each_buffer_cpu(buffer, cpu) {
6731+
struct buffer_data_page *old_free_data_page;
6732+
struct list_head old_pages;
6733+
unsigned long flags;
67316734

67326735
if (!cpumask_test_cpu(cpu, buffer->cpumask))
67336736
continue;
67346737

67356738
cpu_buffer = buffer->buffers[cpu];
67366739

6740+
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
6741+
67376742
/* Clear the head bit to make the link list normal to read */
67386743
rb_head_page_deactivate(cpu_buffer);
67396744

6740-
/* Now walk the list and free all the old sub buffers */
6741-
list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
6742-
list_del_init(&bpage->list);
6743-
free_buffer_page(bpage);
6744-
}
6745-
/* The above loop stopped at the last page needing to be freed */
6746-
bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
6747-
free_buffer_page(bpage);
6748-
6749-
/* Free the current reader page */
6750-
free_buffer_page(cpu_buffer->reader_page);
6745+
/*
6746+
* Collect buffers from the cpu_buffer pages list and the
6747+
* reader_page on old_pages, so they can be freed later when not
6748+
* under a spinlock. The pages list is a linked list with no
6749+
* head, adding old_pages turns it into a regular list with
6750+
* old_pages being the head.
6751+
*/
6752+
list_add(&old_pages, cpu_buffer->pages);
6753+
list_add(&cpu_buffer->reader_page->list, &old_pages);
67516754

67526755
/* One page was allocated for the reader page */
67536756
cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
67546757
struct buffer_page, list);
67556758
list_del_init(&cpu_buffer->reader_page->list);
67566759

6757-
/* The cpu_buffer pages are a link list with no head */
6760+
/* Install the new pages, remove the head from the list */
67586761
cpu_buffer->pages = cpu_buffer->new_pages.next;
6759-
cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
6760-
cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
6761-
6762-
/* Clear the new_pages list */
6763-
INIT_LIST_HEAD(&cpu_buffer->new_pages);
6762+
list_del_init(&cpu_buffer->new_pages);
67646763

67656764
cpu_buffer->head_page
67666765
= list_entry(cpu_buffer->pages, struct buffer_page, list);
@@ -6769,11 +6768,20 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
67696768
cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
67706769
cpu_buffer->nr_pages_to_update = 0;
67716770

6772-
free_pages((unsigned long)cpu_buffer->free_page, old_order);
6771+
old_free_data_page = cpu_buffer->free_page;
67736772
cpu_buffer->free_page = NULL;
67746773

67756774
rb_head_page_activate(cpu_buffer);
67766775

6776+
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
6777+
6778+
/* Free old sub buffers */
6779+
list_for_each_entry_safe(bpage, tmp, &old_pages, list) {
6780+
list_del_init(&bpage->list);
6781+
free_buffer_page(bpage);
6782+
}
6783+
free_pages((unsigned long)old_free_data_page, old_order);
6784+
67776785
rb_check_pages(cpu_buffer);
67786786
}
67796787

kernel/trace/trace.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10621,10 +10621,10 @@ __init static void enable_instances(void)
1062110621
* cannot be deleted by user space, so keep the reference
1062210622
* to it.
1062310623
*/
10624-
if (start)
10624+
if (start) {
1062510625
tr->flags |= TRACE_ARRAY_FL_BOOT;
10626-
else
10627-
trace_array_put(tr);
10626+
tr->ref++;
10627+
}
1062810628

1062910629
while ((tok = strsep(&curr_str, ","))) {
1063010630
early_enable_events(tr, tok, true);

0 commit comments

Comments
 (0)