Skip to content

Commit dd4900d

Browse files
committed
ring-buffer: Allow mapped field to be set without mapping
In preparation for having the ring buffer mapped to a dedicated location, which will have the same restrictions as user space memory mapped buffers, allow it to use the "mapped" field of the ring_buffer_per_cpu structure without having the user space meta page mapping. When this starts using the mapped field, it will need to handle adding a user space mapping (and removing it) from a ring buffer that is using a dedicated memory range. Link: https://lkml.kernel.org/r/[email protected] Cc: Masami Hiramatsu <[email protected]> Cc: Mark Rutland <[email protected]> Cc: Mathieu Desnoyers <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Vincent Donnefort <[email protected]> Cc: Joel Fernandes <[email protected]> Cc: Daniel Bristot de Oliveira <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Vineeth Pillai <[email protected]> Cc: Youssef Esmat <[email protected]> Cc: Beau Belgrave <[email protected]> Cc: Alexander Graf <[email protected]> Cc: Baoquan He <[email protected]> Cc: Borislav Petkov <[email protected]> Cc: "Paul E. McKenney" <[email protected]> Cc: David Howells <[email protected]> Cc: Mike Rapoport <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Tony Luck <[email protected]> Cc: Guenter Roeck <[email protected]> Cc: Ross Zwisler <[email protected]> Cc: Kees Cook <[email protected]> Signed-off-by: Steven Rostedt (Google) <[email protected]>
1 parent c3f38fa commit dd4900d

File tree

1 file changed

+27
-10
lines changed

1 file changed

+27
-10
lines changed

kernel/trace/ring_buffer.c

Lines changed: 27 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -491,6 +491,7 @@ struct ring_buffer_per_cpu {
491491
unsigned long pages_removed;
492492

493493
unsigned int mapped;
494+
unsigned int user_mapped; /* user space mapping */
494495
struct mutex mapping_lock;
495496
unsigned long *subbuf_ids; /* ID to subbuf VA */
496497
struct trace_buffer_meta *meta_page;
@@ -5224,6 +5225,9 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
52245225
{
52255226
struct trace_buffer_meta *meta = cpu_buffer->meta_page;
52265227

5228+
if (!meta)
5229+
return;
5230+
52275231
meta->reader.read = cpu_buffer->reader_page->read;
52285232
meta->reader.id = cpu_buffer->reader_page->id;
52295233
meta->reader.lost_events = cpu_buffer->lost_events;
@@ -5280,7 +5284,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
52805284
cpu_buffer->lost_events = 0;
52815285
cpu_buffer->last_overrun = 0;
52825286

5283-
if (cpu_buffer->mapped)
5287+
if (cpu_buffer->user_mapped)
52845288
rb_update_meta_page(cpu_buffer);
52855289

52865290
rb_head_page_activate(cpu_buffer);
@@ -6167,7 +6171,7 @@ rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)
61676171

61686172
mutex_lock(&cpu_buffer->mapping_lock);
61696173

6170-
if (!cpu_buffer->mapped) {
6174+
if (!cpu_buffer->user_mapped) {
61716175
mutex_unlock(&cpu_buffer->mapping_lock);
61726176
return ERR_PTR(-ENODEV);
61736177
}
@@ -6191,19 +6195,26 @@ static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
61916195

61926196
lockdep_assert_held(&cpu_buffer->mapping_lock);
61936197

6198+
/* mapped is always greater or equal to user_mapped */
6199+
if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped))
6200+
return -EINVAL;
6201+
61946202
if (inc && cpu_buffer->mapped == UINT_MAX)
61956203
return -EBUSY;
61966204

6197-
if (WARN_ON(!inc && cpu_buffer->mapped == 0))
6205+
if (WARN_ON(!inc && cpu_buffer->user_mapped == 0))
61986206
return -EINVAL;
61996207

62006208
mutex_lock(&cpu_buffer->buffer->mutex);
62016209
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
62026210

6203-
if (inc)
6211+
if (inc) {
6212+
cpu_buffer->user_mapped++;
62046213
cpu_buffer->mapped++;
6205-
else
6214+
} else {
6215+
cpu_buffer->user_mapped--;
62066216
cpu_buffer->mapped--;
6217+
}
62076218

62086219
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
62096220
mutex_unlock(&cpu_buffer->buffer->mutex);
@@ -6328,7 +6339,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
63286339

63296340
mutex_lock(&cpu_buffer->mapping_lock);
63306341

6331-
if (cpu_buffer->mapped) {
6342+
if (cpu_buffer->user_mapped) {
63326343
err = __rb_map_vma(cpu_buffer, vma);
63336344
if (!err)
63346345
err = __rb_inc_dec_mapped(cpu_buffer, true);
@@ -6359,12 +6370,15 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
63596370
*/
63606371
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
63616372
rb_setup_ids_meta_page(cpu_buffer, subbuf_ids);
6373+
63626374
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
63636375

63646376
err = __rb_map_vma(cpu_buffer, vma);
63656377
if (!err) {
63666378
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
6367-
cpu_buffer->mapped = 1;
6379+
/* This is the first time it is mapped by user */
6380+
cpu_buffer->mapped++;
6381+
cpu_buffer->user_mapped = 1;
63686382
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
63696383
} else {
63706384
kfree(cpu_buffer->subbuf_ids);
@@ -6392,18 +6406,21 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
63926406

63936407
mutex_lock(&cpu_buffer->mapping_lock);
63946408

6395-
if (!cpu_buffer->mapped) {
6409+
if (!cpu_buffer->user_mapped) {
63966410
err = -ENODEV;
63976411
goto out;
6398-
} else if (cpu_buffer->mapped > 1) {
6412+
} else if (cpu_buffer->user_mapped > 1) {
63996413
__rb_inc_dec_mapped(cpu_buffer, false);
64006414
goto out;
64016415
}
64026416

64036417
mutex_lock(&buffer->mutex);
64046418
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
64056419

6406-
cpu_buffer->mapped = 0;
6420+
/* This is the last user space mapping */
6421+
if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped))
6422+
cpu_buffer->mapped--;
6423+
cpu_buffer->user_mapped = 0;
64076424

64086425
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
64096426

0 commit comments

Comments (0)