@@ -491,6 +491,7 @@ struct ring_buffer_per_cpu {
 	unsigned long			pages_removed;
 
 	unsigned int			mapped;
+	unsigned int			user_mapped;	/* user space mapping */
 	struct mutex			mapping_lock;
 	unsigned long			*subbuf_ids;	/* ID to subbuf VA */
 	struct trace_buffer_meta	*meta_page;
@@ -5224,6 +5225,9 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct trace_buffer_meta *meta = cpu_buffer->meta_page;
 
+	if (!meta)
+		return;
+
 	meta->reader.read = cpu_buffer->reader_page->read;
 	meta->reader.id = cpu_buffer->reader_page->id;
 	meta->reader.lost_events = cpu_buffer->lost_events;
@@ -5280,7 +5284,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->lost_events = 0;
 	cpu_buffer->last_overrun = 0;
 
-	if (cpu_buffer->mapped)
+	if (cpu_buffer->user_mapped)
 		rb_update_meta_page(cpu_buffer);
 
 	rb_head_page_activate(cpu_buffer);
@@ -6167,7 +6171,7 @@ rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)
 
 	mutex_lock(&cpu_buffer->mapping_lock);
 
-	if (!cpu_buffer->mapped) {
+	if (!cpu_buffer->user_mapped) {
 		mutex_unlock(&cpu_buffer->mapping_lock);
 		return ERR_PTR(-ENODEV);
 	}
@@ -6191,19 +6195,26 @@ static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
 
 	lockdep_assert_held(&cpu_buffer->mapping_lock);
 
+	/* mapped is always greater or equal to user_mapped */
+	if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped))
+		return -EINVAL;
+
 	if (inc && cpu_buffer->mapped == UINT_MAX)
 		return -EBUSY;
 
-	if (WARN_ON(!inc && cpu_buffer->mapped == 0))
+	if (WARN_ON(!inc && cpu_buffer->user_mapped == 0))
 		return -EINVAL;
 
 	mutex_lock(&cpu_buffer->buffer->mutex);
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
-	if (inc)
+	if (inc) {
+		cpu_buffer->user_mapped++;
 		cpu_buffer->mapped++;
-	else
+	} else {
+		cpu_buffer->user_mapped--;
 		cpu_buffer->mapped--;
+	}
 
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 	mutex_unlock(&cpu_buffer->buffer->mutex);
@@ -6328,7 +6339,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
 
 	mutex_lock(&cpu_buffer->mapping_lock);
 
-	if (cpu_buffer->mapped) {
+	if (cpu_buffer->user_mapped) {
 		err = __rb_map_vma(cpu_buffer, vma);
 		if (!err)
 			err = __rb_inc_dec_mapped(cpu_buffer, true);
@@ -6359,12 +6370,15 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
 	 */
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_setup_ids_meta_page(cpu_buffer, subbuf_ids);
+
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	err = __rb_map_vma(cpu_buffer, vma);
 	if (!err) {
 		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-		cpu_buffer->mapped = 1;
+		/* This is the first time it is mapped by user */
+		cpu_buffer->mapped++;
+		cpu_buffer->user_mapped = 1;
 		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 	} else {
 		kfree(cpu_buffer->subbuf_ids);
@@ -6392,18 +6406,21 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
 
 	mutex_lock(&cpu_buffer->mapping_lock);
 
-	if (!cpu_buffer->mapped) {
+	if (!cpu_buffer->user_mapped) {
 		err = -ENODEV;
 		goto out;
-	} else if (cpu_buffer->mapped > 1) {
+	} else if (cpu_buffer->user_mapped > 1) {
 		__rb_inc_dec_mapped(cpu_buffer, false);
 		goto out;
 	}
 
 	mutex_lock(&buffer->mutex);
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
-	cpu_buffer->mapped = 0;
+	/* This is the last user space mapping */
+	if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped))
+		cpu_buffer->mapped--;
+	cpu_buffer->user_mapped = 0;
 
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
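
To make the intent of the two counters easier to follow, here is a minimal user-space sketch of the bookkeeping the diff introduces: user_mapped counts user-space mmap() instances, mapped is the wider counter, and __rb_inc_dec_mapped() asserts the invariant mapped >= user_mapped. This is an illustration only, not kernel code; fake_cpu_buffer, first_user_map(), map_user() and unmap_user() are made-up names.

/*
 * User-space sketch of the mapped/user_mapped counter pair.
 * Mirrors the checks in __rb_inc_dec_mapped() and the first-map
 * path in ring_buffer_map(); names are hypothetical.
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

struct fake_cpu_buffer {
	unsigned int mapped;		/* all mappings (never below user_mapped) */
	unsigned int user_mapped;	/* user-space mmap() instances only */
};

/* First user mapping: both counters account for it, as in ring_buffer_map(). */
static void first_user_map(struct fake_cpu_buffer *b)
{
	b->mapped++;
	b->user_mapped = 1;
}

/* Additional user mapping: mirrors __rb_inc_dec_mapped(..., true). */
static int map_user(struct fake_cpu_buffer *b)
{
	if (b->mapped < b->user_mapped)
		return -1;		/* invariant broken */
	if (b->mapped == UINT_MAX)
		return -1;		/* would overflow */
	b->user_mapped++;
	b->mapped++;
	return 0;
}

/* Dropping a user mapping: mirrors __rb_inc_dec_mapped(..., false). */
static int unmap_user(struct fake_cpu_buffer *b)
{
	if (b->user_mapped == 0)
		return -1;
	b->user_mapped--;
	b->mapped--;
	return 0;
}

int main(void)
{
	struct fake_cpu_buffer b = { 0 };

	first_user_map(&b);	/* mapped = 1, user_mapped = 1 */
	map_user(&b);		/* mapped = 2, user_mapped = 2 */
	unmap_user(&b);		/* mapped = 1, user_mapped = 1 */

	assert(b.mapped >= b.user_mapped);
	printf("mapped=%u user_mapped=%u\n", b.mapped, b.user_mapped);
	return 0;
}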