 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
+#include <linux/irq_work.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
@@ -84,6 +85,14 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static DEFINE_PER_CPU(bool, trace_cmdline_save);
 
+/*
+ * When a reader is waiting for data, this variable is
+ * set to true.
+ */
+static bool trace_wakeup_needed;
+
+static struct irq_work trace_work_wakeup;
+
 /*
  * Kill all tracing for good (never come back).
  * It is initialized to 1 but will turn to zero if the initialization
@@ -329,12 +338,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 static int trace_stop_count;
 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 
-static void wakeup_work_handler(struct work_struct *work)
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Runs from irq_work to wake up any task that is blocked on the
+ * trace_wait queue. This is used with trace_poll for tasks polling the
+ * trace.
+ */
+static void trace_wake_up(struct irq_work *work)
 {
-	wake_up(&trace_wait);
-}
+	wake_up_all(&trace_wait);
 
-static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
+}
 
 /**
  * tracing_on - enable tracing buffers
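
For reference, here is a minimal, self-contained sketch of the irq_work pattern this patch adopts. It is not part of the patch, and every name in it (demo_wait, demo_wakeup, demo_wake, demo_event) is hypothetical: the handler runs later in a context where taking the waitqueue lock is safe, so the hot path, which may be an event in NMI or another critical section, only has to queue the work.

#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);	/* hypothetical wait queue */
static struct irq_work demo_wakeup;

/* Runs from irq_work context, where wake_up_all() is safe to call. */
static void demo_wake(struct irq_work *work)
{
	wake_up_all(&demo_wait);
}

/* Hot path (possibly NMI or a critical section): only queues the work. */
static void demo_event(void)
{
	irq_work_queue(&demo_wakeup);
}

static int __init demo_init(void)
{
	init_irq_work(&demo_wakeup, demo_wake);
	demo_event();
	return 0;
}

static void __exit demo_exit(void)
{
	irq_work_sync(&demo_wakeup);	/* wait for any pending work to finish */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This mirrors how trace_wake_up() and init_irq_work(&trace_work_wakeup, ...) are wired together elsewhere in the patch.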
@@ -389,22 +404,6 @@ int tracing_is_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
-/**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-void trace_wake_up(void)
-{
-	const unsigned long delay = msecs_to_jiffies(2);
-
-	if (trace_flags & TRACE_ITER_BLOCK)
-		return;
-	schedule_delayed_work(&wakeup_work, delay);
-}
-
 static int __init set_buf_size(char *str)
 {
 	unsigned long buf_size;
@@ -753,6 +752,40 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
+static void default_wait_pipe(struct trace_iterator *iter)
+{
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+	/*
+	 * Events can happen in critical sections where
+	 * checking a work queue can cause deadlocks.
+	 * After adding a task to the wait queue, this flag is set
+	 * so that events will try to wake up the queue
+	 * using irq_work.
+	 *
+	 * We don't clear it even if the buffer is no longer
+	 * empty. The flag only causes the next event to run
+	 * irq_work to do the work queue wake up. The worst
+	 * that can happen if we race with !trace_empty() is that
+	 * an event will cause an irq_work to try to wake up
+	 * an empty queue.
+	 *
+	 * There's no reason to protect this flag either, as
+	 * the work queue and irq_work logic will do the necessary
+	 * synchronization for the wake ups. The only thing
+	 * that is necessary is that the wake up happens after
+	 * a task has been queued. Spurious wake ups are OK.
+	 */
+	trace_wakeup_needed = true;
+
+	if (trace_empty(iter))
+		schedule();
+
+	finish_wait(&trace_wait, &wait);
+}
+
 /**
  * register_tracer - register a tracer with the ftrace system.
  * @type - the plugin for the tracer
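
The reasoning in the comment above can be made concrete with a rough userspace analogy, assuming nothing from the kernel tree: the reader announces that it wants a wakeup before it checks for data, and a writer wakes it only when that announcement is set. The sketch below uses a pthread mutex and condition variable where the kernel relies on prepare_to_wait()/irq_work ordering and simply tolerates spurious wakeups; all names are made up for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Userspace stand-ins for trace_wait, trace_wakeup_needed and the buffer. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_cond = PTHREAD_COND_INITIALIZER;
static bool wakeup_needed;
static int  buffered_events;

/* Reader side: analogous to default_wait_pipe(). */
static void *reader(void *arg)
{
	pthread_mutex_lock(&lock);
	wakeup_needed = true;			/* ask writers to wake us */
	while (buffered_events == 0)		/* "trace_empty()" */
		pthread_cond_wait(&wait_cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("reader: woke up with %d event(s)\n", buffered_events);
	return NULL;
}

/* Writer side: analogous to __buffer_unlock_commit(). */
static void writer_commit(void)
{
	pthread_mutex_lock(&lock);
	buffered_events++;
	if (wakeup_needed) {			/* only wake when someone waits */
		wakeup_needed = false;
		pthread_cond_broadcast(&wait_cond);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	sleep(1);				/* let the reader block first */
	writer_commit();
	pthread_join(t, NULL);
	return 0;
}

Compile with `cc -pthread`; the reader blocks until writer_commit() publishes an event, clears the flag and signals.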
@@ -1156,30 +1189,32 @@ void
 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
 {
 	__this_cpu_write(trace_cmdline_save, true);
+	if (trace_wakeup_needed) {
+		trace_wakeup_needed = false;
+		/* irq_work_queue() supplies its own memory barriers */
+		irq_work_queue(&trace_work_wakeup);
+	}
 	ring_buffer_unlock_commit(buffer, event);
 }
 
 static inline void
 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
 			     struct ring_buffer_event *event,
-			     unsigned long flags, int pc,
-			     int wake)
+			     unsigned long flags, int pc)
 {
 	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack(buffer, flags, 6, pc);
 	ftrace_trace_userstack(buffer, flags, pc);
-
-	if (wake)
-		trace_wake_up();
 }
 
 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
 
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
@@ -1196,29 +1231,21 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event,
 					 unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
-				       struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
-{
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
-}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
-
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
-					    struct ring_buffer_event *event,
-					    unsigned long flags, int pc,
-					    struct pt_regs *regs)
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+				     struct ring_buffer_event *event,
+				     unsigned long flags, int pc,
+				     struct pt_regs *regs)
 {
 	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
 	ftrace_trace_userstack(buffer, flags, pc);
 }
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
 
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event)
@@ -3354,19 +3381,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 	}
 }
 
-
-void default_wait_pipe(struct trace_iterator *iter)
-{
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
-
-	if (trace_empty(iter))
-		schedule();
-
-	finish_wait(&trace_wait, &wait);
-}
-
 /*
  * This is a make-shift waitqueue.
  * A tracer might use this callback on some rare cases:
@@ -5107,6 +5121,7 @@ __init static int tracer_alloc_buffers(void)
 #endif
 
 	trace_init_cmdlines();
+	init_irq_work(&trace_work_wakeup, trace_wake_up);
 
 	register_tracer(&nop_trace);
 	current_trace = &nop_trace;