@@ -98,6 +98,13 @@ struct ftrace_pid {
 	struct pid *pid;
 };
 
+static bool ftrace_pids_enabled(void)
+{
+	return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 	if (!test_tsk_trace_trace(current))
 		return;
 
-	ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
-	/* do not set ftrace_pid_function to itself! */
-	if (func != ftrace_pid_func)
-		ftrace_pid_function = func;
+	op->saved_func(ip, parent_ip, op, regs);
 }
 
 /**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
-	ftrace_pid_function = ftrace_stub;
 }
 
 static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	} else
 		add_ftrace_ops(&ftrace_ops_list, ops);
 
+	/* Always save the function, and reset at unregistering */
+	ops->saved_func = ops->func;
+
+	if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+		ops->func = ftrace_pid_func;
+
 	ftrace_update_trampoline(ops);
 
 	if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_enabled)
 		update_ftrace_function();
 
+	ops->func = ops->saved_func;
+
 	return 0;
 }
 
 static void ftrace_update_pid_func(void)
 {
+	bool enabled = ftrace_pids_enabled();
+	struct ftrace_ops *op;
+
 	/* Only do something if we are tracing something */
 	if (ftrace_trace_function == ftrace_stub)
 		return;
 
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		if (op->flags & FTRACE_OPS_FL_PID) {
+			op->func = enabled ? ftrace_pid_func :
+				op->saved_func;
+			ftrace_update_trampoline(op);
+		}
+	} while_for_each_ftrace_op(op);
+
 	update_ftrace_function();
 }
 
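
The two hunks above carry the heart of the change: every ops now remembers its original callback in saved_func, and only ops that opt in with FTRACE_OPS_FL_PID get swapped to the pid-filtering wrapper while pid filtering is active. Below is a minimal user-space sketch of that save/swap/restore pattern; struct ops, pid_func, register_ops and friends are illustrative stand-ins, not kernel API, and the actual pid check is elided.

#include <stdbool.h>
#include <stdio.h>

struct ops;
typedef void (*trace_func_t)(unsigned long ip, struct ops *op);

struct ops {
	trace_func_t func;       /* callback currently wired in */
	trace_func_t saved_func; /* original callback, restored later */
	bool pid_flag;           /* stands in for FTRACE_OPS_FL_PID */
};

static bool pids_enabled;        /* stands in for ftrace_pids_enabled() */

/* Wrapper: a real pid check would go here, then forward to saved_func. */
static void pid_func(unsigned long ip, struct ops *op)
{
	op->saved_func(ip, op);
}

static void register_ops(struct ops *op)
{
	/* Always save the function, and reset at unregistering */
	op->saved_func = op->func;
	if (op->pid_flag && pids_enabled)
		op->func = pid_func;
}

static void unregister_ops(struct ops *op)
{
	op->func = op->saved_func;
}

/* Mirrors ftrace_update_pid_func(): retarget every opted-in ops. */
static void update_pid_func(struct ops **list, int n)
{
	for (int i = 0; i < n; i++)
		if (list[i]->pid_flag)
			list[i]->func = pids_enabled ? pid_func
						     : list[i]->saved_func;
}

static void my_callback(unsigned long ip, struct ops *op)
{
	(void)op;
	printf("traced %#lx\n", ip);
}

int main(void)
{
	struct ops op = { .func = my_callback, .pid_flag = true };
	struct ops *list[] = { &op };

	pids_enabled = true;
	register_ops(&op);
	op.func(0x1000, &op);   /* routed through pid_func */

	pids_enabled = false;
	update_pid_func(list, 1);
	op.func(0x1000, &op);   /* calls my_callback directly again */

	unregister_ops(&op);
	return 0;
}
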
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
 	.local_hash.filter_hash		= EMPTY_HASH,
 	INIT_OPS_HASH(global_ops)
 	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
-					  FTRACE_OPS_FL_INITIALIZED,
+					  FTRACE_OPS_FL_INITIALIZED |
+					  FTRACE_OPS_FL_PID,
 };
 
 /*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
+				  FTRACE_OPS_FL_INITIALIZED |
+				  FTRACE_OPS_FL_PID,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
 		if (WARN_ON(tr->ops->func != ftrace_stub))
 			printk("ftrace ops had %pS for function\n",
 			       tr->ops->func);
-		/* Only the top level instance does pid tracing */
-		if (!list_empty(&ftrace_pids)) {
-			set_ftrace_pid_function(func);
-			func = ftrace_pid_func;
-		}
 	}
 	tr->ops->func = func;
 	tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
 {
 	mutex_lock(&ftrace_lock);
 
-	if (list_empty(&ftrace_pids) && (!*pos))
+	if (!ftrace_pids_enabled() && (!*pos))
 		return (void *) 1;
 
 	return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
 	.func			= ftrace_stub,
 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
 				  FTRACE_OPS_FL_INITIALIZED |
+				  FTRACE_OPS_FL_PID |
 				  FTRACE_OPS_FL_STUB,
 #ifdef FTRACE_GRAPH_TRAMP_ADDR
 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
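
In effect, pid filtering becomes an opt-in, per-ops property: only ops that set FTRACE_OPS_FL_PID (global_ops and graph_ops in this diff) are ever switched to ftrace_pid_func, each keeping its own callback in saved_func, whereas the removed single ftrace_pid_function global could only hold one saved callback at a time once multiple ops were registered.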