
Commit 9cd2992

fgraph: Have set_graph_notrace only affect function_graph tracer
In order to make the function graph infrastructure more generic, there cannot be code specific to the function_graph tracer in the generic code. This includes the set_graph_notrace logic, which stops all graph calls when a function in set_graph_notrace is hit.

By using the trace_recursion mask, we can use a bit in the current task_struct to implement the notrace code, and move the logic out of fgraph.c and into trace_functions_graph.c, keeping it affecting only the tracer and not all call graph callbacks.

Acked-by: Namhyung Kim <[email protected]>
Reviewed-by: Joel Fernandes (Google) <[email protected]>
Signed-off-by: Steven Rostedt (VMware) <[email protected]>
1 parent d864a3c commit 9cd2992
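The mechanism described in the message can be illustrated with a small standalone sketch. Every name below (struct task, recursion_set(), graph_entry(), and so on) is a simplified stand-in for illustration, not the kernel's actual definition; the point is only to show how a per-task bit can gate one tracer's entry/return callbacks without the generic return-stack code having to know about the filter.

	/*
	 * Standalone illustration (hypothetical names, not kernel code) of the
	 * per-task bit approach: the entry callback sets the bit when a filtered
	 * function is hit, suppresses everything while it is set, and the return
	 * callback clears it again.
	 */
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	#define GRAPH_NOTRACE_BIT	0

	struct task {
		unsigned long trace_recursion;		/* per-task flag word */
	};

	static struct task current_task;		/* stand-in for "current" */

	static void recursion_set(int bit)   { current_task.trace_recursion |= (1UL << bit); }
	static void recursion_clear(int bit) { current_task.trace_recursion &= ~(1UL << bit); }
	static bool recursion_test(int bit)  { return current_task.trace_recursion & (1UL << bit); }

	/* Pretend filter: only "skip_me" is listed in set_graph_notrace. */
	static bool graph_notrace_addr(const char *func)
	{
		return strcmp(func, "skip_me") == 0;
	}

	/* Entry callback: a nonzero return means "call the return handler later". */
	static int graph_entry(const char *func)
	{
		if (recursion_test(GRAPH_NOTRACE_BIT))
			return 0;			/* inside a filtered call chain */

		if (graph_notrace_addr(func)) {
			recursion_set(GRAPH_NOTRACE_BIT);
			return 1;			/* return handler must run to clear the bit */
		}

		printf("entry:  %s\n", func);
		return 1;
	}

	/* Return callback: clears the bit set at entry of the filtered function. */
	static void graph_return(const char *func)
	{
		if (recursion_test(GRAPH_NOTRACE_BIT)) {
			recursion_clear(GRAPH_NOTRACE_BIT);
			return;
		}
		printf("return: %s\n", func);
	}

	int main(void)
	{
		int outer = graph_entry("skip_me");	/* listed: sets the bit, prints nothing */
		int inner = graph_entry("nested");	/* suppressed while the bit is set */

		if (inner)				/* inner == 0, so no return callback for it */
			graph_return("nested");
		if (outer)				/* outer == 1: return callback clears the bit */
			graph_return("skip_me");

		graph_entry("traced");			/* traced normally again */
		graph_return("traced");
		return 0;
	}

Compare this with trace_graph_entry() and trace_graph_return() in the trace_functions_graph.c hunks below, which do the same thing with the real trace_recursion_*() helpers and TRACE_GRAPH_NOTRACE_BIT.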

File tree: 3 files changed, +29 −21 lines

kernel/trace/fgraph.c

Lines changed: 0 additions & 21 deletions
@@ -64,30 +64,9 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
 		return -EBUSY;
 	}
 
-	/*
-	 * The curr_ret_stack is an index to ftrace return stack of
-	 * current task. Its value should be in [0, FTRACE_RETFUNC_
-	 * DEPTH) when the function graph tracer is used. To support
-	 * filtering out specific functions, it makes the index
-	 * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
-	 * so when it sees a negative index the ftrace will ignore
-	 * the record. And the index gets recovered when returning
-	 * from the filtered function by adding the FTRACE_NOTRACE_
-	 * DEPTH and then it'll continue to record functions normally.
-	 *
-	 * The curr_ret_stack is initialized to -1 and get increased
-	 * in this function. So it can be less than -1 only if it was
-	 * filtered out via ftrace_graph_notrace_addr() which can be
-	 * set from set_graph_notrace file in tracefs by user.
-	 */
-	if (current->curr_ret_stack < -1)
-		return -EBUSY;
-
 	calltime = trace_clock_local();
 
 	index = ++current->curr_ret_stack;
-	if (ftrace_graph_notrace_addr(func))
-		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
 	barrier();
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
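The comment removed above documents the old approach: mark a task as filtered by pushing curr_ret_stack far below -1, then recover the real index on return. A toy sketch of just that encoding, with a made-up constant standing in for FTRACE_NOTRACE_DEPTH (not the kernel's value):

	/* Toy illustration of the removed scheme; the constant is an assumed stand-in. */
	#include <stdio.h>

	#define NOTRACE_DEPTH	10000	/* stand-in for FTRACE_NOTRACE_DEPTH */

	int main(void)
	{
		int curr_ret_stack = 3;			/* normal index, >= 0 */

		curr_ret_stack -= NOTRACE_DEPTH;	/* entering a filtered function */
		printf("filtered: %d (negative => records ignored)\n", curr_ret_stack);

		curr_ret_stack += NOTRACE_DEPTH;	/* returning from the filtered function */
		printf("recovered: %d\n", curr_ret_stack);
		return 0;
	}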

kernel/trace/trace.h

Lines changed: 7 additions & 0 deletions
@@ -534,6 +534,13 @@ enum {
 
 	TRACE_GRAPH_DEPTH_START_BIT,
 	TRACE_GRAPH_DEPTH_END_BIT,
+
+	/*
+	 * To implement set_graph_notrace, if this bit is set, we ignore
+	 * function graph tracing of called functions, until the return
+	 * function is called to clear it.
+	 */
+	TRACE_GRAPH_NOTRACE_BIT,
 };
 
 #define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
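The new code below also relies on the companion helpers defined next to trace_recursion_set() in trace.h. They are not part of this hunk; roughly, they look like:

	#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
	#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))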

kernel/trace/trace_functions_graph.c

Lines changed: 22 additions & 0 deletions
@@ -188,6 +188,18 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
+	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
+		return 0;
+
+	if (ftrace_graph_notrace_addr(trace->func)) {
+		trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
+		/*
+		 * Need to return 1 to have the return called
+		 * that will clear the NOTRACE bit.
+		 */
+		return 1;
+	}
+
 	if (!ftrace_trace_task(tr))
 		return 0;
 
@@ -290,6 +302,11 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 
 	ftrace_graph_addr_finish(trace);
 
+	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
+		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
+		return;
+	}
+
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -315,6 +332,11 @@ static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 {
 	ftrace_graph_addr_finish(trace);
 
+	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
+		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
+		return;
+	}
+
 	if (tracing_thresh &&
 	    (trace->rettime - trace->calltime < tracing_thresh))
 		return;
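Taken together: when a function listed in set_graph_notrace (a tracefs file, per the comment deleted from fgraph.c) is entered, trace_graph_entry() sets TRACE_GRAPH_NOTRACE_BIT and returns 1 so that the return handler still runs; while the bit is set, entries into nested functions return 0 and nothing is recorded; the matching return handler then clears the bit and tracing resumes. The generic fgraph code no longer needs to know any of this.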
