+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Infrastructure to hook into function calls and returns.
+ * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]>
+ * Mostly borrowed from function tracer which
+ * is Copyright (c) Steven Rostedt <[email protected]>
+ *
+ * Highly modified by Steven Rostedt (VMware).
+ */
+#include <linux/ftrace.h>
+
+#include "trace.h"
+
+static bool kill_ftrace_graph;
+
+/**
+ * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
+ *
+ * ftrace_graph_stop() is called when a severe error is detected in
+ * the function graph tracing. This function is called by the critical
+ * paths of function graph to keep those paths from doing any more harm.
+ */
+bool ftrace_graph_is_dead(void)
+{
+	return kill_ftrace_graph;
+}
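+
+/*
+ * A minimal usage sketch (this mirrors the check done in
+ * ftrace_push_return_trace() below): critical paths bail out once
+ * the graph tracer has been killed:
+ *
+ *	if (unlikely(ftrace_graph_is_dead()))
+ *		return -EBUSY;
+ */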
+
+/**
+ * ftrace_graph_stop - set to permanently disable function graph tracing
+ *
+ * In case of an error in function graph tracing, this is called
+ * to try to keep function graph tracing from causing any more harm.
+ * Usually this is pretty severe and this is called to try to at least
+ * get a warning out to the user.
+ */
+void ftrace_graph_stop(void)
+{
+	kill_ftrace_graph = true;
+}
+
+/* Add a function return address to the trace stack of the current task. */
+static int
+ftrace_push_return_trace(unsigned long ret, unsigned long func,
+			 unsigned long frame_pointer, unsigned long *retp)
+{
+	unsigned long long calltime;
+	int index;
+
+	if (unlikely(ftrace_graph_is_dead()))
+		return -EBUSY;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/*
+	 * We must make sure the ret_stack is tested before we read
+	 * anything else. The allocation path initializes the stack and
+	 * only then publishes ret_stack behind an smp_wmb(); this read
+	 * barrier pairs with it.
+	 */
+	smp_rmb();
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	/*
+	 * The curr_ret_stack is an index into the ftrace return stack of
+	 * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
+	 * when the function graph tracer is used. To support filtering out
+	 * specific functions, the index is made negative by subtracting a
+	 * huge value (FTRACE_NOTRACE_DEPTH), so that ftrace ignores the
+	 * record whenever it sees a negative index. The index is recovered
+	 * when returning from the filtered function by adding
+	 * FTRACE_NOTRACE_DEPTH back, and recording then continues normally.
+	 *
+	 * The curr_ret_stack is initialized to -1 and gets increased in
+	 * this function. So it can be less than -1 only if the function
+	 * was filtered out via ftrace_graph_notrace_addr(), which can be
+	 * set from the set_graph_notrace file in tracefs by the user.
+	 */
+	if (current->curr_ret_stack < -1)
+		return -EBUSY;
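+
+	/*
+	 * Worked example (assuming FTRACE_NOTRACE_DEPTH is 0x10000, as
+	 * defined in <linux/ftrace.h>): a filtered function pushed at
+	 * index 3 leaves curr_ret_stack at 3 - 0x10000 = -65533. While
+	 * the index stays below -1, the check above rejects any nested
+	 * push, so nothing inside the filtered function is recorded.
+	 * The pop path adds 0x10000 back to find index 3 and the saved
+	 * return address.
+	 */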
+
+	calltime = trace_clock_local();
+
+	index = ++current->curr_ret_stack;
+	if (ftrace_graph_notrace_addr(func))
+		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = calltime;
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	current->ret_stack[index].fp = frame_pointer;
+#endif
+#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+	current->ret_stack[index].retp = retp;
+#endif
+	return 0;
+}
+
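+/*
+ * Hook the entry of a traced function: record the call depth, push the
+ * return address and hand the event to the registered entry callback.
+ * Called from the architecture entry code (see the sketch after this
+ * function). A non-zero return tells the arch code not to redirect the
+ * return address of the traced function.
+ */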
+int function_graph_enter(unsigned long ret, unsigned long func,
+			 unsigned long frame_pointer, unsigned long *retp)
+{
+	struct ftrace_graph_ent trace;
+
+	trace.func = func;
+	trace.depth = ++current->curr_ret_depth;
+
+	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
+		goto out;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace))
+		goto out_ret;
+
+	return 0;
+ out_ret:
+	current->curr_ret_stack--;
+ out:
+	current->curr_ret_depth--;
+	return -EBUSY;
+}
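+
+/*
+ * Illustrative sketch of the arch side (not part of this file, and the
+ * details vary by architecture): the entry hook saves the real return
+ * address and redirects it to the arch's return_to_handler stub only
+ * when function_graph_enter() succeeds:
+ *
+ *	void prepare_ftrace_return(unsigned long self_addr,
+ *				   unsigned long *parent,
+ *				   unsigned long frame_pointer)
+ *	{
+ *		unsigned long old = *parent;
+ *
+ *		if (!function_graph_enter(old, self_addr,
+ *					  frame_pointer, parent))
+ *			*parent = (unsigned long)&return_to_handler;
+ *	}
+ */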
+
+/* Retrieve a function return address from the trace stack. */
+static void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
+			unsigned long frame_pointer)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+
+	/*
+	 * A negative index here means that we have just returned from a
+	 * function marked notrace. Recover the index to get the original
+	 * return address. See ftrace_push_return_trace().
+	 *
+	 * TODO: Need to check whether the stack gets corrupted.
+	 */
+	if (index < 0)
+		index += FTRACE_NOTRACE_DEPTH;
+
+	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have nowhere to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
+
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	/*
+	 * The arch may choose to record the frame pointer used
+	 * and check it here to make sure that it is what we expect it
+	 * to be. If gcc does not set the placeholder of the return
+	 * address in the frame pointer, and does a copy instead, then
+	 * the function graph trace will fail. This test detects this
+	 * case.
+	 *
+	 * Currently, x86_32 optimized for size (-Os) makes the latest
+	 * gcc do the above.
+	 *
+	 * Note, -mfentry does not use frame pointers, and this test
+	 * is not needed if CC_USING_FENTRY is set.
+	 */
+	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
+		ftrace_graph_stop();
+		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
+		     " from func %ps return to %lx\n",
+		     current->ret_stack[index].fp,
+		     frame_pointer,
+		     (void *)current->ret_stack[index].func,
+		     current->ret_stack[index].ret);
+		*ret = (unsigned long)panic;
+		return;
+	}
+#endif
+
+	*ret = current->ret_stack[index].ret;
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = current->curr_ret_depth--;
+	/*
+	 * We still want to trace interrupts coming in if
+	 * max_depth is set to 1. Make sure the decrement is
+	 * seen before ftrace_graph_return.
+	 */
+	barrier();
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
+{
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
+	trace.rettime = trace_clock_local();
+	ftrace_graph_return(&trace);
+	/*
+	 * The ftrace_graph_return() may still access the current
+	 * ret_stack structure, we need to make sure the update of
+	 * curr_ret_stack is after that.
+	 */
+	barrier();
+	current->curr_ret_stack--;
+	/*
+	 * The curr_ret_stack can be less than -1 only if it was
+	 * filtered out and it's about to return from the function.
+	 * Recover the index and continue to trace normal functions.
+	 */
+	if (current->curr_ret_stack < -1) {
+		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
+		return ret;
+	}
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
+
+	return ret;
+}
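+
+/*
+ * Note: this is reached from the architecture's return trampoline (the
+ * return_to_handler stub in the sketch above), which then jumps to the
+ * address returned here so the original caller resumes as if the
+ * function had returned normally.
+ */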