@@ -18,12 +18,6 @@
 
 #define STACK_TRACE_ENTRIES 500
 
-#ifdef CC_USING_FENTRY
-# define fentry		1
-#else
-# define fentry		0
-#endif
-
 static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
 static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
@@ -35,7 +29,7 @@ static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
  */
 static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
-	.entries		= &stack_dump_trace[1],
+	.entries		= &stack_dump_trace[0],
 };
 
 static unsigned long max_stack_size;
@@ -55,7 +49,7 @@ static inline void print_max_stack(void)
 
	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
-		 max_stack_trace.nr_entries - 1);
+		 max_stack_trace.nr_entries);
 
	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
@@ -77,7 +71,7 @@ check_stack(unsigned long ip, unsigned long *stack)
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
-	int i;
+	int i, x;
 
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
@@ -105,26 +99,20 @@ check_stack(unsigned long ip, unsigned long *stack)
	max_stack_size = this_size;
 
	max_stack_trace.nr_entries = 0;
-
-	if (using_ftrace_ops_list_func())
-		max_stack_trace.skip = 4;
-	else
-		max_stack_trace.skip = 3;
+	max_stack_trace.skip = 3;
 
	save_stack_trace(&max_stack_trace);
 
-	/*
-	 * Add the passed in ip from the function tracer.
-	 * Searching for this on the stack will skip over
-	 * most of the overhead from the stack tracer itself.
-	 */
-	stack_dump_trace[0] = ip;
-	max_stack_trace.nr_entries++;
+	/* Skip over the overhead of the stack tracer itself */
+	for (i = 0; i < max_stack_trace.nr_entries; i++) {
+		if (stack_dump_trace[i] == ip)
+			break;
+	}
 
	/*
	 * Now find where in the stack these are.
	 */
-	i = 0;
+	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
@@ -139,12 +127,15 @@ check_stack(unsigned long ip, unsigned long *stack)
	while (i < max_stack_trace.nr_entries) {
		int found = 0;
 
-		stack_dump_index[i] = this_size;
+		stack_dump_index[x] = this_size;
		p = start;
 
		for (; p < top && i < max_stack_trace.nr_entries; p++) {
+			if (stack_dump_trace[i] == ULONG_MAX)
+				break;
			if (*p == stack_dump_trace[i]) {
-				this_size = stack_dump_index[i++] =
+				stack_dump_trace[x] = stack_dump_trace[i++];
+				this_size = stack_dump_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
@@ -156,7 +147,7 @@ check_stack(unsigned long ip, unsigned long *stack)
				 * out what that is, then figure it out
				 * now.
				 */
-				if (unlikely(!tracer_frame) && i == 1) {
+				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
@@ -168,6 +159,10 @@ check_stack(unsigned long ip, unsigned long *stack)
			i++;
	}
 
+	max_stack_trace.nr_entries = x;
+	for (; x < i; x++)
+		stack_dump_trace[x] = ULONG_MAX;
+
	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
@@ -192,24 +187,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;
 
-	/*
-	 * When fentry is used, the traced function does not get
-	 * its stack frame set up, and we lose the parent.
-	 * The ip is pretty useless because the function tracer
-	 * was called before that function set up its stack frame.
-	 * In this case, we use the parent ip.
-	 *
-	 * By adding the return address of either the parent ip
-	 * or the current ip we can disregard most of the stack usage
-	 * caused by the stack tracer itself.
-	 *
-	 * The function tracer always reports the address of where the
-	 * mcount call was, but the stack will hold the return address.
-	 */
-	if (fentry)
-		ip = parent_ip;
-	else
-		ip += MCOUNT_INSN_SIZE;
+	ip += MCOUNT_INSN_SIZE;
 
	check_stack(ip, &stack);
 
@@ -284,7 +262,7 @@ __next(struct seq_file *m, loff_t *pos)
 {
	long n = *pos - 1;
 
-	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
+	if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;
 
	m->private = (void *)n;
@@ -354,7 +332,7 @@ static int t_show(struct seq_file *m, void *v)
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
-			   max_stack_trace.nr_entries - 1);
+			   max_stack_trace.nr_entries);
 
		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);
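
The core of this change is the reworked trimming in check_stack(): instead of seeding stack_dump_trace[0] with the caller's ip and relying on a fixed skip count, the full trace is saved first, then scanned for ip, and everything before that entry is discarded and the arrays compacted. Below is a minimal userspace sketch of just that trimming step, with made-up addresses and a hypothetical ENTRIES size; the real function additionally maps each surviving entry to a stack slot and records its size in stack_dump_index[].

/*
 * Standalone sketch (not kernel code): mimics how the reworked
 * check_stack() drops its own overhead. The saved trace is scanned
 * for the traced ip, entries before it are discarded, survivors are
 * compacted to the front, and the tail is refilled with ULONG_MAX.
 * The array contents are made-up example addresses.
 */
#include <stdio.h>
#include <limits.h>

#define ENTRIES 8

static unsigned long trace[ENTRIES + 1] = {
	0x1000, 0x1010, 0x1020,		/* stack tracer's own frames */
	0xc0de, 0xbeef, 0xcafe,		/* traced function and its callers */
	ULONG_MAX, ULONG_MAX, ULONG_MAX
};

int main(void)
{
	unsigned long ip = 0xc0de;	/* address passed in by the tracer */
	int i, x;

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < ENTRIES; i++) {
		if (trace[i] == ip)
			break;
	}

	/* Compact the surviving entries to the front of the array */
	for (x = 0; i < ENTRIES && trace[i] != ULONG_MAX; i++, x++)
		trace[x] = trace[i];

	/* Terminate the trimmed trace the way the tracer does */
	for (i = x; i < ENTRIES; i++)
		trace[i] = ULONG_MAX;

	for (i = 0; i < x; i++)
		printf("%d: 0x%lx\n", i, trace[i]);

	return 0;
}

Run as-is, this prints only the three entries from 0xc0de onward, which is the behaviour the new search loop and the trailing "nr_entries = x" fixup in check_stack() are after.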