@@ -111,9 +111,16 @@ static struct thread_stack *thread_stack__new(struct thread *thread,
 	ts->kernel_start = 1ULL << 63;
 	ts->crp = crp;
 
+	thread->ts = ts;
+
 	return ts;
 }
 
+static inline struct thread_stack *thread__stack(struct thread *thread)
+{
+	return thread ? thread->ts : NULL;
+}
+
 static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
 			      bool trace_end)
 {
@@ -226,40 +233,44 @@ static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
 
 int thread_stack__flush(struct thread *thread)
 {
-	if (thread->ts)
-		return __thread_stack__flush(thread, thread->ts);
+	struct thread_stack *ts = thread->ts;
+
+	if (ts)
+		return __thread_stack__flush(thread, ts);
 
 	return 0;
 }
 
 int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 			u64 to_ip, u16 insn_len, u64 trace_nr)
 {
+	struct thread_stack *ts = thread__stack(thread);
+
 	if (!thread)
 		return -EINVAL;
 
-	if (!thread->ts) {
-		thread->ts = thread_stack__new(thread, NULL);
-		if (!thread->ts) {
+	if (!ts) {
+		ts = thread_stack__new(thread, NULL);
+		if (!ts) {
 			pr_warning("Out of memory: no thread stack\n");
 			return -ENOMEM;
 		}
-		thread->ts->trace_nr = trace_nr;
+		ts->trace_nr = trace_nr;
 	}
 
 	/*
 	 * When the trace is discontinuous, the trace_nr changes. In that case
 	 * the stack might be completely invalid. Better to report nothing than
 	 * to report something misleading, so flush the stack.
 	 */
-	if (trace_nr != thread->ts->trace_nr) {
-		if (thread->ts->trace_nr)
-			__thread_stack__flush(thread, thread->ts);
-		thread->ts->trace_nr = trace_nr;
+	if (trace_nr != ts->trace_nr) {
+		if (ts->trace_nr)
+			__thread_stack__flush(thread, ts);
+		ts->trace_nr = trace_nr;
 	}
 
 	/* Stop here if thread_stack__process() is in use */
-	if (thread->ts->crp)
+	if (ts->crp)
 		return 0;
 
 	if (flags & PERF_IP_FLAG_CALL) {
@@ -270,7 +281,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 		ret_addr = from_ip + insn_len;
 		if (ret_addr == to_ip)
 			return 0; /* Zero-length calls are excluded */
-		return thread_stack__push(thread->ts, ret_addr,
+		return thread_stack__push(ts, ret_addr,
 					  flags & PERF_IP_FLAG_TRACE_END);
 	} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
 		/*
@@ -280,32 +291,36 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 		 * address, so try to pop that. Also, do not expect a call made
 		 * when the trace ended, to return, so pop that.
 		 */
-		thread_stack__pop(thread->ts, to_ip);
-		thread_stack__pop_trace_end(thread->ts);
+		thread_stack__pop(ts, to_ip);
+		thread_stack__pop_trace_end(ts);
 	} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
-		thread_stack__pop(thread->ts, to_ip);
+		thread_stack__pop(ts, to_ip);
 	}
 
 	return 0;
 }
 
 void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
 {
-	if (!thread || !thread->ts)
+	struct thread_stack *ts = thread__stack(thread);
+
+	if (!ts)
 		return;
 
-	if (trace_nr != thread->ts->trace_nr) {
-		if (thread->ts->trace_nr)
-			__thread_stack__flush(thread, thread->ts);
-		thread->ts->trace_nr = trace_nr;
+	if (trace_nr != ts->trace_nr) {
+		if (ts->trace_nr)
+			__thread_stack__flush(thread, ts);
+		ts->trace_nr = trace_nr;
 	}
 }
 
 void thread_stack__free(struct thread *thread)
 {
-	if (thread->ts) {
-		__thread_stack__flush(thread, thread->ts);
-		zfree(&thread->ts->stack);
+	struct thread_stack *ts = thread->ts;
+
+	if (ts) {
+		__thread_stack__flush(thread, ts);
+		zfree(&ts->stack);
 		zfree(&thread->ts);
 	}
 }
@@ -318,6 +333,7 @@ static inline u64 callchain_context(u64 ip, u64 kernel_start)
 void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
 			  size_t sz, u64 ip, u64 kernel_start)
 {
+	struct thread_stack *ts = thread__stack(thread);
 	u64 context = callchain_context(ip, kernel_start);
 	u64 last_context;
 	size_t i, j;
@@ -330,15 +346,15 @@ void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
 	chain->ips[0] = context;
 	chain->ips[1] = ip;
 
-	if (!thread || !thread->ts) {
+	if (!ts) {
 		chain->nr = 2;
 		return;
 	}
 
 	last_context = context;
 
-	for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) {
-		ip = thread->ts->stack[thread->ts->cnt - j].ret_addr;
+	for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
+		ip = ts->stack[ts->cnt - j].ret_addr;
 		context = callchain_context(ip, kernel_start);
 		if (context != last_context) {
 			if (i >= sz - 1)
@@ -590,7 +606,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 			  struct addr_location *to_al, u64 ref,
 			  struct call_return_processor *crp)
 {
-	struct thread_stack *ts = thread->ts;
+	struct thread_stack *ts = thread__stack(thread);
 	int err = 0;
 
 	if (ts && !ts->crp) {
@@ -600,10 +616,9 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 	}
 
 	if (!ts) {
-		thread->ts = thread_stack__new(thread, crp);
-		if (!thread->ts)
+		ts = thread_stack__new(thread, crp);
+		if (!ts)
 			return -ENOMEM;
-		ts = thread->ts;
 		ts->comm = comm;
 	}
 
@@ -668,7 +683,9 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 
 size_t thread_stack__depth(struct thread *thread)
 {
-	if (!thread->ts)
+	struct thread_stack *ts = thread__stack(thread);
+
+	if (!ts)
 		return 0;
-	return thread->ts->cnt;
+	return ts->cnt;
 }
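
For readers skimming the patch: the core change is the NULL-safe accessor `thread__stack()`, which lets every entry point fetch the stack pointer once into a local `ts` instead of repeatedly dereferencing `thread->ts`. Below is a minimal, self-contained sketch of that idiom. The struct layouts here are simplified stand-ins for illustration only, not the real definitions from tools/perf/util/thread-stack.c.

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the perf structures -- illustrative only. */
struct thread_stack {
	size_t cnt;	/* number of entries on the call stack */
};

struct thread {
	struct thread_stack *ts;
};

/* The NULL-safe accessor this patch adds: tolerates a NULL thread. */
static inline struct thread_stack *thread__stack(struct thread *thread)
{
	return thread ? thread->ts : NULL;
}

/*
 * Callers fetch the stack once into a local and test that, so a NULL
 * thread and a missing stack both take the same early-return path.
 */
static size_t thread_stack__depth(struct thread *thread)
{
	struct thread_stack *ts = thread__stack(thread);

	if (!ts)
		return 0;
	return ts->cnt;
}

int main(void)
{
	struct thread_stack stack = { .cnt = 3 };
	struct thread thread = { .ts = &stack };

	printf("%zu\n", thread_stack__depth(&thread)); /* prints 3 */
	printf("%zu\n", thread_stack__depth(NULL));    /* prints 0, no crash */
	return 0;
}
```

Note that `thread_stack__new()` now stores the new stack in `thread->ts` itself, which is what allows callers such as `thread_stack__event()` and `thread_stack__process()` to keep working with the local `ts` after allocation instead of re-reading `thread->ts`.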