
Commit bd8e68a

ahunter6 authored and acmel committed
perf thread-stack: Avoid direct reference to the thread's stack
In preparation for fixing thread stack processing for the idle task, avoid
direct reference to the thread's stack. The thread stack will change to an
array of thread stacks, at which point the meaning of the direct reference
will change.

Signed-off-by: Adrian Hunter <[email protected]>
Acked-by: Jiri Olsa <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
[ Rename thread_stack__ts() to thread__stack() since this operates on a
  'thread' struct ]
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
1 parent e0b8951 commit bd8e68a
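The point of the new thread__stack() accessor is that callers stop dereferencing thread->ts directly, so a later change can swap the single stack for an array without touching every call site again. As a rough sketch only, assuming the follow-up indexes the array per CPU (the cpu parameter and ts_cnt field here are illustrative, not part of this commit), the accessor could later become:

/*
 * Hypothetical follow-up shape, NOT part of this commit: if thread->ts
 * becomes an array of stacks (e.g. one per CPU for the idle task), only
 * this accessor needs to learn how to pick the right entry; callers that
 * already go through thread__stack() are unaffected.
 */
static inline struct thread_stack *thread__stack(struct thread *thread,
                                                 int cpu)
{
        if (!thread || !thread->ts)
                return NULL;
        /* 'ts_cnt' is an assumed field counting stacks in the array */
        if (cpu < 0 || (unsigned int)cpu >= thread->ts_cnt)
                cpu = 0;
        return &thread->ts[cpu];
}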

File tree

1 file changed: +49, -32


tools/perf/util/thread-stack.c

Lines changed: 49 additions & 32 deletions
@@ -111,9 +111,16 @@ static struct thread_stack *thread_stack__new(struct thread *thread,
 	ts->kernel_start = 1ULL << 63;
 	ts->crp = crp;
 
+	thread->ts = ts;
+
 	return ts;
 }
 
+static inline struct thread_stack *thread__stack(struct thread *thread)
+{
+	return thread ? thread->ts : NULL;
+}
+
 static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
 			      bool trace_end)
 {
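Every other hunk below is the same mechanical conversion: fetch the stack once through the accessor, cache it in a local 'ts', and test and use that instead of re-dereferencing thread->ts. Taking thread_stack__depth() (converted in the last hunk of this diff) as the pattern, the before/after shape of each call site is:

/* Before: direct dereference of the thread's stack */
size_t thread_stack__depth(struct thread *thread)
{
        if (!thread->ts)
                return 0;
        return thread->ts->cnt;
}

/* After: one call to the accessor, result cached in a local */
size_t thread_stack__depth(struct thread *thread)
{
        struct thread_stack *ts = thread__stack(thread);

        if (!ts)
                return 0;
        return ts->cnt;
}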
@@ -226,40 +233,44 @@ static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
 
 int thread_stack__flush(struct thread *thread)
 {
-	if (thread->ts)
-		return __thread_stack__flush(thread, thread->ts);
+	struct thread_stack *ts = thread->ts;
+
+	if (ts)
+		return __thread_stack__flush(thread, ts);
 
 	return 0;
 }
 
 int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 			u64 to_ip, u16 insn_len, u64 trace_nr)
 {
+	struct thread_stack *ts = thread__stack(thread);
+
 	if (!thread)
 		return -EINVAL;
 
-	if (!thread->ts) {
-		thread->ts = thread_stack__new(thread, NULL);
-		if (!thread->ts) {
+	if (!ts) {
+		ts = thread_stack__new(thread, NULL);
+		if (!ts) {
 			pr_warning("Out of memory: no thread stack\n");
 			return -ENOMEM;
 		}
-		thread->ts->trace_nr = trace_nr;
+		ts->trace_nr = trace_nr;
 	}
 
 	/*
 	 * When the trace is discontinuous, the trace_nr changes. In that case
 	 * the stack might be completely invalid. Better to report nothing than
 	 * to report something misleading, so flush the stack.
 	 */
-	if (trace_nr != thread->ts->trace_nr) {
-		if (thread->ts->trace_nr)
-			__thread_stack__flush(thread, thread->ts);
-		thread->ts->trace_nr = trace_nr;
+	if (trace_nr != ts->trace_nr) {
+		if (ts->trace_nr)
+			__thread_stack__flush(thread, ts);
+		ts->trace_nr = trace_nr;
 	}
 
 	/* Stop here if thread_stack__process() is in use */
-	if (thread->ts->crp)
+	if (ts->crp)
 		return 0;
 
 	if (flags & PERF_IP_FLAG_CALL) {
@@ -270,7 +281,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 		ret_addr = from_ip + insn_len;
 		if (ret_addr == to_ip)
 			return 0; /* Zero-length calls are excluded */
-		return thread_stack__push(thread->ts, ret_addr,
+		return thread_stack__push(ts, ret_addr,
 					  flags & PERF_IP_FLAG_TRACE_END);
 	} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
 		/*
@@ -280,32 +291,36 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 		 * address, so try to pop that. Also, do not expect a call made
 		 * when the trace ended, to return, so pop that.
 		 */
-		thread_stack__pop(thread->ts, to_ip);
-		thread_stack__pop_trace_end(thread->ts);
+		thread_stack__pop(ts, to_ip);
+		thread_stack__pop_trace_end(ts);
 	} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
-		thread_stack__pop(thread->ts, to_ip);
+		thread_stack__pop(ts, to_ip);
 	}
 
 	return 0;
 }
 
 void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
 {
-	if (!thread || !thread->ts)
+	struct thread_stack *ts = thread__stack(thread);
+
+	if (!ts)
 		return;
 
-	if (trace_nr != thread->ts->trace_nr) {
-		if (thread->ts->trace_nr)
-			__thread_stack__flush(thread, thread->ts);
-		thread->ts->trace_nr = trace_nr;
+	if (trace_nr != ts->trace_nr) {
+		if (ts->trace_nr)
+			__thread_stack__flush(thread, ts);
+		ts->trace_nr = trace_nr;
 	}
 }
 
 void thread_stack__free(struct thread *thread)
 {
-	if (thread->ts) {
-		__thread_stack__flush(thread, thread->ts);
-		zfree(&thread->ts->stack);
+	struct thread_stack *ts = thread->ts;
+
+	if (ts) {
+		__thread_stack__flush(thread, ts);
+		zfree(&ts->stack);
 		zfree(&thread->ts);
 	}
 }
@@ -318,6 +333,7 @@ static inline u64 callchain_context(u64 ip, u64 kernel_start)
 void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
 			  size_t sz, u64 ip, u64 kernel_start)
 {
+	struct thread_stack *ts = thread__stack(thread);
 	u64 context = callchain_context(ip, kernel_start);
 	u64 last_context;
 	size_t i, j;
@@ -330,15 +346,15 @@ void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
 	chain->ips[0] = context;
 	chain->ips[1] = ip;
 
-	if (!thread || !thread->ts) {
+	if (!ts) {
 		chain->nr = 2;
 		return;
 	}
 
 	last_context = context;
 
-	for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) {
-		ip = thread->ts->stack[thread->ts->cnt - j].ret_addr;
+	for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
+		ip = ts->stack[ts->cnt - j].ret_addr;
 		context = callchain_context(ip, kernel_start);
 		if (context != last_context) {
 			if (i >= sz - 1)
@@ -590,7 +606,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 			  struct addr_location *to_al, u64 ref,
 			  struct call_return_processor *crp)
 {
-	struct thread_stack *ts = thread->ts;
+	struct thread_stack *ts = thread__stack(thread);
 	int err = 0;
 
 	if (ts && !ts->crp) {
@@ -600,10 +616,9 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 	}
 
 	if (!ts) {
-		thread->ts = thread_stack__new(thread, crp);
-		if (!thread->ts)
+		ts = thread_stack__new(thread, crp);
+		if (!ts)
 			return -ENOMEM;
-		ts = thread->ts;
 		ts->comm = comm;
 	}
 
@@ -668,7 +683,9 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 
 size_t thread_stack__depth(struct thread *thread)
 {
-	if (!thread->ts)
+	struct thread_stack *ts = thread__stack(thread);
+
+	if (!ts)
 		return 0;
-	return thread->ts->cnt;
+	return ts->cnt;
 }
