Skip to content

Commit 256d92b

Browse files
ahunter authored and acmel committed
perf thread-stack: Fix thread stack processing for the idle task
perf creates a single 'struct thread' to represent the idle task. That is because threads are identified by PID and TID, and the idle task always has PID == TID == 0. However, there are actually separate idle tasks for each CPU. That creates a problem for thread stack processing which assumes that each thread has a single stack, not one stack per CPU. Fix that by passing through the CPU number, and in the case of the idle "thread", pick the thread stack from an array based on the CPU number. Signed-off-by: Adrian Hunter <[email protected]> Acked-by: Jiri Olsa <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
1 parent 139f42f commit 256d92b

File tree

5 files changed

+69
-25
lines changed

5 files changed

+69
-25
lines changed

tools/perf/builtin-script.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1182,7 +1182,7 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
11821182
struct addr_location *al, FILE *fp)
11831183
{
11841184
struct perf_event_attr *attr = &evsel->attr;
1185-
size_t depth = thread_stack__depth(thread);
1185+
size_t depth = thread_stack__depth(thread, sample->cpu);
11861186
const char *name = NULL;
11871187
static int spacing;
11881188
int len = 0;
@@ -1716,7 +1716,7 @@ static bool show_event(struct perf_sample *sample,
17161716
struct thread *thread,
17171717
struct addr_location *al)
17181718
{
1719-
int depth = thread_stack__depth(thread);
1719+
int depth = thread_stack__depth(thread, sample->cpu);
17201720

17211721
if (!symbol_conf.graph_function)
17221722
return true;

tools/perf/util/intel-bts.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -451,7 +451,7 @@ static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
451451
continue;
452452
intel_bts_get_branch_type(btsq, branch);
453453
if (btsq->bts->synth_opts.thread_stack)
454-
thread_stack__event(thread, btsq->sample_flags,
454+
thread_stack__event(thread, btsq->cpu, btsq->sample_flags,
455455
le64_to_cpu(branch->from),
456456
le64_to_cpu(branch->to),
457457
btsq->intel_pt_insn.length,
@@ -523,7 +523,7 @@ static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
523523
!btsq->bts->synth_opts.thread_stack && thread &&
524524
(!old_buffer || btsq->bts->sampling_mode ||
525525
(btsq->bts->snapshot_mode && !buffer->consecutive)))
526-
thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1);
526+
thread_stack__set_trace_nr(thread, btsq->cpu, buffer->buffer_nr + 1);
527527

528528
err = intel_bts_process_buffer(btsq, buffer, thread);
529529

tools/perf/util/intel-pt.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1174,7 +1174,7 @@ static void intel_pt_prep_sample(struct intel_pt *pt,
11741174
intel_pt_prep_b_sample(pt, ptq, event, sample);
11751175

11761176
if (pt->synth_opts.callchain) {
1177-
thread_stack__sample(ptq->thread, ptq->chain,
1177+
thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
11781178
pt->synth_opts.callchain_sz + 1,
11791179
sample->ip, pt->kernel_start);
11801180
sample->callchain = ptq->chain;
@@ -1526,11 +1526,11 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
15261526
return 0;
15271527

15281528
if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
1529-
thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
1529+
thread_stack__event(ptq->thread, ptq->cpu, ptq->flags, state->from_ip,
15301530
state->to_ip, ptq->insn_len,
15311531
state->trace_nr);
15321532
else
1533-
thread_stack__set_trace_nr(ptq->thread, state->trace_nr);
1533+
thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
15341534

15351535
if (pt->sample_branches) {
15361536
err = intel_pt_synth_branch_sample(ptq);

tools/perf/util/thread-stack.c

Lines changed: 58 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515

1616
#include <linux/rbtree.h>
1717
#include <linux/list.h>
18+
#include <linux/log2.h>
1819
#include <errno.h>
1920
#include "thread.h"
2021
#include "event.h"
@@ -75,6 +76,16 @@ struct thread_stack {
7576
unsigned int arr_sz;
7677
};
7778

79+
/*
80+
* Assume pid == tid == 0 identifies the idle task as defined by
81+
* perf_session__register_idle_thread(). The idle task is really 1 task per cpu,
82+
* and therefore requires a stack for each cpu.
83+
*/
84+
static inline bool thread_stack__per_cpu(struct thread *thread)
85+
{
86+
return !(thread->tid || thread->pid_);
87+
}
88+
7889
static int thread_stack__grow(struct thread_stack *ts)
7990
{
8091
struct thread_stack_entry *new_stack;
@@ -111,13 +122,16 @@ static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
111122
return 0;
112123
}
113124

114-
static struct thread_stack *thread_stack__new(struct thread *thread,
125+
static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
115126
struct call_return_processor *crp)
116127
{
117128
struct thread_stack *ts = thread->ts, *new_ts;
118129
unsigned int old_sz = ts ? ts->arr_sz : 0;
119130
unsigned int new_sz = 1;
120131

132+
if (thread_stack__per_cpu(thread) && cpu > 0)
133+
new_sz = roundup_pow_of_two(cpu + 1);
134+
121135
if (!ts || new_sz > old_sz) {
122136
new_ts = calloc(new_sz, sizeof(*ts));
123137
if (!new_ts)
@@ -130,16 +144,45 @@ static struct thread_stack *thread_stack__new(struct thread *thread,
130144
ts = new_ts;
131145
}
132146

147+
if (thread_stack__per_cpu(thread) && cpu > 0 &&
148+
(unsigned int)cpu < ts->arr_sz)
149+
ts += cpu;
150+
133151
if (!ts->stack &&
134152
thread_stack__init(ts, thread, crp))
135153
return NULL;
136154

137155
return ts;
138156
}
139157

140-
static inline struct thread_stack *thread__stack(struct thread *thread)
158+
static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
141159
{
142-
return thread ? thread->ts : NULL;
160+
struct thread_stack *ts = thread->ts;
161+
162+
if (cpu < 0)
163+
cpu = 0;
164+
165+
if (!ts || (unsigned int)cpu >= ts->arr_sz)
166+
return NULL;
167+
168+
ts += cpu;
169+
170+
if (!ts->stack)
171+
return NULL;
172+
173+
return ts;
174+
}
175+
176+
static inline struct thread_stack *thread__stack(struct thread *thread,
177+
int cpu)
178+
{
179+
if (!thread)
180+
return NULL;
181+
182+
if (thread_stack__per_cpu(thread))
183+
return thread__cpu_stack(thread, cpu);
184+
185+
return thread->ts;
143186
}
144187

145188
static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
@@ -270,16 +313,16 @@ int thread_stack__flush(struct thread *thread)
270313
return err;
271314
}
272315

273-
int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
316+
int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
274317
u64 to_ip, u16 insn_len, u64 trace_nr)
275318
{
276-
struct thread_stack *ts = thread__stack(thread);
319+
struct thread_stack *ts = thread__stack(thread, cpu);
277320

278321
if (!thread)
279322
return -EINVAL;
280323

281324
if (!ts) {
282-
ts = thread_stack__new(thread, NULL);
325+
ts = thread_stack__new(thread, cpu, NULL);
283326
if (!ts) {
284327
pr_warning("Out of memory: no thread stack\n");
285328
return -ENOMEM;
@@ -329,9 +372,9 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
329372
return 0;
330373
}
331374

332-
void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
375+
void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr)
333376
{
334-
struct thread_stack *ts = thread__stack(thread);
377+
struct thread_stack *ts = thread__stack(thread, cpu);
335378

336379
if (!ts)
337380
return;
@@ -375,10 +418,11 @@ static inline u64 callchain_context(u64 ip, u64 kernel_start)
375418
return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
376419
}
377420

378-
void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
421+
void thread_stack__sample(struct thread *thread, int cpu,
422+
struct ip_callchain *chain,
379423
size_t sz, u64 ip, u64 kernel_start)
380424
{
381-
struct thread_stack *ts = thread__stack(thread);
425+
struct thread_stack *ts = thread__stack(thread, cpu);
382426
u64 context = callchain_context(ip, kernel_start);
383427
u64 last_context;
384428
size_t i, j;
@@ -651,7 +695,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
651695
struct addr_location *to_al, u64 ref,
652696
struct call_return_processor *crp)
653697
{
654-
struct thread_stack *ts = thread__stack(thread);
698+
struct thread_stack *ts = thread__stack(thread, sample->cpu);
655699
int err = 0;
656700

657701
if (ts && !ts->crp) {
@@ -661,7 +705,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
661705
}
662706

663707
if (!ts) {
664-
ts = thread_stack__new(thread, crp);
708+
ts = thread_stack__new(thread, sample->cpu, crp);
665709
if (!ts)
666710
return -ENOMEM;
667711
ts->comm = comm;
@@ -726,9 +770,9 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
726770
return err;
727771
}
728772

729-
size_t thread_stack__depth(struct thread *thread)
773+
size_t thread_stack__depth(struct thread *thread, int cpu)
730774
{
731-
struct thread_stack *ts = thread__stack(thread);
775+
struct thread_stack *ts = thread__stack(thread, cpu);
732776

733777
if (!ts)
734778
return 0;

tools/perf/util/thread-stack.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -80,14 +80,14 @@ struct call_return_processor {
8080
void *data;
8181
};
8282

83-
int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
83+
int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
8484
u64 to_ip, u16 insn_len, u64 trace_nr);
85-
void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr);
86-
void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
85+
void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr);
86+
void thread_stack__sample(struct thread *thread, int cpu, struct ip_callchain *chain,
8787
size_t sz, u64 ip, u64 kernel_start);
8888
int thread_stack__flush(struct thread *thread);
8989
void thread_stack__free(struct thread *thread);
90-
size_t thread_stack__depth(struct thread *thread);
90+
size_t thread_stack__depth(struct thread *thread, int cpu);
9191

9292
struct call_return_processor *
9393
call_return_processor__new(int (*process)(struct call_return *cr, void *data),

0 commit comments

Comments (0)