Skip to content

Commit 0330f7a

Browse files
committed
tracing: Have hwlat trace migrate across tracing_cpumask CPUs
Instead of having the hwlat detector thread stay on one CPU, have it migrate across all the CPUs specified by tracing_cpumask. If the user modifies the thread's CPU affinity, the migration will stop until the next time the tracer is instantiated. The migration happens at the end of each window (period). Signed-off-by: Steven Rostedt <[email protected]>
1 parent c850ed3 commit 0330f7a

File tree

2 files changed

+61
-0
lines changed

2 files changed

+61
-0
lines changed

Documentation/trace/hwlat_detector.txt

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -69,5 +69,11 @@ in /sys/kernel/tracing:
6969

7070
tracing_threshold - minimum latency value to be considered (usecs)
7171
tracing_max_latency - maximum hardware latency actually observed (usecs)
72+
tracing_cpumask - the CPUs to move the hwlat thread across
7273
hwlat_detector/width - specified amount of time to spin within window (usecs)
7374
hwlat_detector/window - amount of time between (width) runs (usecs)
75+
76+
The hwlat detector's kernel thread will migrate across each CPU specified in
tracing_cpumask between each window. To limit which CPUs it migrates across,
modify tracing_cpumask; to stop the migration entirely, modify the CPU
affinity of the hwlat kernel thread (named [hwlatd]) directly.

kernel/trace/trace_hwlat.c

Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@
4242
#include <linux/kthread.h>
4343
#include <linux/tracefs.h>
4444
#include <linux/uaccess.h>
45+
#include <linux/cpumask.h>
4546
#include <linux/delay.h>
4647
#include "trace.h"
4748

@@ -211,6 +212,57 @@ static int get_sample(void)
211212
return ret;
212213
}
213214

215+
static struct cpumask save_cpumask;
216+
static bool disable_migrate;
217+
218+
static void move_to_next_cpu(void)
219+
{
220+
static struct cpumask *current_mask;
221+
int next_cpu;
222+
223+
if (disable_migrate)
224+
return;
225+
226+
/* Just pick the first CPU on first iteration */
227+
if (!current_mask) {
228+
current_mask = &save_cpumask;
229+
get_online_cpus();
230+
cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
231+
put_online_cpus();
232+
next_cpu = cpumask_first(current_mask);
233+
goto set_affinity;
234+
}
235+
236+
/*
237+
* If for some reason the user modifies the CPU affinity
238+
* of this thread, than stop migrating for the duration
239+
* of the current test.
240+
*/
241+
if (!cpumask_equal(current_mask, &current->cpus_allowed))
242+
goto disable;
243+
244+
get_online_cpus();
245+
cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
246+
next_cpu = cpumask_next(smp_processor_id(), current_mask);
247+
put_online_cpus();
248+
249+
if (next_cpu >= nr_cpu_ids)
250+
next_cpu = cpumask_first(current_mask);
251+
252+
set_affinity:
253+
if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
254+
goto disable;
255+
256+
cpumask_clear(current_mask);
257+
cpumask_set_cpu(next_cpu, current_mask);
258+
259+
sched_setaffinity(0, current_mask);
260+
return;
261+
262+
disable:
263+
disable_migrate = true;
264+
}
265+
214266
/*
215267
* kthread_fn - The CPU time sampling/hardware latency detection kernel thread
216268
*
@@ -230,6 +282,8 @@ static int kthread_fn(void *data)
230282

231283
while (!kthread_should_stop()) {
232284

285+
move_to_next_cpu();
286+
233287
local_irq_disable();
234288
get_sample();
235289
local_irq_enable();
@@ -473,6 +527,7 @@ static int hwlat_tracer_init(struct trace_array *tr)
473527

474528
hwlat_trace = tr;
475529

530+
disable_migrate = false;
476531
hwlat_data.count = 0;
477532
tr->max_latency = 0;
478533
save_tracing_thresh = tracing_thresh;

0 commit comments

Comments
 (0)