
Commit 7ffbd48

Steven Rostedt authored and committed
tracing: Cache comms only after an event occurred
Whenever an event is registered, the comms of tasks are saved at every task switch instead of saving them at every event. But if an event isn't executed much, the comm cache will be filled up by tasks that did not record the event and you lose out on the comms that did.

Here's an example: if you enable the following events:

echo 1 > /debug/tracing/events/kvm/kvm_cr/enable
echo 1 > /debug/tracing/events/net/net_dev_xmit/enable

Note, there's no kvm running on this machine so the first event will never be triggered, but because it is enabled, the storing of comms will continue. If we now disable the network event:

echo 0 > /debug/tracing/events/net/net_dev_xmit/enable

and look at the trace:

cat /debug/tracing/trace
sshd-2672 [001] ..s2 375.731616: net_dev_xmit: dev=eth0 skbaddr=ffff88005cbb6de0 len=242 rc=0
sshd-2672 [001] ..s1 375.731617: net_dev_xmit: dev=br0 skbaddr=ffff88005cbb6de0 len=242 rc=0
sshd-2672 [001] ..s2 375.859356: net_dev_xmit: dev=eth0 skbaddr=ffff88005cbb6de0 len=242 rc=0
sshd-2672 [001] ..s1 375.859357: net_dev_xmit: dev=br0 skbaddr=ffff88005cbb6de0 len=242 rc=0
sshd-2672 [001] ..s2 375.947351: net_dev_xmit: dev=eth0 skbaddr=ffff88005cbb6de0 len=242 rc=0
sshd-2672 [001] ..s1 375.947352: net_dev_xmit: dev=br0 skbaddr=ffff88005cbb6de0 len=242 rc=0
sshd-2672 [001] ..s2 376.035383: net_dev_xmit: dev=eth0 skbaddr=ffff88005cbb6de0 len=242 rc=0
sshd-2672 [001] ..s1 376.035383: net_dev_xmit: dev=br0 skbaddr=ffff88005cbb6de0 len=242 rc=0
sshd-2672 [001] ..s2 377.563806: net_dev_xmit: dev=eth0 skbaddr=ffff88005cbb6de0 len=226 rc=0
sshd-2672 [001] ..s1 377.563807: net_dev_xmit: dev=br0 skbaddr=ffff88005cbb6de0 len=226 rc=0
sshd-2672 [001] ..s2 377.563834: net_dev_xmit: dev=eth0 skbaddr=ffff88005cbb6be0 len=114 rc=0
sshd-2672 [001] ..s1 377.563842: net_dev_xmit: dev=br0 skbaddr=ffff88005cbb6be0 len=114 rc=0

We see that process 2672 which triggered the events has the comm "sshd". But if we run hackbench for a bit and look again:

cat /debug/tracing/trace
<...>-2672 [001] ..s2 375.731616: net_dev_xmit: dev=eth0 skbaddr=ffff88005cbb6de0 len=242 rc=0
<...>-2672 [001] ..s1 375.731617: net_dev_xmit: dev=br0 skbaddr=ffff88005cbb6de0 len=242 rc=0
<...>-2672 [001] ..s2 375.859356: net_dev_xmit: dev=eth0 skbaddr=ffff88005cbb6de0 len=242 rc=0
<...>-2672 [001] ..s1 375.859357: net_dev_xmit: dev=br0 skbaddr=ffff88005cbb6de0 len=242 rc=0
<...>-2672 [001] ..s2 375.947351: net_dev_xmit: dev=eth0 skbaddr=ffff88005cbb6de0 len=242 rc=0
<...>-2672 [001] ..s1 375.947352: net_dev_xmit: dev=br0 skbaddr=ffff88005cbb6de0 len=242 rc=0
<...>-2672 [001] ..s2 376.035383: net_dev_xmit: dev=eth0 skbaddr=ffff88005cbb6de0 len=242 rc=0
<...>-2672 [001] ..s1 376.035383: net_dev_xmit: dev=br0 skbaddr=ffff88005cbb6de0 len=242 rc=0
<...>-2672 [001] ..s2 377.563806: net_dev_xmit: dev=eth0 skbaddr=ffff88005cbb6de0 len=226 rc=0
<...>-2672 [001] ..s1 377.563807: net_dev_xmit: dev=br0 skbaddr=ffff88005cbb6de0 len=226 rc=0
<...>-2672 [001] ..s2 377.563834: net_dev_xmit: dev=eth0 skbaddr=ffff88005cbb6be0 len=114 rc=0
<...>-2672 [001] ..s1 377.563842: net_dev_xmit: dev=br0 skbaddr=ffff88005cbb6be0 len=114 rc=0

The stored "sshd" comm has been flushed out and we get a useless "<...>". But by only storing comms after a trace event occurred, we can run hackbench all day and still get the same output.

Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
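The gist of the fix is a flag that is armed when an event is actually committed to the ring buffer and consumed at the next context switch, so only tasks that really produced events get their comm cached. Below is a minimal, single-threaded userspace sketch of that gating pattern, not the kernel code itself; the names record_event(), record_cmdline() and the single saved_comm slot are simplifications invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-CPU trace_cmdline_save flag (one CPU only here). */
static bool cmdline_save;

/* Stand-in for the comm cache: remember the comm of the last task that traced. */
static char saved_comm[16];

/* Called at every context switch, like tracing_record_cmdline(). */
static void record_cmdline(const char *comm)
{
	if (!cmdline_save)
		return;		/* no event since the last save: keep the cache */
	cmdline_save = false;
	snprintf(saved_comm, sizeof(saved_comm), "%s", comm);
}

/* Called when an event is committed to the buffer, like __buffer_unlock_commit(). */
static void record_event(void)
{
	cmdline_save = true;	/* arm the flag for the next context switch */
}

int main(void)
{
	record_event();			/* "sshd" writes an event ...               */
	record_cmdline("sshd");		/* ... so the next switch caches its comm   */
	record_cmdline("hackbench");	/* no event in between: cache not clobbered */
	printf("cached comm: %s\n", saved_comm);	/* prints "sshd" */
	return 0;
}

In the real patch the flag is per CPU and the cache is the pid-to-comm map updated by trace_save_cmdline(); the principle is the same, as the diffs below show.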
1 parent 2b70e59 commit 7ffbd48

4 files changed: 33 additions and 11 deletions


kernel/trace/trace.c

Lines changed: 27 additions & 8 deletions
@@ -77,6 +77,13 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
 	return 0;
 }
 
+/*
+ * To prevent the comm cache from being overwritten when no
+ * tracing is active, only save the comm when a trace event
+ * occurred.
+ */
+static DEFINE_PER_CPU(bool, trace_cmdline_save);
+
 /*
  * Kill all tracing for good (never come back).
  * It is initialized to 1 but will turn to zero if the initialization
@@ -1135,6 +1142,11 @@ void tracing_record_cmdline(struct task_struct *tsk)
 	    !tracing_is_on())
 		return;
 
+	if (!__this_cpu_read(trace_cmdline_save))
+		return;
+
+	__this_cpu_write(trace_cmdline_save, false);
+
 	trace_save_cmdline(tsk);
 }
 
@@ -1178,13 +1190,20 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 }
 
+void
+__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+{
+	__this_cpu_write(trace_cmdline_save, true);
+	ring_buffer_unlock_commit(buffer, event);
+}
+
 static inline void
 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
 			     struct ring_buffer_event *event,
 			     unsigned long flags, int pc,
 			     int wake)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack(buffer, flags, 6, pc);
 	ftrace_trace_userstack(buffer, flags, pc);
@@ -1232,7 +1251,7 @@ void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
 					    unsigned long flags, int pc,
 					    struct pt_regs *regs)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
 	ftrace_trace_userstack(buffer, flags, pc);
@@ -1269,7 +1288,7 @@ trace_function(struct trace_array *tr,
 	entry->parent_ip		= parent_ip;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 }
 
 void
@@ -1362,7 +1381,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	entry->size = trace.nr_entries;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
 	/* Again, don't let gcc optimize things here */
@@ -1458,7 +1477,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
 	__this_cpu_dec(user_stack_count);
@@ -1653,7 +1672,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
 
@@ -1724,7 +1743,7 @@ int trace_array_vprintk(struct trace_array *tr,
 	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
  out:
@@ -3993,7 +4012,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	} else
 		entry->buf[cnt] = '\0';
 
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	written = cnt;
 
kernel/trace/trace.h

Lines changed: 3 additions & 0 deletions
@@ -359,6 +359,9 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
+void __buffer_unlock_commit(struct ring_buffer *buffer,
+			    struct ring_buffer_event *event);
+
 int trace_empty(struct trace_iterator *iter);
 
 void *trace_find_next_entry_inc(struct trace_iterator *iter);

kernel/trace/trace_branch.c

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->correct = val == expect;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);

kernel/trace/trace_functions_graph.c

Lines changed: 2 additions & 2 deletions
@@ -223,7 +223,7 @@ int __trace_graph_entry(struct trace_array *tr,
 	entry	= ring_buffer_event_data(event);
 	entry->graph_ent		= *trace;
 	if (!filter_current_check_discard(buffer, call, entry, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
 	return 1;
 }
@@ -327,7 +327,7 @@ void __trace_graph_return(struct trace_array *tr,
 	entry	= ring_buffer_event_data(event);
 	entry->ret			= *trace;
 	if (!filter_current_check_discard(buffer, call, entry, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace)
