Skip to content

Commit 45ad21c

Browse files
Steven Rostedt (Red Hat) (rostedt)
authored and committed
tracing: Have trace_array keep track if snapshot buffer is allocated
The snapshot buffer belongs to the trace array not the tracer that is running. The trace array should be the data structure that keeps track of whether or not the snapshot buffer is allocated, not the tracer desciptor. Having the trace array keep track of it makes modifications so much easier. Signed-off-by: Steven Rostedt <[email protected]>
1 parent 6de58e6 commit 45ad21c

File tree

2 files changed

+16
-18
lines changed

2 files changed

+16
-18
lines changed

kernel/trace/trace.c

Lines changed: 15 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -667,7 +667,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
667667

668668
WARN_ON_ONCE(!irqs_disabled());
669669

670-
if (!tr->current_trace->allocated_snapshot) {
670+
if (!tr->allocated_snapshot) {
671671
/* Only the nop tracer should hit this when disabling */
672672
WARN_ON_ONCE(tr->current_trace != &nop_trace);
673673
return;
@@ -700,7 +700,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
700700
return;
701701

702702
WARN_ON_ONCE(!irqs_disabled());
703-
if (WARN_ON_ONCE(!tr->current_trace->allocated_snapshot))
703+
if (WARN_ON_ONCE(!tr->allocated_snapshot))
704704
return;
705705

706706
arch_spin_lock(&ftrace_max_lock);
@@ -802,7 +802,7 @@ int register_tracer(struct tracer *type)
802802
if (ring_buffer_expanded)
803803
ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
804804
RING_BUFFER_ALL_CPUS);
805-
type->allocated_snapshot = true;
805+
tr->allocated_snapshot = true;
806806
}
807807
#endif
808808

@@ -822,7 +822,7 @@ int register_tracer(struct tracer *type)
822822

823823
#ifdef CONFIG_TRACER_MAX_TRACE
824824
if (type->use_max_tr) {
825-
type->allocated_snapshot = false;
825+
tr->allocated_snapshot = false;
826826

827827
/* Shrink the max buffer again */
828828
if (ring_buffer_expanded)
@@ -2463,7 +2463,7 @@ static void show_snapshot_percpu_help(struct seq_file *m)
24632463

24642464
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
24652465
{
2466-
if (iter->trace->allocated_snapshot)
2466+
if (iter->tr->allocated_snapshot)
24672467
seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
24682468
else
24692469
seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
@@ -3364,12 +3364,12 @@ static int tracing_set_tracer(const char *buf)
33643364
if (tr->current_trace->reset)
33653365
tr->current_trace->reset(tr);
33663366

3367-
#ifdef CONFIG_TRACER_MAX_TRACE
3368-
had_max_tr = tr->current_trace->allocated_snapshot;
3369-
33703367
/* Current trace needs to be nop_trace before synchronize_sched */
33713368
tr->current_trace = &nop_trace;
33723369

3370+
#ifdef CONFIG_TRACER_MAX_TRACE
3371+
had_max_tr = tr->allocated_snapshot;
3372+
33733373
if (had_max_tr && !t->use_max_tr) {
33743374
/*
33753375
* We need to make sure that the update_max_tr sees that
@@ -3387,10 +3387,8 @@ static int tracing_set_tracer(const char *buf)
33873387
ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
33883388
set_buffer_entries(&tr->max_buffer, 1);
33893389
tracing_reset_online_cpus(&tr->max_buffer);
3390-
tr->current_trace->allocated_snapshot = false;
3390+
tr->allocated_snapshot = false;
33913391
}
3392-
#else
3393-
tr->current_trace = &nop_trace;
33943392
#endif
33953393
destroy_trace_option_files(topts);
33963394

@@ -3403,7 +3401,7 @@ static int tracing_set_tracer(const char *buf)
34033401
RING_BUFFER_ALL_CPUS);
34043402
if (ret < 0)
34053403
goto out;
3406-
t->allocated_snapshot = true;
3404+
tr->allocated_snapshot = true;
34073405
}
34083406
#endif
34093407

@@ -4275,13 +4273,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
42754273
ret = -EINVAL;
42764274
break;
42774275
}
4278-
if (tr->current_trace->allocated_snapshot) {
4276+
if (tr->allocated_snapshot) {
42794277
/* free spare buffer */
42804278
ring_buffer_resize(tr->max_buffer.buffer, 1,
42814279
RING_BUFFER_ALL_CPUS);
42824280
set_buffer_entries(&tr->max_buffer, 1);
42834281
tracing_reset_online_cpus(&tr->max_buffer);
4284-
tr->current_trace->allocated_snapshot = false;
4282+
tr->allocated_snapshot = false;
42854283
}
42864284
break;
42874285
case 1:
@@ -4292,13 +4290,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
42924290
break;
42934291
}
42944292
#endif
4295-
if (!tr->current_trace->allocated_snapshot) {
4293+
if (!tr->allocated_snapshot) {
42964294
/* allocate spare buffer */
42974295
ret = resize_buffer_duplicate_size(&tr->max_buffer,
42984296
&tr->trace_buffer, RING_BUFFER_ALL_CPUS);
42994297
if (ret < 0)
43004298
break;
4301-
tr->current_trace->allocated_snapshot = true;
4299+
tr->allocated_snapshot = true;
43024300
}
43034301
local_irq_disable();
43044302
/* Now, we're going to swap */
@@ -4309,7 +4307,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
43094307
local_irq_enable();
43104308
break;
43114309
default:
4312-
if (tr->current_trace->allocated_snapshot) {
4310+
if (tr->allocated_snapshot) {
43134311
if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
43144312
tracing_reset_online_cpus(&tr->max_buffer);
43154313
else

kernel/trace/trace.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -197,6 +197,7 @@ struct trace_array {
197197
* the trace_buffer so the tracing can continue.
198198
*/
199199
struct trace_buffer max_buffer;
200+
bool allocated_snapshot;
200201
#endif
201202
int buffer_disabled;
202203
struct trace_cpu trace_cpu; /* place holder */
@@ -367,7 +368,6 @@ struct tracer {
367368
bool enabled;
368369
#ifdef CONFIG_TRACER_MAX_TRACE
369370
bool use_max_tr;
370-
bool allocated_snapshot;
371371
#endif
372372
};
373373

0 commit comments

Comments
 (0)