Skip to content

Commit 737223f

Browse files
Steven Rostedt (Red Hat) (rostedt)
authored and committed
tracing: Consolidate buffer allocation code
There's a bit of duplicate code in creating the trace buffers for the normal trace buffer and the max trace buffer among the instances and the main global_trace. This code can be consolidated and cleaned up a bit, making the code cleaner and more readable and reducing duplication. Signed-off-by: Steven Rostedt <[email protected]>
1 parent 45ad21c commit 737223f

File tree

1 file changed

+63
-67
lines changed

1 file changed

+63
-67
lines changed

kernel/trace/trace.c

Lines changed: 63 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -3171,6 +3171,7 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
31713171
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
31723172
{
31733173
int cpu;
3174+
31743175
for_each_tracing_cpu(cpu)
31753176
per_cpu_ptr(buf->data, cpu)->entries = val;
31763177
}
@@ -5267,12 +5268,70 @@ struct dentry *trace_instance_dir;
52675268
static void
52685269
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
52695270

5270-
static int new_instance_create(const char *name)
5271+
static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
5272+
{
5273+
int cpu;
5274+
5275+
for_each_tracing_cpu(cpu) {
5276+
memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
5277+
per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
5278+
per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
5279+
}
5280+
}
5281+
5282+
static int allocate_trace_buffers(struct trace_array *tr, int size)
52715283
{
52725284
enum ring_buffer_flags rb_flags;
5285+
5286+
rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5287+
5288+
tr->trace_buffer.buffer = ring_buffer_alloc(size, rb_flags);
5289+
if (!tr->trace_buffer.buffer)
5290+
goto out_free;
5291+
5292+
tr->trace_buffer.data = alloc_percpu(struct trace_array_cpu);
5293+
if (!tr->trace_buffer.data)
5294+
goto out_free;
5295+
5296+
init_trace_buffers(tr, &tr->trace_buffer);
5297+
5298+
/* Allocate the first page for all buffers */
5299+
set_buffer_entries(&tr->trace_buffer,
5300+
ring_buffer_size(tr->trace_buffer.buffer, 0));
5301+
5302+
#ifdef CONFIG_TRACER_MAX_TRACE
5303+
5304+
tr->max_buffer.buffer = ring_buffer_alloc(1, rb_flags);
5305+
if (!tr->max_buffer.buffer)
5306+
goto out_free;
5307+
5308+
tr->max_buffer.data = alloc_percpu(struct trace_array_cpu);
5309+
if (!tr->max_buffer.data)
5310+
goto out_free;
5311+
5312+
init_trace_buffers(tr, &tr->max_buffer);
5313+
5314+
set_buffer_entries(&tr->max_buffer, 1);
5315+
#endif
5316+
return 0;
5317+
5318+
out_free:
5319+
if (tr->trace_buffer.buffer)
5320+
ring_buffer_free(tr->trace_buffer.buffer);
5321+
free_percpu(tr->trace_buffer.data);
5322+
5323+
#ifdef CONFIG_TRACER_MAX_TRACE
5324+
if (tr->max_buffer.buffer)
5325+
ring_buffer_free(tr->max_buffer.buffer);
5326+
free_percpu(tr->max_buffer.data);
5327+
#endif
5328+
return -ENOMEM;
5329+
}
5330+
5331+
static int new_instance_create(const char *name)
5332+
{
52735333
struct trace_array *tr;
52745334
int ret;
5275-
int i;
52765335

52775336
mutex_lock(&trace_types_lock);
52785337

@@ -5298,22 +5357,9 @@ static int new_instance_create(const char *name)
52985357
INIT_LIST_HEAD(&tr->systems);
52995358
INIT_LIST_HEAD(&tr->events);
53005359

5301-
rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5302-
5303-
tr->trace_buffer.buffer = ring_buffer_alloc(trace_buf_size, rb_flags);
5304-
if (!tr->trace_buffer.buffer)
5305-
goto out_free_tr;
5306-
5307-
tr->trace_buffer.data = alloc_percpu(struct trace_array_cpu);
5308-
if (!tr->trace_buffer.data)
5360+
if (allocate_trace_buffers(tr, trace_buf_size) < 0)
53095361
goto out_free_tr;
53105362

5311-
for_each_tracing_cpu(i) {
5312-
memset(per_cpu_ptr(tr->trace_buffer.data, i), 0, sizeof(struct trace_array_cpu));
5313-
per_cpu_ptr(tr->trace_buffer.data, i)->trace_cpu.cpu = i;
5314-
per_cpu_ptr(tr->trace_buffer.data, i)->trace_cpu.tr = tr;
5315-
}
5316-
53175363
/* Holder for file callbacks */
53185364
tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
53195365
tr->trace_cpu.tr = tr;
@@ -5736,8 +5782,6 @@ EXPORT_SYMBOL_GPL(ftrace_dump);
57365782
__init static int tracer_alloc_buffers(void)
57375783
{
57385784
int ring_buf_size;
5739-
enum ring_buffer_flags rb_flags;
5740-
int i;
57415785
int ret = -ENOMEM;
57425786

57435787

@@ -5758,69 +5802,21 @@ __init static int tracer_alloc_buffers(void)
57585802
else
57595803
ring_buf_size = 1;
57605804

5761-
rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5762-
57635805
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
57645806
cpumask_copy(tracing_cpumask, cpu_all_mask);
57655807

57665808
raw_spin_lock_init(&global_trace.start_lock);
57675809

57685810
/* TODO: make the number of buffers hot pluggable with CPUS */
5769-
global_trace.trace_buffer.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
5770-
if (!global_trace.trace_buffer.buffer) {
5811+
if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
57715812
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
57725813
WARN_ON(1);
57735814
goto out_free_cpumask;
57745815
}
57755816

5776-
global_trace.trace_buffer.data = alloc_percpu(struct trace_array_cpu);
5777-
5778-
if (!global_trace.trace_buffer.data) {
5779-
printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
5780-
WARN_ON(1);
5781-
goto out_free_cpumask;
5782-
}
5783-
5784-
for_each_tracing_cpu(i) {
5785-
memset(per_cpu_ptr(global_trace.trace_buffer.data, i), 0,
5786-
sizeof(struct trace_array_cpu));
5787-
per_cpu_ptr(global_trace.trace_buffer.data, i)->trace_cpu.cpu = i;
5788-
per_cpu_ptr(global_trace.trace_buffer.data, i)->trace_cpu.tr = &global_trace;
5789-
}
5790-
57915817
if (global_trace.buffer_disabled)
57925818
tracing_off();
57935819

5794-
#ifdef CONFIG_TRACER_MAX_TRACE
5795-
global_trace.max_buffer.data = alloc_percpu(struct trace_array_cpu);
5796-
if (!global_trace.max_buffer.data) {
5797-
printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
5798-
WARN_ON(1);
5799-
goto out_free_cpumask;
5800-
}
5801-
global_trace.max_buffer.buffer = ring_buffer_alloc(1, rb_flags);
5802-
if (!global_trace.max_buffer.buffer) {
5803-
printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
5804-
WARN_ON(1);
5805-
ring_buffer_free(global_trace.trace_buffer.buffer);
5806-
goto out_free_cpumask;
5807-
}
5808-
5809-
for_each_tracing_cpu(i) {
5810-
memset(per_cpu_ptr(global_trace.max_buffer.data, i), 0,
5811-
sizeof(struct trace_array_cpu));
5812-
per_cpu_ptr(global_trace.max_buffer.data, i)->trace_cpu.cpu = i;
5813-
per_cpu_ptr(global_trace.max_buffer.data, i)->trace_cpu.tr = &global_trace;
5814-
}
5815-
#endif
5816-
5817-
/* Allocate the first page for all buffers */
5818-
set_buffer_entries(&global_trace.trace_buffer,
5819-
ring_buffer_size(global_trace.trace_buffer.buffer, 0));
5820-
#ifdef CONFIG_TRACER_MAX_TRACE
5821-
set_buffer_entries(&global_trace.max_buffer, 1);
5822-
#endif
5823-
58245820
trace_init_cmdlines();
58255821

58265822
register_tracer(&nop_trace);

0 commit comments

Comments
 (0)