@@ -710,12 +710,11 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	WARN_ON_ONCE(!irqs_disabled());
 
-	/* If we disabled the tracer, stop now */
-	if (current_trace == &nop_trace)
-		return;
-
-	if (WARN_ON_ONCE(!current_trace->use_max_tr))
+	if (!current_trace->allocated_snapshot) {
+		/* Only the nop tracer should hit this when disabling */
+		WARN_ON_ONCE(current_trace != &nop_trace);
 		return;
+	}
 
 	arch_spin_lock(&ftrace_max_lock);
 
@@ -743,10 +742,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	if (!current_trace->use_max_tr) {
-		WARN_ON_ONCE(1);
+	if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
 		return;
-	}
 
 	arch_spin_lock(&ftrace_max_lock);
 
@@ -866,10 +863,13 @@ int register_tracer(struct tracer *type)
 
 		current_trace = type;
 
-		/* If we expanded the buffers, make sure the max is expanded too */
-		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, trace_buf_size,
-						RING_BUFFER_ALL_CPUS);
+		if (type->use_max_tr) {
+			/* If we expanded the buffers, make sure the max is expanded too */
+			if (ring_buffer_expanded)
+				ring_buffer_resize(max_tr.buffer, trace_buf_size,
+						   RING_BUFFER_ALL_CPUS);
+			type->allocated_snapshot = true;
+		}
 
 		/* the test is responsible for initializing and enabling */
 		pr_info("Testing tracer %s: ", type->name);
@@ -885,10 +885,14 @@ int register_tracer(struct tracer *type)
 		/* Only reset on passing, to avoid touching corrupted buffers */
 		tracing_reset_online_cpus(tr);
 
-		/* Shrink the max buffer again */
-		if (ring_buffer_expanded && type->use_max_tr)
-			ring_buffer_resize(max_tr.buffer, 1,
-						RING_BUFFER_ALL_CPUS);
+		if (type->use_max_tr) {
+			type->allocated_snapshot = false;
+
+			/* Shrink the max buffer again */
+			if (ring_buffer_expanded)
+				ring_buffer_resize(max_tr.buffer, 1,
+						   RING_BUFFER_ALL_CPUS);
+		}
 
 		printk(KERN_CONT "PASSED\n");
 	}
@@ -1964,7 +1968,11 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 		*iter->trace = *current_trace;
 	mutex_unlock(&trace_types_lock);
 
-	atomic_inc(&trace_record_cmdline_disabled);
+	if (iter->snapshot && iter->trace->use_max_tr)
+		return ERR_PTR(-EBUSY);
+
+	if (!iter->snapshot)
+		atomic_inc(&trace_record_cmdline_disabled);
 
 	if (*pos != iter->pos) {
 		iter->ent = NULL;
@@ -2003,7 +2011,11 @@ static void s_stop(struct seq_file *m, void *p)
 {
 	struct trace_iterator *iter = m->private;
 
-	atomic_dec(&trace_record_cmdline_disabled);
+	if (iter->snapshot && iter->trace->use_max_tr)
+		return;
+
+	if (!iter->snapshot)
+		atomic_dec(&trace_record_cmdline_disabled);
 
 	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
 }
@@ -2438,7 +2450,7 @@ static const struct seq_operations tracer_seq_ops = {
 };
 
 static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
 	long cpu_file = (long) inode->i_private;
 	struct trace_iterator *iter;
@@ -2471,10 +2483,11 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	if (current_trace && current_trace->print_max)
+	if ((current_trace && current_trace->print_max) || snapshot)
 		iter->tr = &max_tr;
 	else
 		iter->tr = &global_trace;
+	iter->snapshot = snapshot;
 	iter->pos = -1;
 	mutex_init(&iter->mutex);
 	iter->cpu_file = cpu_file;
@@ -2491,8 +2504,9 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (trace_clocks[trace_clock_id].in_ns)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-	/* stop the trace while dumping */
-	tracing_stop();
+	/* stop the trace while dumping if we are not opening "snapshot" */
+	if (!iter->snapshot)
+		tracing_stop();
 
 	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
 		for_each_tracing_cpu(cpu) {
@@ -2555,8 +2569,9 @@ static int tracing_release(struct inode *inode, struct file *file)
 	if (iter->trace && iter->trace->close)
 		iter->trace->close(iter);
 
-	/* reenable tracing if it was previously enabled */
-	tracing_start();
+	if (!iter->snapshot)
+		/* reenable tracing if it was previously enabled */
+		tracing_start();
 	mutex_unlock(&trace_types_lock);
 
 	mutex_destroy(&iter->mutex);
@@ -2584,7 +2599,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 	}
 
 	if (file->f_mode & FMODE_READ) {
-		iter = __tracing_open(inode, file);
+		iter = __tracing_open(inode, file, false);
 		if (IS_ERR(iter))
 			ret = PTR_ERR(iter);
 		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3219,7 +3234,7 @@ static int tracing_set_tracer(const char *buf)
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
 
-	had_max_tr = current_trace && current_trace->use_max_tr;
+	had_max_tr = current_trace && current_trace->allocated_snapshot;
 	current_trace = &nop_trace;
 
 	if (had_max_tr && !t->use_max_tr) {
@@ -3238,6 +3253,8 @@ static int tracing_set_tracer(const char *buf)
 		 */
 		ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
 		set_buffer_entries(&max_tr, 1);
+		tracing_reset_online_cpus(&max_tr);
+		current_trace->allocated_snapshot = false;
 	}
 	destroy_trace_option_files(topts);
 
@@ -3248,6 +3265,7 @@ static int tracing_set_tracer(const char *buf)
 						RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			goto out;
+		t->allocated_snapshot = true;
 	}
 
 	if (t->init) {
@@ -4066,6 +4084,87 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
 	return single_open(file, tracing_clock_show, NULL);
 }
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static int tracing_snapshot_open(struct inode *inode, struct file *file)
+{
+	struct trace_iterator *iter;
+	int ret = 0;
+
+	if (file->f_mode & FMODE_READ) {
+		iter = __tracing_open(inode, file, true);
+		if (IS_ERR(iter))
+			ret = PTR_ERR(iter);
+	}
+	return ret;
+}
+
+static ssize_t
+tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+		       loff_t *ppos)
+{
+	unsigned long val;
+	int ret;
+
+	ret = tracing_update_buffers();
+	if (ret < 0)
+		return ret;
+
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&trace_types_lock);
+
+	if (current_trace && current_trace->use_max_tr) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	switch (val) {
+	case 0:
+		if (current_trace->allocated_snapshot) {
+			/* free spare buffer */
+			ring_buffer_resize(max_tr.buffer, 1,
+					   RING_BUFFER_ALL_CPUS);
+			set_buffer_entries(&max_tr, 1);
+			tracing_reset_online_cpus(&max_tr);
+			current_trace->allocated_snapshot = false;
+		}
+		break;
+	case 1:
+		if (!current_trace->allocated_snapshot) {
+			/* allocate spare buffer */
+			ret = resize_buffer_duplicate_size(&max_tr,
+					&global_trace, RING_BUFFER_ALL_CPUS);
+			if (ret < 0)
+				break;
+			current_trace->allocated_snapshot = true;
+		}
+
+		local_irq_disable();
+		/* Now, we're going to swap */
+		update_max_tr(&global_trace, current, smp_processor_id());
+		local_irq_enable();
+		break;
+	default:
+		if (current_trace->allocated_snapshot)
+			tracing_reset_online_cpus(&max_tr);
+		else
+			ret = -EINVAL;
+		break;
+	}
+
+	if (ret >= 0) {
+		*ppos += cnt;
+		ret = cnt;
+	}
+out:
+	mutex_unlock(&trace_types_lock);
+	return ret;
+}
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
@@ -4122,6 +4221,16 @@ static const struct file_operations trace_clock_fops = {
 	.write		= tracing_clock_write,
 };
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static const struct file_operations snapshot_fops = {
+	.open		= tracing_snapshot_open,
+	.read		= seq_read,
+	.write		= tracing_snapshot_write,
+	.llseek		= tracing_seek,
+	.release	= tracing_release,
+};
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
 struct ftrace_buffer_info {
 	struct trace_array	*tr;
 	void			*spare;
@@ -4921,6 +5030,11 @@ static __init int tracer_init_debugfs(void)
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+	trace_create_file("snapshot", 0644, d_tracer,
+			  (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
+#endif
+
 	create_trace_options_dir();
 
 	for_each_tracing_cpu(cpu)
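For reference, a minimal userspace sketch (not part of the patch above) that exercises the new "snapshot" file the way tracing_snapshot_write() expects: writing "1" allocates the spare max_tr buffer if needed and takes a snapshot, reading shows the snapshotted buffer, writing "0" shrinks the spare buffer again. The debugfs mount point is an assumption; adjust the path if tracing is mounted elsewhere.

/* sketch only: exercise the snapshot debugfs file added by this patch */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed path; depends on where debugfs is mounted */
	const char *path = "/sys/kernel/debug/tracing/snapshot";
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open snapshot");
		return 1;
	}

	/* "1": allocate the spare buffer (if needed) and swap in a snapshot */
	if (write(fd, "1", 1) != 1)
		perror("write 1");

	/* reading the file dumps the snapshotted (max_tr) buffer via seq_read */
	lseek(fd, 0, SEEK_SET);
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	/* "0": free the spare buffer again */
	if (write(fd, "0", 1) != 1)
		perror("write 0");

	close(fd);
	return 0;
}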