@@ -2796,6 +2796,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 	if (nr_pages < 2)
 		nr_pages = 2;

+	/*
+	 * Keep CPUs from coming online while resizing to synchronize
+	 * with new per CPU buffers being created.
+	 */
+	guard(cpus_read_lock)();
+
 	/* prevent another thread from changing buffer sizes */
 	mutex_lock(&buffer->mutex);
 	atomic_inc(&buffer->resizing);
@@ -2840,7 +2846,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 			cond_resched();
 		}

-		cpus_read_lock();
 		/*
 		 * Fire off all the required work handlers
 		 * We can't schedule on offline CPUs, but it's not necessary
@@ -2880,7 +2885,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 			cpu_buffer->nr_pages_to_update = 0;
 		}

-		cpus_read_unlock();
 	} else {
 		cpu_buffer = buffer->buffers[cpu_id];

@@ -2908,8 +2912,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 			goto out_err;
 		}

-		cpus_read_lock();
-
 		/* Can't run something on an offline CPU. */
 		if (!cpu_online(cpu_id))
 			rb_update_pages(cpu_buffer);
@@ -2928,7 +2930,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 		}

 		cpu_buffer->nr_pages_to_update = 0;
-		cpus_read_unlock();
 	}

 out:
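For context, the patch relies on the kernel's scope-based guard() macro (include/linux/cleanup.h), which is built on the compiler's cleanup attribute: the unlock side runs automatically when the guard variable goes out of scope, so every return path out of ring_buffer_resize() drops the CPU hotplug read lock without explicit cpus_read_unlock() calls. Below is a minimal userspace sketch of that mechanism, not the kernel's actual implementation; demo_lock, demo_unlock(), guard_token, and resize_like_function() are hypothetical names, and a pthread mutex stands in for the CPU hotplug lock.

#include <stdio.h>
#include <pthread.h>

/* Hypothetical lock standing in for the CPU hotplug read lock. */
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cleanup handler invoked automatically when the annotated variable goes
 * out of scope; this mirrors the unlock side that guard(cpus_read_lock)()
 * arranges in the kernel.
 */
static void demo_unlock(int *unused)
{
	(void)unused;
	pthread_mutex_unlock(&demo_lock);
	printf("lock released on scope exit\n");
}

static int resize_like_function(int fail)
{
	/*
	 * Hold the lock for the whole function body. The cleanup attribute
	 * runs demo_unlock() on every return path, including early error
	 * returns, so no explicit unlock calls are needed. (The kernel's
	 * guard() takes the lock when the guard variable is instantiated;
	 * here it is taken on the next line, with no return in between.)
	 */
	__attribute__((cleanup(demo_unlock))) int guard_token = 0;

	pthread_mutex_lock(&demo_lock);

	if (fail)
		return -1;	/* demo_unlock() still runs here */

	/* ... resize work protected by the lock ... */
	return 0;
}

int main(void)
{
	resize_like_function(0);
	resize_like_function(1);
	return 0;
}

Taking the guard once at the top of the function is also why the per-branch cpus_read_lock()/cpus_read_unlock() pairs in both the RING_BUFFER_ALL_CPUS path and the single-CPU path can simply be deleted in the hunks above.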