 #include <linux/smpboot.h>
 #include <linux/relay.h>
 #include <linux/slab.h>
+#include <linux/percpu-rwsem.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -196,121 +197,41 @@ void cpu_maps_update_done(void)
 	mutex_unlock(&cpu_add_remove_lock);
 }
 
-/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
+/*
+ * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
  * Should always be manipulated under cpu_add_remove_lock
  */
 static int cpu_hotplug_disabled;
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static struct {
-	struct task_struct *active_writer;
-	/* wait queue to wake up the active_writer */
-	wait_queue_head_t wq;
-	/* verifies that no writer will get active while readers are active */
-	struct mutex lock;
-	/*
-	 * Also blocks the new readers during
-	 * an ongoing cpu hotplug operation.
-	 */
-	atomic_t refcount;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-} cpu_hotplug = {
-	.active_writer = NULL,
-	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
-	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
-#endif
-};
-
-/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
-#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
-#define cpuhp_lock_acquire_tryread() \
-				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
-#define cpuhp_lock_acquire()	  lock_map_acquire(&cpu_hotplug.dep_map)
-#define cpuhp_lock_release()	  lock_map_release(&cpu_hotplug.dep_map)
-
+DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
 
 void cpus_read_lock(void)
 {
-	might_sleep();
-	if (cpu_hotplug.active_writer == current)
-		return;
-	cpuhp_lock_acquire_read();
-	mutex_lock(&cpu_hotplug.lock);
-	atomic_inc(&cpu_hotplug.refcount);
-	mutex_unlock(&cpu_hotplug.lock);
+	percpu_down_read(&cpu_hotplug_lock);
 }
 EXPORT_SYMBOL_GPL(cpus_read_lock);
 
 void cpus_read_unlock(void)
 {
-	int refcount;
-
-	if (cpu_hotplug.active_writer == current)
-		return;
-
-	refcount = atomic_dec_return(&cpu_hotplug.refcount);
-	if (WARN_ON(refcount < 0)) /* try to fix things up */
-		atomic_inc(&cpu_hotplug.refcount);
-
-	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
-		wake_up(&cpu_hotplug.wq);
-
-	cpuhp_lock_release();
-
+	percpu_up_read(&cpu_hotplug_lock);
 }
 EXPORT_SYMBOL_GPL(cpus_read_unlock);
 
-/*
- * This ensures that the hotplug operation can begin only when the
- * refcount goes to zero.
- *
- * Note that during a cpu-hotplug operation, the new readers, if any,
- * will be blocked by the cpu_hotplug.lock
- *
- * Since cpu_hotplug_begin() is always called after invoking
- * cpu_maps_update_begin(), we can be sure that only one writer is active.
- *
- * Note that theoretically, there is a possibility of a livelock:
- * - Refcount goes to zero, last reader wakes up the sleeping
- *   writer.
- * - Last reader unlocks the cpu_hotplug.lock.
- * - A new reader arrives at this moment, bumps up the refcount.
- * - The writer acquires the cpu_hotplug.lock finds the refcount
- *   non zero and goes to sleep again.
- *
- * However, this is very difficult to achieve in practice since
- * get_online_cpus() not an api which is called all that often.
- *
- */
 void cpus_write_lock(void)
 {
-	DEFINE_WAIT(wait);
-
-	cpu_hotplug.active_writer = current;
-	cpuhp_lock_acquire();
-
-	for (;;) {
-		mutex_lock(&cpu_hotplug.lock);
-		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
-		if (likely(!atomic_read(&cpu_hotplug.refcount)))
-			break;
-		mutex_unlock(&cpu_hotplug.lock);
-		schedule();
-	}
-	finish_wait(&cpu_hotplug.wq, &wait);
+	percpu_down_write(&cpu_hotplug_lock);
 }
 
 void cpus_write_unlock(void)
 {
-	cpu_hotplug.active_writer = NULL;
-	mutex_unlock(&cpu_hotplug.lock);
-	cpuhp_lock_release();
+	percpu_up_write(&cpu_hotplug_lock);
+}
+
+void lockdep_assert_cpus_held(void)
+{
+	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 }
 
 /*
@@ -344,8 +265,6 @@ void cpu_hotplug_enable(void)
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif /* CONFIG_HOTPLUG_CPU */
 
-/* Notifier wrappers for transitioning to state machine */
-
 static int bringup_wait_for_ap(unsigned int cpu)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
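
For orientation, here is a minimal sketch of the percpu-rwsem pattern the patch switches to. The lock name `my_subsys_lock` and the helper functions are hypothetical illustrations; only `cpu_hotplug_lock` and the `cpus_*_lock()`/`lockdep_assert_cpus_held()` helpers above are part of the actual patch.

```c
/*
 * Sketch only: "my_subsys_lock" and these wrappers are hypothetical,
 * shown to illustrate the percpu-rwsem API used by the patch above.
 */
#include <linux/percpu-rwsem.h>

DEFINE_STATIC_PERCPU_RWSEM(my_subsys_lock);

static void my_subsys_reader(void)
{
	/* Read side: cheap, per-CPU counting, may sleep. */
	percpu_down_read(&my_subsys_lock);
	/* ... read-side critical section ... */
	percpu_up_read(&my_subsys_lock);
}

static void my_subsys_writer(void)
{
	/* Write side: waits until all readers have drained. */
	percpu_down_write(&my_subsys_lock);
	/* ... exclusive section ... */
	percpu_up_write(&my_subsys_lock);
}

static void my_subsys_assert_locked(void)
{
	/* Lockdep check that either the read or write side is held. */
	percpu_rwsem_assert_held(&my_subsys_lock);
}
```

The design point of the conversion: with a percpu rwsem the write side is a real `percpu_down_write()`, so the removed `active_writer`/`refcount` bookkeeping, the hand-rolled wait queue, and the theoretical reader/writer livelock described in the deleted comment all disappear, and lockdep tracks the lock through the rwsem itself instead of the custom `dep_map` annotations.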