Commit fc8dffd

cpu/hotplug: Convert hotplug locking to percpu rwsem
There are no more (known) nested calls to get_online_cpus() and all
observed lock ordering problems have been addressed. Replace the magic
nested 'rwsem' hackery with a percpu-rwsem.

Signed-off-by: Thomas Gleixner <[email protected]>
Tested-by: Paul E. McKenney <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Sebastian Siewior <[email protected]>
Cc: Steven Rostedt <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
1 parent 5d5dbc4 commit fc8dffd
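
For context, a minimal sketch (not part of this commit) of how the converted read side is used by callers. The helper walk_online_cpus_example() and its body are hypothetical; cpus_read_lock()/cpus_read_unlock() are the functions this commit reimplements on top of the percpu rwsem:

#include <linux/cpu.h>
#include <linux/cpumask.h>

/*
 * Hypothetical caller: iterate the online CPUs while holding the
 * hotplug lock for reading. After this commit, cpus_read_lock()
 * is percpu_down_read(&cpu_hotplug_lock): the uncontended fast
 * path touches only per-CPU state, while a concurrent hotplug
 * operation (the writer) excludes all readers.
 */
static unsigned int walk_online_cpus_example(void)
{
	unsigned int cpu, n = 0;

	cpus_read_lock();		/* block CPU hotplug */
	for_each_online_cpu(cpu)
		n++;			/* stable view of the online mask */
	cpus_read_unlock();		/* re-enable CPU hotplug */

	return n;
}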

2 files changed, 14 insertions(+), 95 deletions(-)


include/linux/cpu.h

Lines changed: 1 addition & 1 deletion
@@ -103,7 +103,7 @@ extern void cpus_write_lock(void);
 extern void cpus_write_unlock(void);
 extern void cpus_read_lock(void);
 extern void cpus_read_unlock(void);
-static inline void lockdep_assert_cpus_held(void) { }
+extern void lockdep_assert_cpus_held(void);
 extern void cpu_hotplug_disable(void);
 extern void cpu_hotplug_enable(void);
 void clear_tasks_mm_cpumask(int cpu);
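
The header change above replaces the empty inline stub with a real declaration, so the assertion can actually check something. A minimal sketch (the helper is hypothetical) of the intended use:

#include <linux/cpu.h>

/*
 * Hypothetical helper whose contract is "caller must hold the CPU
 * hotplug lock". With lockdep_assert_cpus_held() now backed by
 * percpu_rwsem_assert_held(&cpu_hotplug_lock), a caller that
 * forgot the lock triggers a lockdep splat (when lockdep is
 * enabled) instead of the check silently compiling to nothing.
 */
static void touch_hotplug_protected_state_example(void)
{
	lockdep_assert_cpus_held();
	/* ... touch state that a CPU hotplug operation must not race with ... */
}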

kernel/cpu.c

Lines changed: 13 additions & 94 deletions
@@ -27,6 +27,7 @@
 #include <linux/smpboot.h>
 #include <linux/relay.h>
 #include <linux/slab.h>
+#include <linux/percpu-rwsem.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -196,121 +197,41 @@ void cpu_maps_update_done(void)
 	mutex_unlock(&cpu_add_remove_lock);
 }
 
-/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
+/*
+ * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
  * Should always be manipulated under cpu_add_remove_lock
  */
 static int cpu_hotplug_disabled;
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static struct {
-	struct task_struct *active_writer;
-	/* wait queue to wake up the active_writer */
-	wait_queue_head_t wq;
-	/* verifies that no writer will get active while readers are active */
-	struct mutex lock;
-	/*
-	 * Also blocks the new readers during
-	 * an ongoing cpu hotplug operation.
-	 */
-	atomic_t refcount;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-} cpu_hotplug = {
-	.active_writer = NULL,
-	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
-	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
-#endif
-};
-
-/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
-#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
-#define cpuhp_lock_acquire_tryread() \
-				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
-#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
-#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
-
+DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
 
 void cpus_read_lock(void)
 {
-	might_sleep();
-	if (cpu_hotplug.active_writer == current)
-		return;
-	cpuhp_lock_acquire_read();
-	mutex_lock(&cpu_hotplug.lock);
-	atomic_inc(&cpu_hotplug.refcount);
-	mutex_unlock(&cpu_hotplug.lock);
+	percpu_down_read(&cpu_hotplug_lock);
 }
 EXPORT_SYMBOL_GPL(cpus_read_lock);
 
 void cpus_read_unlock(void)
 {
-	int refcount;
-
-	if (cpu_hotplug.active_writer == current)
-		return;
-
-	refcount = atomic_dec_return(&cpu_hotplug.refcount);
-	if (WARN_ON(refcount < 0)) /* try to fix things up */
-		atomic_inc(&cpu_hotplug.refcount);
-
-	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
-		wake_up(&cpu_hotplug.wq);
-
-	cpuhp_lock_release();
-
+	percpu_up_read(&cpu_hotplug_lock);
 }
 EXPORT_SYMBOL_GPL(cpus_read_unlock);
 
-/*
- * This ensures that the hotplug operation can begin only when the
- * refcount goes to zero.
- *
- * Note that during a cpu-hotplug operation, the new readers, if any,
- * will be blocked by the cpu_hotplug.lock
- *
- * Since cpu_hotplug_begin() is always called after invoking
- * cpu_maps_update_begin(), we can be sure that only one writer is active.
- *
- * Note that theoretically, there is a possibility of a livelock:
- * - Refcount goes to zero, last reader wakes up the sleeping
- *   writer.
- * - Last reader unlocks the cpu_hotplug.lock.
- * - A new reader arrives at this moment, bumps up the refcount.
- * - The writer acquires the cpu_hotplug.lock finds the refcount
- *   non zero and goes to sleep again.
- *
- * However, this is very difficult to achieve in practice since
- * get_online_cpus() not an api which is called all that often.
- *
- */
 void cpus_write_lock(void)
 {
-	DEFINE_WAIT(wait);
-
-	cpu_hotplug.active_writer = current;
-	cpuhp_lock_acquire();
-
-	for (;;) {
-		mutex_lock(&cpu_hotplug.lock);
-		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
-		if (likely(!atomic_read(&cpu_hotplug.refcount)))
-			break;
-		mutex_unlock(&cpu_hotplug.lock);
-		schedule();
-	}
-	finish_wait(&cpu_hotplug.wq, &wait);
+	percpu_down_write(&cpu_hotplug_lock);
 }
 
 void cpus_write_unlock(void)
 {
-	cpu_hotplug.active_writer = NULL;
-	mutex_unlock(&cpu_hotplug.lock);
-	cpuhp_lock_release();
+	percpu_up_write(&cpu_hotplug_lock);
+}
+
+void lockdep_assert_cpus_held(void)
+{
+	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 }
 
 /*
@@ -344,8 +265,6 @@ void cpu_hotplug_enable(void)
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif /* CONFIG_HOTPLUG_CPU */
 
-/* Notifier wrappers for transitioning to state machine */
-
 static int bringup_wait_for_ap(unsigned int cpu)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
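
Beyond the hotplug core, the pattern this commit adopts is ordinary percpu-rwsem usage. A minimal sketch of the same reader/writer split for any subsystem; the lock name and both helpers are hypothetical, the percpu-rwsem API is the one used above:

#include <linux/percpu-rwsem.h>

/* Hypothetical subsystem lock following the commit's pattern. */
DEFINE_STATIC_PERCPU_RWSEM(example_lock);

static void example_read_side(void)
{
	/* Cheap and scalable: per-CPU fast path when no writer is pending. */
	percpu_down_read(&example_lock);
	/* ... read the protected state ... */
	percpu_up_read(&example_lock);
}

static void example_write_side(void)
{
	/* Heavyweight and rare: forces readers onto the slow path
	 * and waits for all of them to drain before proceeding. */
	percpu_down_write(&example_lock);
	/* ... mutate the protected state ... */
	percpu_up_write(&example_lock);
}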
