Skip to content

Commit 5f4b55e

Browse files
Peter Zijlstra authored and KAGA-KOKO committed
smp/hotplug: Differentiate the AP-work lockdep class between up and down
With lockdep-crossrelease we get deadlock reports that span cpu-up and cpu-down chains. Such deadlocks cannot possibly happen because cpu-up and cpu-down are globally serialized. CPU0 CPU1 CPU2 cpuhp_up_callbacks: takedown_cpu: cpuhp_thread_fun: cpuhp_state irq_lock_sparse() irq_lock_sparse() wait_for_completion() cpuhp_state complete() Now that we have consistent AP state, we can trivially separate the AP-work class between up and down using st->bringup. Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Link: https://lkml.kernel.org/r/[email protected]
1 parent 724a868 commit 5f4b55e

File tree

1 file changed

+32
-9
lines changed

1 file changed

+32
-9
lines changed

kernel/cpu.c

Lines changed: 32 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -68,9 +68,26 @@ struct cpuhp_cpu_state {
6868
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
6969

7070
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
71-
static struct lock_class_key cpuhp_state_key;
72-
static struct lockdep_map cpuhp_state_lock_map =
73-
STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
71+
static struct lockdep_map cpuhp_state_up_map =
72+
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
73+
static struct lockdep_map cpuhp_state_down_map =
74+
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
75+
76+
77+
static void inline cpuhp_lock_acquire(bool bringup)
78+
{
79+
lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
80+
}
81+
82+
static void inline cpuhp_lock_release(bool bringup)
83+
{
84+
lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
85+
}
86+
#else
87+
88+
static void inline cpuhp_lock_acquire(bool bringup) { }
89+
static void inline cpuhp_lock_release(bool bringup) { }
90+
7491
#endif
7592

7693
/**
@@ -486,7 +503,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
486503
if (WARN_ON_ONCE(!st->should_run))
487504
return;
488505

489-
lock_map_acquire(&cpuhp_state_lock_map);
506+
cpuhp_lock_acquire(bringup);
490507

491508
if (st->single) {
492509
state = st->cb_state;
@@ -537,7 +554,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
537554
}
538555

539556
next:
540-
lock_map_release(&cpuhp_state_lock_map);
557+
cpuhp_lock_release(bringup);
541558

542559
if (!st->should_run)
543560
complete(&st->done);
@@ -554,8 +571,11 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
554571
if (!cpu_online(cpu))
555572
return 0;
556573

557-
lock_map_acquire(&cpuhp_state_lock_map);
558-
lock_map_release(&cpuhp_state_lock_map);
574+
cpuhp_lock_acquire(false);
575+
cpuhp_lock_release(false);
576+
577+
cpuhp_lock_acquire(true);
578+
cpuhp_lock_release(true);
559579

560580
/*
561581
* If we are up and running, use the hotplug thread. For early calls
@@ -593,8 +613,11 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
593613
enum cpuhp_state prev_state = st->state;
594614
int ret;
595615

596-
lock_map_acquire(&cpuhp_state_lock_map);
597-
lock_map_release(&cpuhp_state_lock_map);
616+
cpuhp_lock_acquire(false);
617+
cpuhp_lock_release(false);
618+
619+
cpuhp_lock_acquire(true);
620+
cpuhp_lock_release(true);
598621

599622
trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
600623
ret = cpuhp_kick_ap(st, st->target);

0 commit comments

Comments (0)