Commit 7170509

Author: Peter Zijlstra (authored and committed)
sched: Simplify sched_core_cpu_{starting,deactivate}()
Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Valentin Schneider <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent b4e1fa1 commit 7170509

File tree

1 file changed: +12 −15 lines


kernel/sched/core.c

Lines changed: 12 additions & 15 deletions
@@ -6400,20 +6400,24 @@ static void queue_core_balance(struct rq *rq)
 	queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
 }
 
+DEFINE_LOCK_GUARD_1(core_lock, int,
+		    sched_core_lock(*_T->lock, &_T->flags),
+		    sched_core_unlock(*_T->lock, &_T->flags),
+		    unsigned long flags)
+
 static void sched_core_cpu_starting(unsigned int cpu)
 {
 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
-	unsigned long flags;
 	int t;
 
-	sched_core_lock(cpu, &flags);
+	guard(core_lock)(&cpu);
 
 	WARN_ON_ONCE(rq->core != rq);
 
 	/* if we're the first, we'll be our own leader */
 	if (cpumask_weight(smt_mask) == 1)
-		goto unlock;
+		return;
 
 	/* find the leader */
 	for_each_cpu(t, smt_mask) {
@@ -6427,7 +6431,7 @@ static void sched_core_cpu_starting(unsigned int cpu)
 	}
 
 	if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
-		goto unlock;
+		return;
 
 	/* install and validate core_rq */
 	for_each_cpu(t, smt_mask) {
@@ -6438,29 +6442,25 @@ static void sched_core_cpu_starting(unsigned int cpu)
 
 		WARN_ON_ONCE(rq->core != core_rq);
 	}
-
-unlock:
-	sched_core_unlock(cpu, &flags);
 }
 
 static void sched_core_cpu_deactivate(unsigned int cpu)
 {
 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
-	unsigned long flags;
 	int t;
 
-	sched_core_lock(cpu, &flags);
+	guard(core_lock)(&cpu);
 
 	/* if we're the last man standing, nothing to do */
 	if (cpumask_weight(smt_mask) == 1) {
 		WARN_ON_ONCE(rq->core != rq);
-		goto unlock;
+		return;
 	}
 
 	/* if we're not the leader, nothing to do */
 	if (rq->core != rq)
-		goto unlock;
+		return;
 
 	/* find a new leader */
 	for_each_cpu(t, smt_mask) {
@@ -6471,7 +6471,7 @@ static void sched_core_cpu_deactivate(unsigned int cpu)
 	}
 
 	if (WARN_ON_ONCE(!core_rq)) /* impossible */
-		goto unlock;
+		return;
 
 	/* copy the shared state to the new leader */
 	core_rq->core_task_seq = rq->core_task_seq;
@@ -6493,9 +6493,6 @@ static void sched_core_cpu_deactivate(unsigned int cpu)
 		rq = cpu_rq(t);
 		rq->core = core_rq;
 	}
-
-unlock:
-	sched_core_unlock(cpu, &flags);
 }
 
 static inline void sched_core_cpu_dying(unsigned int cpu)
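For readers unfamiliar with the guard infrastructure used above: DEFINE_LOCK_GUARD_1() and guard() come from the kernel's scope-based cleanup helpers (include/linux/cleanup.h), which are built on the compiler's cleanup attribute so that the unlock side runs automatically when the guard variable leaves scope. The sketch below is a minimal userspace illustration of that same pattern, not the kernel implementation; LOCK_GUARD, lock_guard_release, demo_lock and update_counter are made-up names used only for demonstration.

/* guard_sketch.c - illustrative only; build with: gcc -Wall guard_sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

struct lock_guard {
        pthread_mutex_t *lock;
};

/* Called automatically when a guarded variable goes out of scope. */
static void lock_guard_release(struct lock_guard *g)
{
        pthread_mutex_unlock(g->lock);
}

/* Acquire on declaration; the cleanup attribute releases at scope exit. */
#define LOCK_GUARD(name, l)                                                    \
        struct lock_guard name __attribute__((cleanup(lock_guard_release))) = \
                { .lock = (l) };                                               \
        pthread_mutex_lock((name).lock)

static int update_counter(int *counter, int delta)
{
        LOCK_GUARD(g, &demo_lock);

        /* Early return: no goto needed, the guard still unlocks. */
        if (delta == 0)
                return *counter;

        *counter += delta;
        return *counter;
}

int main(void)
{
        int counter = 0;

        printf("counter = %d\n", update_counter(&counter, 5));
        printf("counter = %d\n", update_counter(&counter, 0));
        return 0;
}

This is why the early "goto unlock;" paths in the old code can become plain "return;" statements in the diff: releasing the lock is now the guard's responsibility rather than manual control flow.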
