
Commit 31cb1bc

rodrigosiqueira authored and Ingo Molnar committed
sched/core: Rework and clarify prepare_lock_switch()
The prepare_lock_switch() function has an unused parameter, and its name does not describe what it does. To improve readability and drop the extra parameter, make the following changes:

* Move prepare_lock_switch() from kernel/sched/sched.h to kernel/sched/core.c, rename it to prepare_task(), and remove the unused parameter.

* Split the smp_store_release() out of finish_lock_switch() into a new function named finish_task().

* Adjust comments accordingly.

Signed-off-by: Rodrigo Siqueira <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent: cb1f34d

2 files changed (+49, -45 lines)
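For orientation, here is a minimal sketch of the call structure this commit produces. This is not kernel code: the types and function bodies are illustrative stand-ins, and the real helpers use smp_store_release(), lockdep fixups, and the runqueue lock, as the diffs below show.

/* Illustrative stubs only -- see the real code in the diff below. */
#include <stdio.h>

struct task_struct { int on_cpu; };

/* prepare_task(): claim the task as running before switching to it. */
static void prepare_task(struct task_struct *next)
{
	next->on_cpu = 1;
}

/* finish_task(): publish that prev is off the CPU; the kernel does
 * this with smp_store_release(&prev->on_cpu, 0). */
static void finish_task(struct task_struct *prev)
{
	prev->on_cpu = 0;
}

/* finish_lock_switch(): in the kernel this fixes up lockdep state and
 * unlocks the runqueue; here it is only a placeholder. */
static void finish_lock_switch(void)
{
}

int main(void)
{
	struct task_struct prev = { .on_cpu = 1 }, next = { .on_cpu = 0 };

	prepare_task(&next);	/* called from prepare_task_switch() */
	/* ... the architecture context switch happens here ... */
	finish_task(&prev);	/* called from finish_task_switch(), ... */
	finish_lock_switch();	/* ... immediately followed by this */
	printf("prev.on_cpu=%d next.on_cpu=%d\n", prev.on_cpu, next.on_cpu);
	return 0;
}

The point of the split is that each helper now has one job: prepare_task() marks next as running, finish_task() publishes that prev is done, and finish_lock_switch() handles only the runqueue lock.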

kernel/sched/core.c

Lines changed: 49 additions & 4 deletions
@@ -2045,7 +2045,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	 * If the owning (remote) CPU is still in the middle of schedule() with
 	 * this task as prev, wait until it's done referencing the task.
 	 *
-	 * Pairs with the smp_store_release() in finish_lock_switch().
+	 * Pairs with the smp_store_release() in finish_task().
 	 *
 	 * This ensures that tasks getting woken will be fully ordered against
 	 * their previous state and preserve Program Order.
@@ -2571,6 +2571,50 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,

 #endif /* CONFIG_PREEMPT_NOTIFIERS */

+static inline void prepare_task(struct task_struct *next)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * Claim the task as running, we do this before switching to it
+	 * such that any running task will have this set.
+	 */
+	next->on_cpu = 1;
+#endif
+}
+
+static inline void finish_task(struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 *
+	 * In particular, the load of prev->state in finish_task_switch() must
+	 * happen before this.
+	 *
+	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
+	 */
+	smp_store_release(&prev->on_cpu, 0);
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	/* this is a valid case when another task releases the spinlock */
+	rq->lock.owner = current;
+#endif
+	/*
+	 * If we are tracking spinlock dependencies then we have to
+	 * fix up the runqueue lock - which gets 'carried over' from
+	 * prev into current:
+	 */
+	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
+	raw_spin_unlock_irq(&rq->lock);
+}
+
 /**
  * prepare_task_switch - prepare to switch tasks
  * @rq: the runqueue preparing to switch
@@ -2591,7 +2635,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 	sched_info_switch(rq, prev, next);
 	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
-	prepare_lock_switch(rq, next);
+	prepare_task(next);
 	prepare_arch_switch(next);
 }

@@ -2646,7 +2690,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	 * the scheduled task must drop that reference.
 	 *
 	 * We must observe prev->state before clearing prev->on_cpu (in
-	 * finish_lock_switch), otherwise a concurrent wakeup can get prev
+	 * finish_task), otherwise a concurrent wakeup can get prev
 	 * running on another CPU and we could race with its RUNNING -> DEAD
 	 * transition, resulting in a double drop.
 	 */
@@ -2663,7 +2707,8 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	 * to use.
 	 */
 	smp_mb__after_unlock_lock();
-	finish_lock_switch(rq, prev);
+	finish_task(prev);
+	finish_lock_switch(rq);
 	finish_arch_post_lock_switch();

 	fire_sched_in_preempt_notifiers(current);
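The ordering comments above are the heart of the change: finish_task() must clear ->on_cpu with a release store only after prev->state has been read, and try_to_wake_up() must observe the clear with an acquire load before it may migrate the task. Below is a minimal userspace model of that pairing, using C11 atomics and a plain spin loop in place of smp_store_release()/smp_cond_load_acquire(); all names here are illustrative, not kernel API. Build with: cc -pthread model.c

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct task {
	atomic_int on_cpu;
	int state;		/* plain data published by the release store */
};

static struct task t;

/* Models finish_task(): every write before the release store (here,
 * t.state) must be visible to whoever later observes on_cpu == 0. */
static void finish_task_model(struct task *prev)
{
	prev->state = 1;	/* e.g. a RUNNING -> DEAD style transition */
	atomic_store_explicit(&prev->on_cpu, 0, memory_order_release);
}

/* Models the smp_cond_load_acquire() in try_to_wake_up(): spin until
 * the remote CPU is done referencing the task as prev. */
static void *waker(void *arg)
{
	(void)arg;
	while (atomic_load_explicit(&t.on_cpu, memory_order_acquire))
		;		/* owning CPU still inside schedule() */
	/* The acquire pairs with the release, so state is visible here. */
	printf("waker saw state=%d after on_cpu was cleared\n", t.state);
	return NULL;
}

int main(void)
{
	pthread_t w;

	atomic_store(&t.on_cpu, 1);	/* models prepare_task() */
	if (pthread_create(&w, NULL, waker, NULL))
		return 1;
	finish_task_model(&t);		/* state first, then on_cpu = 0 */
	pthread_join(&w, NULL);
	return 0;
}

With this pairing the waker is guaranteed to print state=1. Without it, the waker could see on_cpu == 0 while still reading a stale state, which is exactly the double-drop race the finish_task_switch() comment warns about.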

kernel/sched/sched.h

Lines changed: 0 additions & 41 deletions
@@ -1328,47 +1328,6 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 # define finish_arch_post_lock_switch() do { } while (0)
 #endif

-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * We can optimise this out completely for !SMP, because the
-	 * SMP rebalancing from interrupt is the only thing that cares
-	 * here.
-	 */
-	next->on_cpu = 1;
-#endif
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
-	/*
-	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
-	 * We must ensure this doesn't happen until the switch is completely
-	 * finished.
-	 *
-	 * In particular, the load of prev->state in finish_task_switch() must
-	 * happen before this.
-	 *
-	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
-	 */
-	smp_store_release(&prev->on_cpu, 0);
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-	/* this is a valid case when another task releases the spinlock */
-	rq->lock.owner = current;
-#endif
-	/*
-	 * If we are tracking spinlock dependencies then we have to
-	 * fix up the runqueue lock - which gets 'carried over' from
-	 * prev into current:
-	 */
-	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
-	raw_spin_unlock_irq(&rq->lock);
-}
-
 /*
  * wake flags
  */