@@ -599,7 +599,7 @@ static inline int cpu_of(struct rq *rq)
  * Return the group to which this tasks belongs.
  *
  * We use task_subsys_state_check() and extend the RCU verification
- * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
+ * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
  * holds that lock for each task it moves into the cgroup. Therefore
  * by holding that lock, we pin the task to the current cgroup.
  */
@@ -609,7 +609,7 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct cgroup_subsys_state *css;
 
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-			lockdep_is_held(&task_rq(p)->lock));
+			lockdep_is_held(&p->pi_lock));
 	tg = container_of(css, struct task_group, css);
 
 	return autogroup_task_group(p, tg);
@@ -924,23 +924,15 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
 /*
- * Check whether the task is waking, we use this to synchronize ->cpus_allowed
- * against ttwu().
- */
-static inline int task_is_waking(struct task_struct *p)
-{
-	return unlikely(p->state == TASK_WAKING);
-}
-
-/*
- * __task_rq_lock - lock the runqueue a given task resides on.
- * Must be called interrupts disabled.
+ * __task_rq_lock - lock the rq @p resides on.
  */
 static inline struct rq *__task_rq_lock(struct task_struct *p)
 	__acquires(rq->lock)
 {
 	struct rq *rq;
 
+	lockdep_assert_held(&p->pi_lock);
+
 	for (;;) {
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
@@ -951,22 +943,22 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 }
 
 /*
- * task_rq_lock - lock the runqueue a given task resides on and disable
- * interrupts. Note the ordering: we can safely lookup the task_rq without
- * explicitly disabling preemption.
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
  */
 static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+	__acquires(p->pi_lock)
 	__acquires(rq->lock)
 {
 	struct rq *rq;
 
 	for (;;) {
-		local_irq_save(*flags);
+		raw_spin_lock_irqsave(&p->pi_lock, *flags);
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p)))
 			return rq;
-		raw_spin_unlock_irqrestore(&rq->lock, *flags);
+		raw_spin_unlock(&rq->lock);
+		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
 	}
 }
 
@@ -976,10 +968,13 @@ static void __task_rq_unlock(struct rq *rq)
 	raw_spin_unlock(&rq->lock);
 }
 
-static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
 	__releases(rq->lock)
+	__releases(p->pi_lock)
 {
-	raw_spin_unlock_irqrestore(&rq->lock, *flags);
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
 }
 
 /*
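Aside: the retry pattern in the two hunks above is easier to see outside the kernel. Below is a minimal userspace analogue of the new task_rq_lock()/task_rq_unlock() pairing, with pthread mutexes standing in for raw spinlocks; fake_task, fake_rq and the fake_* functions are invented names for illustration only, not kernel code.

#include <pthread.h>
#include <stdatomic.h>

struct fake_rq {
	pthread_mutex_t lock;
};

struct fake_task {
	pthread_mutex_t pi_lock;
	_Atomic(struct fake_rq *) rq;	/* may change under "migration" */
};

/* Take p->pi_lock, then the rq the task currently resides on. */
static struct fake_rq *fake_task_rq_lock(struct fake_task *p)
{
	struct fake_rq *rq;

	pthread_mutex_lock(&p->pi_lock);
	for (;;) {
		rq = atomic_load(&p->rq);
		pthread_mutex_lock(&rq->lock);
		if (rq == atomic_load(&p->rq))
			return rq;	/* stable: both locks are now held */
		/* the task moved between the load and the lock: retry */
		pthread_mutex_unlock(&rq->lock);
	}
}

/* Release in reverse order: rq lock first, pi_lock last. */
static void fake_task_rq_unlock(struct fake_rq *rq, struct fake_task *p)
{
	pthread_mutex_unlock(&rq->lock);
	pthread_mutex_unlock(&p->pi_lock);
}

Once both locks are held the task can neither be woken onto another CPU (pi_lock) nor migrated off its runqueue (rq->lock) until they are released, which is the property the callers converted below rely on.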
@@ -2175,6 +2170,11 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	 */
 	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
 			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
+
+#ifdef CONFIG_LOCKDEP
+	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
+				      lockdep_is_held(&task_rq(p)->lock)));
+#endif
 #endif
 
 	trace_sched_migrate_task(p, new_cpu);
@@ -2270,7 +2270,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-		task_rq_unlock(rq, &flags);
+		task_rq_unlock(rq, p, &flags);
 
 		/*
 		 * If it changed from the expected state, bail out now.
@@ -2652,6 +2652,7 @@ static void __sched_fork(struct task_struct *p)
  */
 void sched_fork(struct task_struct *p, int clone_flags)
 {
+	unsigned long flags;
 	int cpu = get_cpu();
 
 	__sched_fork(p);
@@ -2702,9 +2703,9 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	 *
 	 * Silence PROVE_RCU.
 	 */
-	rcu_read_lock();
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	set_task_cpu(p, cpu);
-	rcu_read_unlock();
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
@@ -2753,7 +2754,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	set_task_cpu(p, cpu);
 
 	p->state = TASK_RUNNING;
-	task_rq_unlock(rq, &flags);
+	task_rq_unlock(rq, p, &flags);
 #endif
 
 	rq = task_rq_lock(p, &flags);
@@ -2765,7 +2766,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	if (p->sched_class->task_woken)
 		p->sched_class->task_woken(rq, p);
 #endif
-	task_rq_unlock(rq, &flags);
+	task_rq_unlock(rq, p, &flags);
 	put_cpu();
 }
 
@@ -3490,12 +3491,12 @@ void sched_exec(void)
 	    likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 
-		task_rq_unlock(rq, &flags);
+		task_rq_unlock(rq, p, &flags);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		return;
 	}
 unlock:
-	task_rq_unlock(rq, &flags);
+	task_rq_unlock(rq, p, &flags);
 }
 
 #endif
@@ -3532,7 +3533,7 @@ unsigned long long task_delta_exec(struct task_struct *p)
 
 	rq = task_rq_lock(p, &flags);
 	ns = do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, &flags);
+	task_rq_unlock(rq, p, &flags);
 
 	return ns;
 }
@@ -3550,7 +3551,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 
 	rq = task_rq_lock(p, &flags);
 	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, &flags);
+	task_rq_unlock(rq, p, &flags);
 
 	return ns;
 }
@@ -3574,7 +3575,7 @@ unsigned long long thread_group_sched_runtime(struct task_struct *p)
 	rq = task_rq_lock(p, &flags);
 	thread_group_cputime(p, &totals);
 	ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, &flags);
+	task_rq_unlock(rq, p, &flags);
 
 	return ns;
 }
@@ -4693,16 +4694,13 @@ EXPORT_SYMBOL(sleep_on_timeout);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-	unsigned long flags;
 	int oldprio, on_rq, running;
 	struct rq *rq;
 	const struct sched_class *prev_class;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
-	lockdep_assert_held(&p->pi_lock);
-
-	rq = task_rq_lock(p, &flags);
+	rq = __task_rq_lock(p);
 
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
@@ -4727,7 +4725,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
-	task_rq_unlock(rq, &flags);
+	__task_rq_unlock(rq);
 }
 
 #endif
@@ -4775,7 +4773,7 @@ void set_user_nice(struct task_struct *p, long nice)
 			resched_task(rq->curr);
 	}
 out_unlock:
-	task_rq_unlock(rq, &flags);
+	task_rq_unlock(rq, p, &flags);
 }
 EXPORT_SYMBOL(set_user_nice);
 
@@ -5003,20 +5001,17 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 	/*
 	 * make sure no PI-waiters arrive (or leave) while we are
 	 * changing the priority of the task:
-	 */
-	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	/*
+	 *
 	 * To be able to change p->policy safely, the appropriate
 	 * runqueue lock must be held.
 	 */
-	rq = __task_rq_lock(p);
+	rq = task_rq_lock(p, &flags);
 
 	/*
 	 * Changing the policy of the stop threads its a very bad idea
 	 */
 	if (p == rq->stop) {
-		__task_rq_unlock(rq);
-		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		task_rq_unlock(rq, p, &flags);
 		return -EINVAL;
 	}
 
@@ -5040,8 +5035,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
 				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
 				!task_group_is_autogroup(task_group(p))) {
-			__task_rq_unlock(rq);
-			raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+			task_rq_unlock(rq, p, &flags);
 			return -EPERM;
 		}
 	}
@@ -5050,8 +5044,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 	/* recheck policy now with rq lock held */
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
-		__task_rq_unlock(rq);
-		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		task_rq_unlock(rq, p, &flags);
 		goto recheck;
 	}
 	on_rq = p->on_rq;
@@ -5073,8 +5066,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 		activate_task(rq, p, 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
-	__task_rq_unlock(rq);
-	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+	task_rq_unlock(rq, p, &flags);
 
 	rt_mutex_adjust_pi(p);
 
@@ -5666,7 +5658,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 
 	rq = task_rq_lock(p, &flags);
 	time_slice = p->sched_class->get_rr_interval(rq, p);
-	task_rq_unlock(rq, &flags);
+	task_rq_unlock(rq, p, &flags);
 
 	rcu_read_unlock();
 	jiffies_to_timespec(time_slice, &t);
@@ -5889,8 +5881,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	unsigned int dest_cpu;
 	int ret = 0;
 
-	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	rq = __task_rq_lock(p);
+	rq = task_rq_lock(p, &flags);
 
 	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
 		ret = -EINVAL;
@@ -5918,15 +5909,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	if (need_migrate_task(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
-		__task_rq_unlock(rq);
-		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		task_rq_unlock(rq, p, &flags);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		tlb_migrate_finish(p->mm);
 		return 0;
 	}
 out:
-	__task_rq_unlock(rq);
-	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+	task_rq_unlock(rq, p, &flags);
 
 	return ret;
 }
@@ -5954,6 +5943,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	rq_src = cpu_rq(src_cpu);
 	rq_dest = cpu_rq(dest_cpu);
 
+	raw_spin_lock(&p->pi_lock);
 	double_rq_lock(rq_src, rq_dest);
 	/* Already moved. */
 	if (task_cpu(p) != src_cpu)
@@ -5976,6 +5966,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 		ret = 1;
 fail:
 	double_rq_unlock(rq_src, rq_dest);
+	raw_spin_unlock(&p->pi_lock);
 	return ret;
 }
 
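Aside: taking p->pi_lock ahead of double_rq_lock() in __migrate_task() fixes the lock order at pi_lock first, runqueue locks second, which is the same order task_rq_lock() now uses. Continuing the userspace sketch from earlier (fake_task, fake_rq and fake_migrate are again invented names, and locking the two mutexes in address order stands in for double_rq_lock()):

/* Assumes the fake_task/fake_rq definitions from the earlier sketch. */
static void fake_migrate(struct fake_task *p,
			 struct fake_rq *src, struct fake_rq *dst)
{
	pthread_mutex_lock(&p->pi_lock);	/* outer lock, pins the task */

	/* lock both runqueues in a fixed (address) order to avoid ABBA */
	if (src == dst) {
		pthread_mutex_lock(&src->lock);
	} else if (src < dst) {
		pthread_mutex_lock(&src->lock);
		pthread_mutex_lock(&dst->lock);
	} else {
		pthread_mutex_lock(&dst->lock);
		pthread_mutex_lock(&src->lock);
	}

	atomic_store(&p->rq, dst);		/* the "migration" itself */

	if (src != dst)
		pthread_mutex_unlock(&src->lock);
	pthread_mutex_unlock(&dst->lock);
	pthread_mutex_unlock(&p->pi_lock);
}

Because the migration path also holds pi_lock, the new lockdep check in set_task_cpu() is satisfied here as well.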
@@ -8702,7 +8693,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (on_rq)
 		enqueue_task(rq, tsk, 0);
 
-	task_rq_unlock(rq, &flags);
+	task_rq_unlock(rq, tsk, &flags);
 }
 #endif /* CONFIG_CGROUP_SCHED */
 