@@ -173,7 +173,7 @@ static struct rq *this_rq_lock(void)
 /*
  * __task_rq_lock - lock the rq @p resides on.
  */
-struct rq *__task_rq_lock(struct task_struct *p)
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 	__acquires(rq->lock)
 {
 	struct rq *rq;
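The struct rq_flags type taken by the new prototype is not defined in any of these hunks; it presumably lives in kernel/sched/sched.h as part of the same patch. Judging by the rf->flags and rf.flags uses below, it wraps little more than the saved IRQ state. A minimal sketch of the assumed definition, not the verbatim one:

	/* Assumed shape of the new type (defined outside the hunks shown here): */
	struct rq_flags {
		unsigned long flags;	/* IRQ state saved by task_rq_lock() and friends */
	};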
@@ -197,14 +197,14 @@ struct rq *__task_rq_lock(struct task_struct *p)
 /*
  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
  */
-struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 	__acquires(p->pi_lock)
 	__acquires(rq->lock)
 {
 	struct rq *rq;

 	for (;;) {
-		raw_spin_lock_irqsave(&p->pi_lock, *flags);
+		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
 		/*
@@ -228,7 +228,7 @@ struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 			return rq;
 		}
 		raw_spin_unlock(&rq->lock);
-		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

 		while (unlikely(task_on_rq_migrating(p)))
 			cpu_relax();
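The matching unlock helpers are not shown in these hunks, but the call sites below (__task_rq_unlock(rq, &rf) and task_rq_unlock(rq, p, &rf)) imply their new signatures. A minimal sketch of what they presumably look like after this change; lockdep pinning, hinted at by the lockdep_pin_lock() calls elsewhere in the file, is left out:

	static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
		__releases(rq->lock)
	{
		/* rf is unused in this sketch; it mirrors __task_rq_lock() for symmetry */
		raw_spin_unlock(&rq->lock);
	}

	static inline void
	task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
		__releases(rq->lock)
		__releases(p->pi_lock)
	{
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
	}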
@@ -1150,12 +1150,12 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 static int __set_cpus_allowed_ptr(struct task_struct *p,
 				  const struct cpumask *new_mask, bool check)
 {
-	unsigned long flags;
-	struct rq *rq;
 	unsigned int dest_cpu;
+	struct rq_flags rf;
+	struct rq *rq;
 	int ret = 0;

-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);

 	/*
 	 * Must re-check here, to close a race against __kthread_bind(),
@@ -1184,7 +1184,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		tlb_migrate_finish(p->mm);
 		return 0;
@@ -1198,7 +1198,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		lockdep_pin_lock(&rq->lock);
 	}
 out:
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);

 	return ret;
 }
@@ -1382,8 +1382,8 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
  */
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
-	unsigned long flags;
 	int running, queued;
+	struct rq_flags rf;
 	unsigned long ncsw;
 	struct rq *rq;

@@ -1418,14 +1418,14 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		 * lock now, to be *sure*. If we're wrong, we'll
 		 * just go back and repeat.
 		 */
-		rq = task_rq_lock(p, &flags);
+		rq = task_rq_lock(p, &rf);
 		trace_sched_wait_task(p);
 		running = task_running(rq, p);
 		queued = task_on_rq_queued(p);
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);

 		/*
 		 * If it changed from the expected state, bail out now.
@@ -1723,17 +1723,18 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
  */
 static int ttwu_remote(struct task_struct *p, int wake_flags)
 {
+	struct rq_flags rf;
 	struct rq *rq;
 	int ret = 0;

-	rq = __task_rq_lock(p);
+	rq = __task_rq_lock(p, &rf);
 	if (task_on_rq_queued(p)) {
 		/* check_preempt_curr() may use rq clock */
 		update_rq_clock(rq);
 		ttwu_do_wakeup(rq, p, wake_flags);
 		ret = 1;
 	}
-	__task_rq_unlock(rq);
+	__task_rq_unlock(rq, &rf);

 	return ret;
 }
@@ -2486,12 +2487,12 @@ extern void init_dl_bw(struct dl_bw *dl_b);
  */
 void wake_up_new_task(struct task_struct *p)
 {
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;

-	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	/* Initialize new task's runnable average */
 	init_entity_runnable_average(&p->se);
+	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 #ifdef CONFIG_SMP
 	/*
 	 * Fork balancing, do it here and not earlier because:
@@ -2503,7 +2504,7 @@ void wake_up_new_task(struct task_struct *p)
 	/* Post initialize new task's util average when its cfs_rq is set */
 	post_init_entity_util_avg(&p->se);

-	rq = __task_rq_lock(p);
+	rq = __task_rq_lock(p, &rf);
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p);
@@ -2519,7 +2520,7 @@ void wake_up_new_task(struct task_struct *p)
 		lockdep_pin_lock(&rq->lock);
 	}
 #endif
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 }

 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -2935,7 +2936,7 @@ EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
  */
 unsigned long long task_sched_runtime(struct task_struct *p)
 {
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;
 	u64 ns;

@@ -2955,7 +2956,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 		return p->se.sum_exec_runtime;
 #endif

-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 	/*
 	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
 	 * project cycles that may never be accounted to this
@@ -2966,7 +2967,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 		p->sched_class->update_curr(rq);
 	}
 	ns = p->se.sum_exec_runtime;
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);

 	return ns;
 }
@@ -3524,12 +3525,13 @@ EXPORT_SYMBOL(default_wake_function);
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
 	int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
-	struct rq *rq;
 	const struct sched_class *prev_class;
+	struct rq_flags rf;
+	struct rq *rq;

 	BUG_ON(prio > MAX_PRIO);

-	rq = __task_rq_lock(p);
+	rq = __task_rq_lock(p, &rf);

 	/*
 	 * Idle task boosting is a nono in general. There is one
@@ -3605,7 +3607,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
 	preempt_disable(); /* avoid rq from going away on us */
-	__task_rq_unlock(rq);
+	__task_rq_unlock(rq, &rf);

 	balance_callback(rq);
 	preempt_enable();
@@ -3615,7 +3617,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 void set_user_nice(struct task_struct *p, long nice)
 {
 	int old_prio, delta, queued;
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;

 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
@@ -3624,7 +3626,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	 * We have to be careful, if called from sys_setpriority(),
 	 * the task might be in the middle of scheduling on another CPU.
 	 */
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - but as expected
@@ -3655,7 +3657,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		resched_curr(rq);
 	}
 out_unlock:
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 }
 EXPORT_SYMBOL(set_user_nice);

@@ -3952,11 +3954,11 @@ static int __sched_setscheduler(struct task_struct *p,
 		      MAX_RT_PRIO - 1 - attr->sched_priority;
 	int retval, oldprio, oldpolicy = -1, queued, running;
 	int new_effective_prio, policy = attr->sched_policy;
-	unsigned long flags;
 	const struct sched_class *prev_class;
-	struct rq *rq;
+	struct rq_flags rf;
 	int reset_on_fork;
 	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
+	struct rq *rq;

 	/* may grab non-irq protected spin_locks */
 	BUG_ON(in_interrupt());
@@ -4051,13 +4053,13 @@ static int __sched_setscheduler(struct task_struct *p,
 	 * To be able to change p->policy safely, the appropriate
 	 * runqueue lock must be held.
 	 */
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);

 	/*
 	 * Changing the policy of the stop threads its a very bad idea
 	 */
 	if (p == rq->stop) {
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		return -EINVAL;
 	}

@@ -4074,7 +4076,7 @@ static int __sched_setscheduler(struct task_struct *p,
 			goto change;

 		p->sched_reset_on_fork = reset_on_fork;
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		return 0;
 	}
 change:
@@ -4088,7 +4090,7 @@ static int __sched_setscheduler(struct task_struct *p,
 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
 				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
 				!task_group_is_autogroup(task_group(p))) {
-			task_rq_unlock(rq, p, &flags);
+			task_rq_unlock(rq, p, &rf);
 			return -EPERM;
 		}
 #endif
@@ -4103,7 +4105,7 @@ static int __sched_setscheduler(struct task_struct *p,
 		 */
 		if (!cpumask_subset(span, &p->cpus_allowed) ||
 		    rq->rd->dl_bw.bw == 0) {
-			task_rq_unlock(rq, p, &flags);
+			task_rq_unlock(rq, p, &rf);
 			return -EPERM;
 		}
 	}
@@ -4113,7 +4115,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	/* recheck policy now with rq lock held */
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		goto recheck;
 	}

@@ -4123,7 +4125,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	 * is available.
 	 */
 	if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		return -EBUSY;
 	}

@@ -4168,7 +4170,7 @@ static int __sched_setscheduler(struct task_struct *p,

 	check_class_changed(rq, p, prev_class, oldprio);
 	preempt_disable(); /* avoid rq from going away on us */
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);

 	if (pi)
 		rt_mutex_adjust_pi(p);
@@ -5021,10 +5023,10 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 {
 	struct task_struct *p;
 	unsigned int time_slice;
-	unsigned long flags;
+	struct rq_flags rf;
+	struct timespec t;
 	struct rq *rq;
 	int retval;
-	struct timespec t;

 	if (pid < 0)
 		return -EINVAL;
@@ -5039,11 +5041,11 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 	if (retval)
 		goto out_unlock;

-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 	time_slice = 0;
 	if (p->sched_class->get_rr_interval)
 		time_slice = p->sched_class->get_rr_interval(rq, p);
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);

 	rcu_read_unlock();
 	jiffies_to_timespec(time_slice, &t);
@@ -5307,11 +5309,11 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
  */
 void sched_setnuma(struct task_struct *p, int nid)
 {
-	struct rq *rq;
-	unsigned long flags;
 	bool queued, running;
+	struct rq_flags rf;
+	struct rq *rq;

-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);

@@ -5326,7 +5328,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 		p->sched_class->set_curr_task(rq);
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE);
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_NUMA_BALANCING */

@@ -7757,10 +7759,10 @@ void sched_move_task(struct task_struct *tsk)
 {
 	struct task_group *tg;
 	int queued, running;
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;

-	rq = task_rq_lock(tsk, &flags);
+	rq = task_rq_lock(tsk, &rf);

 	running = task_current(rq, tsk);
 	queued = task_on_rq_queued(tsk);
@@ -7792,7 +7794,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (queued)
 		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);

-	task_rq_unlock(rq, tsk, &flags);
+	task_rq_unlock(rq, tsk, &rf);
 }
 #endif /* CONFIG_CGROUP_SCHED */

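Taken together, every caller converted by this diff follows the same pattern: declare a struct rq_flags on the stack, pass its address to the lock helper, and hand the same address back to the unlock helper. A minimal sketch of a converted call site; the function name and body are hypothetical, for illustration only:

	static void example_adjust_task(struct task_struct *p)
	{
		struct rq_flags rf;
		struct rq *rq;

		rq = task_rq_lock(p, &rf);	/* takes p->pi_lock and rq->lock, saves IRQ state in rf.flags */
		update_rq_clock(rq);		/* ... operate on the rq that p resides on ... */
		task_rq_unlock(rq, p, &rf);	/* drops both locks and restores IRQ state */
	}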