
Commit eb58075

Peter Zijlstra authored and Ingo Molnar committed
sched/core: Introduce 'struct rq_flags'
In order to be able to pass around more than just the IRQ flags in the
future, add a rq_flags structure.

No difference in code generation for the x86_64-defconfig build I tested.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 3e71a46 commit eb58075
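For orientation, the shape of the new type can be sketched from the commit message and the converted call sites below. This is a hedged sketch, not the kernel/sched/sched.h hunk itself (that file is among the three changed but is not reproduced in this excerpt); at this stage the structure is assumed to wrap only the saved IRQ flags, so that more state can be added to it later.

/* Sketch only: the real definition lives in kernel/sched/sched.h, which is
 * not shown in this excerpt. Assumed to carry just the saved IRQ state. */
struct rq_flags {
	unsigned long flags;	/* filled by raw_spin_lock_irqsave() */
};

/* The lock helpers in kernel/sched/core.c now take the wrapper instead of
 * a bare unsigned long *, as the diff below shows: */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);

struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);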

File tree

3 files changed: +62 -56 lines changed

kernel/sched/core.c

Lines changed: 50 additions & 48 deletions
@@ -173,7 +173,7 @@ static struct rq *this_rq_lock(void)
 /*
  * __task_rq_lock - lock the rq @p resides on.
  */
-struct rq *__task_rq_lock(struct task_struct *p)
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 	__acquires(rq->lock)
 {
 	struct rq *rq;
@@ -197,14 +197,14 @@ struct rq *__task_rq_lock(struct task_struct *p)
 /*
  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
  */
-struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 	__acquires(p->pi_lock)
 	__acquires(rq->lock)
 {
 	struct rq *rq;
 
 	for (;;) {
-		raw_spin_lock_irqsave(&p->pi_lock, *flags);
+		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
 		/*
@@ -228,7 +228,7 @@ struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 			return rq;
 		}
 		raw_spin_unlock(&rq->lock);
-		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 
 		while (unlikely(task_on_rq_migrating(p)))
 			cpu_relax();
@@ -1150,12 +1150,12 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 static int __set_cpus_allowed_ptr(struct task_struct *p,
 				  const struct cpumask *new_mask, bool check)
 {
-	unsigned long flags;
-	struct rq *rq;
 	unsigned int dest_cpu;
+	struct rq_flags rf;
+	struct rq *rq;
 	int ret = 0;
 
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 
 	/*
 	 * Must re-check here, to close a race against __kthread_bind(),
@@ -1184,7 +1184,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		tlb_migrate_finish(p->mm);
 		return 0;
@@ -1198,7 +1198,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		lockdep_pin_lock(&rq->lock);
 	}
 out:
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 
 	return ret;
 }
@@ -1382,8 +1382,8 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
  */
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
-	unsigned long flags;
 	int running, queued;
+	struct rq_flags rf;
 	unsigned long ncsw;
 	struct rq *rq;
 
@@ -1418,14 +1418,14 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		 * lock now, to be *sure*. If we're wrong, we'll
 		 * just go back and repeat.
 		 */
-		rq = task_rq_lock(p, &flags);
+		rq = task_rq_lock(p, &rf);
 		trace_sched_wait_task(p);
 		running = task_running(rq, p);
 		queued = task_on_rq_queued(p);
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 
 		/*
 		 * If it changed from the expected state, bail out now.
@@ -1723,17 +1723,18 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
  */
 static int ttwu_remote(struct task_struct *p, int wake_flags)
 {
+	struct rq_flags rf;
 	struct rq *rq;
 	int ret = 0;
 
-	rq = __task_rq_lock(p);
+	rq = __task_rq_lock(p, &rf);
 	if (task_on_rq_queued(p)) {
 		/* check_preempt_curr() may use rq clock */
 		update_rq_clock(rq);
 		ttwu_do_wakeup(rq, p, wake_flags);
 		ret = 1;
 	}
-	__task_rq_unlock(rq);
+	__task_rq_unlock(rq, &rf);
 
 	return ret;
 }
@@ -2486,12 +2487,12 @@ extern void init_dl_bw(struct dl_bw *dl_b);
  */
 void wake_up_new_task(struct task_struct *p)
 {
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;
 
-	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	/* Initialize new task's runnable average */
 	init_entity_runnable_average(&p->se);
+	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 #ifdef CONFIG_SMP
 	/*
 	 * Fork balancing, do it here and not earlier because:
@@ -2503,7 +2504,7 @@ void wake_up_new_task(struct task_struct *p)
 	/* Post initialize new task's util average when its cfs_rq is set */
 	post_init_entity_util_avg(&p->se);
 
-	rq = __task_rq_lock(p);
+	rq = __task_rq_lock(p, &rf);
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p);
@@ -2519,7 +2520,7 @@ void wake_up_new_task(struct task_struct *p)
 		lockdep_pin_lock(&rq->lock);
 	}
 #endif
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -2935,7 +2936,7 @@ EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
  */
 unsigned long long task_sched_runtime(struct task_struct *p)
 {
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;
 	u64 ns;
 
@@ -2955,7 +2956,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 		return p->se.sum_exec_runtime;
 #endif
 
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 	/*
 	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
 	 * project cycles that may never be accounted to this
@@ -2966,7 +2967,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 		p->sched_class->update_curr(rq);
 	}
 	ns = p->se.sum_exec_runtime;
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 
 	return ns;
 }
@@ -3524,12 +3525,13 @@ EXPORT_SYMBOL(default_wake_function);
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
 	int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
-	struct rq *rq;
 	const struct sched_class *prev_class;
+	struct rq_flags rf;
+	struct rq *rq;
 
 	BUG_ON(prio > MAX_PRIO);
 
-	rq = __task_rq_lock(p);
+	rq = __task_rq_lock(p, &rf);
 
 	/*
 	 * Idle task boosting is a nono in general. There is one
@@ -3605,7 +3607,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
 	preempt_disable(); /* avoid rq from going away on us */
-	__task_rq_unlock(rq);
+	__task_rq_unlock(rq, &rf);
 
 	balance_callback(rq);
 	preempt_enable();
@@ -3615,7 +3617,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 void set_user_nice(struct task_struct *p, long nice)
 {
 	int old_prio, delta, queued;
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;
 
 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
@@ -3624,7 +3626,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	 * We have to be careful, if called from sys_setpriority(),
 	 * the task might be in the middle of scheduling on another CPU.
 	 */
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - but as expected
@@ -3655,7 +3657,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		resched_curr(rq);
 	}
 out_unlock:
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 }
 EXPORT_SYMBOL(set_user_nice);
 
@@ -3952,11 +3954,11 @@ static int __sched_setscheduler(struct task_struct *p,
 		      MAX_RT_PRIO - 1 - attr->sched_priority;
 	int retval, oldprio, oldpolicy = -1, queued, running;
 	int new_effective_prio, policy = attr->sched_policy;
-	unsigned long flags;
 	const struct sched_class *prev_class;
-	struct rq *rq;
+	struct rq_flags rf;
 	int reset_on_fork;
 	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
+	struct rq *rq;
 
 	/* may grab non-irq protected spin_locks */
 	BUG_ON(in_interrupt());
@@ -4051,13 +4053,13 @@ static int __sched_setscheduler(struct task_struct *p,
 	 * To be able to change p->policy safely, the appropriate
 	 * runqueue lock must be held.
 	 */
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 
 	/*
 	 * Changing the policy of the stop threads its a very bad idea
 	 */
 	if (p == rq->stop) {
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		return -EINVAL;
 	}
 
@@ -4074,7 +4076,7 @@ static int __sched_setscheduler(struct task_struct *p,
 			goto change;
 
 		p->sched_reset_on_fork = reset_on_fork;
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		return 0;
 	}
 change:
@@ -4088,7 +4090,7 @@ static int __sched_setscheduler(struct task_struct *p,
 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
 				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
 				!task_group_is_autogroup(task_group(p))) {
-			task_rq_unlock(rq, p, &flags);
+			task_rq_unlock(rq, p, &rf);
 			return -EPERM;
 		}
 #endif
@@ -4103,7 +4105,7 @@ static int __sched_setscheduler(struct task_struct *p,
 		 */
 		if (!cpumask_subset(span, &p->cpus_allowed) ||
 		    rq->rd->dl_bw.bw == 0) {
-			task_rq_unlock(rq, p, &flags);
+			task_rq_unlock(rq, p, &rf);
 			return -EPERM;
 		}
 	}
@@ -4113,7 +4115,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	/* recheck policy now with rq lock held */
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		goto recheck;
 	}
 
@@ -4123,7 +4125,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	 * is available.
 	 */
 	if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		return -EBUSY;
 	}
 
@@ -4168,7 +4170,7 @@ static int __sched_setscheduler(struct task_struct *p,
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	preempt_disable(); /* avoid rq from going away on us */
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 
 	if (pi)
 		rt_mutex_adjust_pi(p);
@@ -5021,10 +5023,10 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 {
 	struct task_struct *p;
 	unsigned int time_slice;
-	unsigned long flags;
+	struct rq_flags rf;
+	struct timespec t;
 	struct rq *rq;
 	int retval;
-	struct timespec t;
 
 	if (pid < 0)
 		return -EINVAL;
@@ -5039,11 +5041,11 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 	if (retval)
 		goto out_unlock;
 
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 	time_slice = 0;
 	if (p->sched_class->get_rr_interval)
 		time_slice = p->sched_class->get_rr_interval(rq, p);
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 
 	rcu_read_unlock();
 	jiffies_to_timespec(time_slice, &t);
@@ -5307,11 +5309,11 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
  */
 void sched_setnuma(struct task_struct *p, int nid)
 {
-	struct rq *rq;
-	unsigned long flags;
 	bool queued, running;
+	struct rq_flags rf;
+	struct rq *rq;
 
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
 
@@ -5326,7 +5328,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 		p->sched_class->set_curr_task(rq);
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE);
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
@@ -7757,10 +7759,10 @@ void sched_move_task(struct task_struct *tsk)
 {
 	struct task_group *tg;
 	int queued, running;
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;
 
-	rq = task_rq_lock(tsk, &flags);
+	rq = task_rq_lock(tsk, &rf);
 
 	running = task_current(rq, tsk);
 	queued = task_on_rq_queued(tsk);
@@ -7792,7 +7794,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (queued)
 		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
 
-	task_rq_unlock(rq, tsk, &flags);
+	task_rq_unlock(rq, tsk, &rf);
 }
 #endif /* CONFIG_CGROUP_SCHED */
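Every call-site conversion above follows the same pattern; here is a condensed before/after sketch of a typical caller (the surrounding body is illustrative, the calling convention is taken directly from the hunks):

/* Before: the saved IRQ state lived in a bare unsigned long. */
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);
	/* ... operate on rq while p's pi_lock and runqueue lock are held ... */
	task_rq_unlock(rq, p, &flags);

/* After: the same state travels inside a struct rq_flags. */
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	/* ... operate on rq while p's pi_lock and runqueue lock are held ... */
	task_rq_unlock(rq, p, &rf);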
