Skip to content

Commit 39f23ce

Browse files
vingu-linaro (Vincent Guittot) authored
and Peter Zijlstra committed
sched/fair: Fix unthrottle_cfs_rq() for leaf_cfs_rq list
Although not exactly identical, unthrottle_cfs_rq() and enqueue_task_fair() are quite close and follow the same sequence for enqueuing an entity in the cfs hierarchy. Modify unthrottle_cfs_rq() to use the same pattern as enqueue_task_fair(). This fixes a problem already faced with the latter and adds an optimization in the last for_each_sched_entity loop. Fixes: fe61468 (sched/fair: Fix enqueue_task_fair warning) Reported-by: Tao Zhou <[email protected]> Signed-off-by: Vincent Guittot <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Phil Auld <[email protected]> Reviewed-by: Ben Segall <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent ad32bb4 commit 39f23ce

File tree

1 file changed

+30
-12
lines changed

1 file changed

+30
-12
lines changed

kernel/sched/fair.c

Lines changed: 30 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -4774,7 +4774,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
47744774
struct rq *rq = rq_of(cfs_rq);
47754775
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
47764776
struct sched_entity *se;
4777-
int enqueue = 1;
47784777
long task_delta, idle_task_delta;
47794778

47804779
se = cfs_rq->tg->se[cpu_of(rq)];
@@ -4798,26 +4797,44 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
47984797
idle_task_delta = cfs_rq->idle_h_nr_running;
47994798
for_each_sched_entity(se) {
48004799
if (se->on_rq)
4801-
enqueue = 0;
4800+
break;
4801+
cfs_rq = cfs_rq_of(se);
4802+
enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
48024803

4804+
cfs_rq->h_nr_running += task_delta;
4805+
cfs_rq->idle_h_nr_running += idle_task_delta;
4806+
4807+
/* end evaluation on encountering a throttled cfs_rq */
4808+
if (cfs_rq_throttled(cfs_rq))
4809+
goto unthrottle_throttle;
4810+
}
4811+
4812+
for_each_sched_entity(se) {
48034813
cfs_rq = cfs_rq_of(se);
4804-
if (enqueue) {
4805-
enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
4806-
} else {
4807-
update_load_avg(cfs_rq, se, 0);
4808-
se_update_runnable(se);
4809-
}
4814+
4815+
update_load_avg(cfs_rq, se, UPDATE_TG);
4816+
se_update_runnable(se);
48104817

48114818
cfs_rq->h_nr_running += task_delta;
48124819
cfs_rq->idle_h_nr_running += idle_task_delta;
48134820

4821+
4822+
/* end evaluation on encountering a throttled cfs_rq */
48144823
if (cfs_rq_throttled(cfs_rq))
4815-
break;
4824+
goto unthrottle_throttle;
4825+
4826+
/*
4827+
* One parent has been throttled and cfs_rq removed from the
4828+
* list. Add it back to not break the leaf list.
4829+
*/
4830+
if (throttled_hierarchy(cfs_rq))
4831+
list_add_leaf_cfs_rq(cfs_rq);
48164832
}
48174833

4818-
if (!se)
4819-
add_nr_running(rq, task_delta);
4834+
/* At this point se is NULL and we are at root level*/
4835+
add_nr_running(rq, task_delta);
48204836

4837+
unthrottle_throttle:
48214838
/*
48224839
* The cfs_rq_throttled() breaks in the above iteration can result in
48234840
* incomplete leaf list maintenance, resulting in triggering the
@@ -4826,7 +4843,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
48264843
for_each_sched_entity(se) {
48274844
cfs_rq = cfs_rq_of(se);
48284845

4829-
list_add_leaf_cfs_rq(cfs_rq);
4846+
if (list_add_leaf_cfs_rq(cfs_rq))
4847+
break;
48304848
}
48314849

48324850
assert_list_leaf_cfs_rq(rq);

0 commit comments

Comments
 (0)