
Commit 31bc6ae

vingu-linaro authored and Ingo Molnar committed
sched/fair: Optimize update_blocked_averages()
Removing a cfs_rq from rq->leaf_cfs_rq_list can break the parent/child ordering of the list when it is added back. In order to remove an empty and fully decayed cfs_rq, we must remove its children too, so they will be added back in the right order next time.

With a normal decay of PELT, a parent will be empty and fully decayed if all of its children are empty and fully decayed too. In such a case, we just have to ensure that the whole branch is added back when a new task is enqueued. This has been the default behavior since commit f678331 ("sched/fair: Fix insertion in rq->leaf_cfs_rq_list").

In case of throttling, the PELT of a throttled cfs_rq is not updated whereas the parent's is. This breaks the assumption made above unless we also remove the children of a cfs_rq that is throttled. They will then be added back when the cfs_rq is unthrottled and a sched_entity is enqueued.

As throttled cfs_rq are now removed from the list, we can remove the associated test in update_blocked_averages().

Signed-off-by: Vincent Guittot <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent c9ba756 commit 31bc6ae
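To make the ordering requirement concrete, below is a minimal user-space sketch of the invariant the patch preserves: on rq->leaf_cfs_rq_list a child cfs_rq must appear before its parent, because blocked load is propagated bottom-up, and re-adding a whole branch at enqueue time restores that order naturally. This is an illustrative model only, not kernel code; a flat array stands in for the kernel's linked list, and the node, leaf_list and add_branch names are invented for this example.

/* Illustrative user-space model of the leaf list ordering invariant. */
#include <stdio.h>
#include <stdbool.h>

struct node {
	const char *name;
	struct node *parent;
	bool on_list;
};

static struct node *leaf_list[8];	/* simplified stand-in for rq->leaf_cfs_rq_list */
static int nr_on_list;

/*
 * "Enqueue" walks child -> parent and appends every node that is not yet
 * listed, so a child always lands ahead of its parent.  If a parent were
 * left listed while its children had been removed, a re-added child would
 * end up behind it, breaking the bottom-up order; the patch avoids this
 * by taking throttled children off the list as well.
 */
static void add_branch(struct node *n)
{
	for (; n; n = n->parent) {
		if (n->on_list)
			break;	/* the rest of the branch is already ordered */
		leaf_list[nr_on_list++] = n;
		n->on_list = true;
	}
}

int main(void)
{
	struct node root   = { "root",   NULL,    false };
	struct node parent = { "parent", &root,   false };
	struct node child  = { "child",  &parent, false };

	add_branch(&child);

	for (int i = 0; i < nr_on_list; i++)
		printf("%s\n", leaf_list[i]->name);	/* prints: child, parent, root */

	return 0;
}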

1 file changed

kernel/sched/fair.c

Lines changed: 21 additions & 5 deletions
@@ -346,6 +346,18 @@ static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	if (cfs_rq->on_list) {
+		struct rq *rq = rq_of(cfs_rq);
+
+		/*
+		 * With cfs_rq being unthrottled/throttled during an enqueue,
+		 * it can happen the tmp_alone_branch points the a leaf that
+		 * we finally want to del. In this case, tmp_alone_branch moves
+		 * to the prev element but it will point to rq->leaf_cfs_rq_list
+		 * at the end of the enqueue.
+		 */
+		if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
+			rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
+
 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
 		cfs_rq->on_list = 0;
 	}
@@ -4438,6 +4450,10 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 		/* adjust cfs_rq_clock_task() */
 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
 					     cfs_rq->throttled_clock_task;
+
+		/* Add cfs_rq with already running entity in the list */
+		if (cfs_rq->nr_running >= 1)
+			list_add_leaf_cfs_rq(cfs_rq);
 	}

 	return 0;
@@ -4449,8 +4465,10 @@ static int tg_throttle_down(struct task_group *tg, void *data)
 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

 	/* group is entering throttled state, stop time */
-	if (!cfs_rq->throttle_count)
+	if (!cfs_rq->throttle_count) {
 		cfs_rq->throttled_clock_task = rq_clock_task(rq);
+		list_del_leaf_cfs_rq(cfs_rq);
+	}
 	cfs_rq->throttle_count++;

 	return 0;
@@ -4553,6 +4571,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 			break;
 	}

+	assert_list_leaf_cfs_rq(rq);
+
 	if (!se)
 		add_nr_running(rq, task_delta);

@@ -7700,10 +7720,6 @@ static void update_blocked_averages(int cpu)
 	for_each_leaf_cfs_rq(rq, cfs_rq) {
 		struct sched_entity *se;

-		/* throttled entities do not contribute to load */
-		if (throttled_hierarchy(cfs_rq))
-			continue;
-
 		if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq))
 			update_tg_load_avg(cfs_rq, 0);
