Skip to content

Commit 79462e8

Browse files
joshdon authored and Peter Zijlstra committed
sched: don't account throttle time for empty groups
It is easy for a cfs_rq to become throttled even when it has no enqueued entities (for example, if we have just put_prev()'d the last runnable task of the cfs_rq, and the cfs_rq is out of quota).

Avoid accounting this time towards total throttle time, since it otherwise falsely inflates the stats.

Note that the dequeue path is special, since we normally disallow migrations when a task is in a throttled hierarchy (see throttled_lb_pair()).

Signed-off-by: Josh Don <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 893cdaa commit 79462e8

File tree

1 file changed

+15
-3
lines changed

1 file changed

+15
-3
lines changed

kernel/sched/fair.c

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4787,6 +4787,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
47874787
}
47884788

47894789
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
4790+
static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
47904791

47914792
static inline bool cfs_bandwidth_used(void);
47924793

@@ -4873,8 +4874,14 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
48734874

48744875
if (cfs_rq->nr_running == 1) {
48754876
check_enqueue_throttle(cfs_rq);
4876-
if (!throttled_hierarchy(cfs_rq))
4877+
if (!throttled_hierarchy(cfs_rq)) {
48774878
list_add_leaf_cfs_rq(cfs_rq);
4879+
} else {
4880+
#ifdef CONFIG_CFS_BANDWIDTH
4881+
if (cfs_rq_throttled(cfs_rq) && !cfs_rq->throttled_clock)
4882+
cfs_rq->throttled_clock = rq_clock(rq_of(cfs_rq));
4883+
#endif
4884+
}
48784885
}
48794886
}
48804887

@@ -5480,7 +5487,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
54805487
* throttled-list. rq->lock protects completion.
54815488
*/
54825489
cfs_rq->throttled = 1;
5483-
cfs_rq->throttled_clock = rq_clock(rq);
5490+
SCHED_WARN_ON(cfs_rq->throttled_clock);
5491+
if (cfs_rq->nr_running)
5492+
cfs_rq->throttled_clock = rq_clock(rq);
54845493
return true;
54855494
}
54865495

@@ -5498,7 +5507,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
54985507
update_rq_clock(rq);
54995508

55005509
raw_spin_lock(&cfs_b->lock);
5501-
cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
5510+
if (cfs_rq->throttled_clock) {
5511+
cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
5512+
cfs_rq->throttled_clock = 0;
5513+
}
55025514
list_del_rcu(&cfs_rq->throttled_list);
55035515
raw_spin_unlock(&cfs_b->lock);
55045516

0 commit comments

Comments
 (0)