Commit 95246d1

vingu-linaro authored and Peter Zijlstra committed
sched/pelt: Relax the sync of runnable_sum with runnable_avg
Similarly to util_avg and util_sum, don't sync runnable_sum with the low
bound of runnable_avg but only ensure that runnable_sum stays in the
correct range.

Signed-off-by: Vincent Guittot <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Dietmar Eggemann <[email protected]>
Tested-by: Sachin Sant <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 7ceb771 commit 95246d1
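
What the change means in practice: instead of re-deriving runnable_sum from runnable_avg (which pins the sum to the smallest value that still yields that average and throws away anything accumulated above it), the sum is now adjusted by its own delta and merely clamped so that runnable_sum >= runnable_avg * PELT_MIN_DIVIDER. The sketch below is illustrative only, not kernel code: pelt_remove() is a hypothetical helper and the standalone build is an assumption; the constants mirror kernel/sched/pelt.h, where PELT_MIN_DIVIDER is LOAD_AVG_MAX - 1024.

        /*
         * Illustrative sketch only -- not kernel code.
         */
        #include <stdint.h>

        #define LOAD_AVG_MAX            47742
        #define PELT_MIN_DIVIDER        (LOAD_AVG_MAX - 1024)

        /* Subtract without wrapping below zero, like the kernel's sub_positive(). */
        static inline void sub_positive(uint32_t *val, uint32_t sub)
        {
                *val = (*val > sub) ? (*val - sub) : 0;
        }

        /* Remove a contribution <removed_avg, removed_sum> from a PELT pair. */
        static void pelt_remove(uint32_t *avg, uint32_t *sum,
                                uint32_t removed_avg, uint32_t removed_sum)
        {
                sub_positive(avg, removed_avg);
                sub_positive(sum, removed_sum);

                /*
                 * Old behaviour: *sum = *avg * divider;  i.e. the sum was
                 * synced to the lowest value consistent with the new average.
                 *
                 * New behaviour: keep the sum that was actually accumulated
                 * and only clamp it back into the valid range.
                 */
                if (*sum < *avg * PELT_MIN_DIVIDER)
                        *sum = *avg * PELT_MIN_DIVIDER;
        }

The same pattern appears in all three places touched below: add_positive()/sub_positive() on both the average and the sum, followed by a max_t() clamp against PELT_MIN_DIVIDER.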

File tree: 1 file changed (+19, −14)


kernel/sched/fair.c

Lines changed: 19 additions & 14 deletions
@@ -3483,11 +3483,11 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 static inline void
 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-        long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
-        u32 divider;
+        long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
+        u32 new_sum, divider;
 
         /* Nothing to update */
-        if (!delta)
+        if (!delta_avg)
                 return;
 
         /*
@@ -3498,11 +3498,16 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 
         /* Set new sched_entity's runnable */
         se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
-        se->avg.runnable_sum = se->avg.runnable_avg * divider;
+        new_sum = se->avg.runnable_avg * divider;
+        delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
+        se->avg.runnable_sum = new_sum;
 
         /* Update parent cfs_rq runnable */
-        add_positive(&cfs_rq->avg.runnable_avg, delta);
-        cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+        add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
+        add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
+        /* See update_cfs_rq_load_avg() */
+        cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
+                                         cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
 }
 
 static inline void
@@ -3702,7 +3707,10 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
                 r = removed_runnable;
                 sub_positive(&sa->runnable_avg, r);
-                sa->runnable_sum = sa->runnable_avg * divider;
+                sub_positive(&sa->runnable_sum, r * divider);
+                /* See sa->util_sum above */
+                sa->runnable_sum = max_t(u32, sa->runnable_sum,
+                                         sa->runnable_avg * PELT_MIN_DIVIDER);
 
                 /*
                  * removed_runnable is the unweighted version of removed_load so we
@@ -3789,12 +3797,6 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-        /*
-         * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
-         * See ___update_load_avg() for details.
-         */
-        u32 divider = get_pelt_divider(&cfs_rq->avg);
-
         dequeue_load_avg(cfs_rq, se);
         sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
         sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
@@ -3803,7 +3805,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
                                   cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
 
         sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-        cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+        sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+        /* See update_cfs_rq_load_avg() */
+        cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
+                                         cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
 
         add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
