@@ -3483,11 +3483,11 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 static inline void
 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-	long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
-	u32 divider;
+	long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
+	u32 new_sum, divider;
 
 	/* Nothing to update */
-	if (!delta)
+	if (!delta_avg)
 		return;
 
 	/*
@@ -3498,11 +3498,16 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 
 	/* Set new sched_entity's runnable */
 	se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
-	se->avg.runnable_sum = se->avg.runnable_avg * divider;
+	new_sum = se->avg.runnable_avg * divider;
+	delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
+	se->avg.runnable_sum = new_sum;
 
 	/* Update parent cfs_rq runnable */
-	add_positive(&cfs_rq->avg.runnable_avg, delta);
-	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+	add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
+	add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
+	/* See update_cfs_rq_load_avg() */
+	cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
+					 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
 }
 
 static inline void
@@ -3702,7 +3707,10 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 		r = removed_runnable;
 		sub_positive(&sa->runnable_avg, r);
-		sa->runnable_sum = sa->runnable_avg * divider;
+		sub_positive(&sa->runnable_sum, r * divider);
+		/* See sa->util_sum above */
+		sa->runnable_sum = max_t(u32, sa->runnable_sum,
+					 sa->runnable_avg * PELT_MIN_DIVIDER);
 
 		/*
 		 * removed_runnable is the unweighted version of removed_load so we
@@ -3789,12 +3797,6 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	/*
-	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
-	 * See ___update_load_avg() for details.
-	 */
-	u32 divider = get_pelt_divider(&cfs_rq->avg);
-
 	dequeue_load_avg(cfs_rq, se);
 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
 	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
@@ -3803,7 +3805,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 				     cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
 
 	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+	/* See update_cfs_rq_load_avg() */
+	cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
+					 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
 
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 
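Editor's note: every hunk above applies the same pattern — remove a stale contribution from both the _avg and the _sum with a saturating subtract, then clamp the _sum so it never falls below _avg * PELT_MIN_DIVIDER, keeping the pair consistent for a later _sum / divider recomputation. The stand-alone C sketch below models that pattern with illustrative numbers; sub_positive() here is a simplified user-space stand-in for the kernel macro in kernel/sched/fair.c, and the PELT_MIN_DIVIDER value assumes the kernel's LOAD_AVG_MAX of 47742.

#include <stdint.h>
#include <stdio.h>

/* Assumed values: in the kernel, PELT_MIN_DIVIDER is LOAD_AVG_MAX - 1024. */
#define LOAD_AVG_MAX		47742
#define PELT_MIN_DIVIDER	(LOAD_AVG_MAX - 1024)

/* Simplified sub_positive(): subtract, but saturate at zero on underflow. */
static void sub_positive(uint32_t *ptr, uint32_t val)
{
	uint32_t res = *ptr - val;

	if (res > *ptr)		/* unsigned wrap-around: went below zero */
		res = 0;
	*ptr = res;
}

int main(void)
{
	/* Illustrative state, not real scheduler numbers. */
	uint32_t runnable_avg = 1;
	uint32_t runnable_sum = 40000;
	uint32_t removed_sum  = 50000;	/* stale contribution to remove */

	sub_positive(&runnable_sum, removed_sum);	/* saturates to 0 */

	/*
	 * Clamp as the patch does: keep _sum >= _avg * PELT_MIN_DIVIDER so
	 * that a later _avg = _sum / divider cannot collapse _avg while
	 * runnable entities remain on the cfs_rq.
	 */
	if (runnable_sum < runnable_avg * PELT_MIN_DIVIDER)
		runnable_sum = runnable_avg * PELT_MIN_DIVIDER;

	printf("runnable_sum = %u\n", runnable_sum);	/* prints 46718 */
	return 0;
}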