@@ -3028,9 +3028,11 @@ enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	u32 divider = get_pelt_divider(&se->avg);
 	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+	/* See update_cfs_rq_load_avg() */
+	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+				     cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
 #else
 static inline void
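The dequeue path no longer rebuilds cfs_rq->avg.load_sum from load_avg * divider; it subtracts the dequeued entity's own contribution and then only enforces a lower bound of load_avg * PELT_MIN_DIVIDER. The stand-alone sketch below illustrates that pattern under simplified assumptions: pelt_sums, sub_positive_ul()/sub_positive_u64(), remove_entity_load() and the PELT_MIN_DIVIDER value are illustrative stand-ins, not the kernel's struct sched_avg, sub_positive() or pelt.h definitions.

/* Stand-alone sketch of the "subtract contribution, then clamp" pattern.
 * All names and values here are illustrative stand-ins, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define PELT_MIN_DIVIDER 46718u	/* assumed: LOAD_AVG_MAX - 1024 */

struct pelt_sums {			/* simplified stand-in for struct sched_avg */
	unsigned long load_avg;
	uint64_t load_sum;
};

/* like the kernel's sub_positive(): subtract but never go below zero */
static void sub_positive_ul(unsigned long *p, unsigned long v)
{
	*p = (*p > v) ? *p - v : 0;
}

static void sub_positive_u64(uint64_t *p, uint64_t v)
{
	*p = (*p > v) ? *p - v : 0;
}

/* dequeue-style update: drop one entity's contribution from both the
 * average and the sum, then keep the sum large enough to back the average */
static void remove_entity_load(struct pelt_sums *rq, unsigned long avg_contrib,
			       uint64_t sum_contrib)
{
	uint64_t floor;

	sub_positive_ul(&rq->load_avg, avg_contrib);
	sub_positive_u64(&rq->load_sum, sum_contrib);

	/* load_avg is roughly load_sum / divider with divider >= PELT_MIN_DIVIDER,
	 * so load_sum must never drop below load_avg * PELT_MIN_DIVIDER */
	floor = (uint64_t)rq->load_avg * PELT_MIN_DIVIDER;
	if (rq->load_sum < floor)
		rq->load_sum = floor;
}

int main(void)
{
	struct pelt_sums rq = { .load_avg = 2048, .load_sum = 2048ull * 47000 };

	remove_entity_load(&rq, 1024, 1024ull * 47500);
	printf("load_avg=%lu load_sum=%llu (floor=%llu)\n",
	       rq.load_avg, (unsigned long long)rq.load_sum,
	       (unsigned long long)rq.load_avg * PELT_MIN_DIVIDER);
	return 0;
}

In this run the subtraction leaves load_sum just below the floor, so it is clamped back up to load_avg * PELT_MIN_DIVIDER, which is exactly what the clamp in dequeue_load_avg() is for.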
@@ -3513,9 +3515,10 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-	long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+	long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
 	unsigned long load_avg;
 	u64 load_sum = 0;
+	s64 delta_sum;
 	u32 divider;
 
 	if (!runnable_sum)
@@ -3542,7 +3545,7 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 	 * assuming all tasks are equally runnable.
 	 */
 	if (scale_load_down(gcfs_rq->load.weight)) {
-		load_sum = div_s64(gcfs_rq->avg.load_sum,
+		load_sum = div_u64(gcfs_rq->avg.load_sum,
 			    scale_load_down(gcfs_rq->load.weight));
 	}
 
@@ -3559,19 +3562,22 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 	running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
 	runnable_sum = max(runnable_sum, running_sum);
 
-	load_sum = (s64)se_weight(se) * runnable_sum;
-	load_avg = div_s64(load_sum, divider);
-
-	se->avg.load_sum = runnable_sum;
+	load_sum = se_weight(se) * runnable_sum;
+	load_avg = div_u64(load_sum, divider);
 
-	delta = load_avg - se->avg.load_avg;
-	if (!delta)
+	delta_avg = load_avg - se->avg.load_avg;
+	if (!delta_avg)
 		return;
 
-	se->avg.load_avg = load_avg;
+	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
 
-	add_positive(&cfs_rq->avg.load_avg, delta);
-	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+	se->avg.load_sum = runnable_sum;
+	se->avg.load_avg = load_avg;
+	add_positive(&cfs_rq->avg.load_avg, delta_avg);
+	add_positive(&cfs_rq->avg.load_sum, delta_sum);
+	/* See update_cfs_rq_load_avg() */
+	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+				     cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
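update_tg_cfs_load() likewise stops writing load_sum back as load_avg * divider: the group entity's change is propagated as a signed pair (delta_avg, delta_sum) applied with add_positive(), followed by the same PELT_MIN_DIVIDER floor. A minimal sketch of that signed-delta propagation, with add_positive_u64() and propagate_load() as hypothetical simplified helpers rather than the kernel's add_positive()/max_t():

/* Illustrative stand-ins for the propagation step in update_tg_cfs_load();
 * names, types and the PELT_MIN_DIVIDER value are simplified, not kernel code. */
#include <stdint.h>

#define PELT_MIN_DIVIDER 46718u		/* assumed: LOAD_AVG_MAX - 1024 */

/* add a signed delta but saturate at zero, like the kernel's add_positive() */
static void add_positive_u64(uint64_t *p, int64_t delta)
{
	if (delta < 0 && (uint64_t)(-delta) > *p)
		*p = 0;
	else
		*p += delta;
}

/* propagate a child group's change into the parent as signed deltas */
static void propagate_load(uint64_t *parent_avg, uint64_t *parent_sum,
			   int64_t delta_avg, int64_t delta_sum)
{
	uint64_t floor;

	add_positive_u64(parent_avg, delta_avg);
	add_positive_u64(parent_sum, delta_sum);

	/* same lower bound as in dequeue_load_avg()/update_cfs_rq_load_avg() */
	floor = *parent_avg * PELT_MIN_DIVIDER;
	if (*parent_sum < floor)
		*parent_sum = floor;
}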
@@ -3687,7 +3693,9 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 		r = removed_load;
 		sub_positive(&sa->load_avg, r);
-		sa->load_sum = sa->load_avg * divider;
+		sub_positive(&sa->load_sum, r * divider);
+		/* See sa->util_sum below */
+		sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
 
 		r = removed_util;
 		sub_positive(&sa->util_avg, r);
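All three hunks apply the same rule: a PELT average is roughly its sum divided by a divider that get_pelt_divider() never lets fall below PELT_MIN_DIVIDER, so after subtracting a contribution the sum is only clamped to at least avg * PELT_MIN_DIVIDER instead of being fully resynchronized from the average. As a rough worked example, assuming PELT_MIN_DIVIDER is LOAD_AVG_MAX - 1024 = 46718 as defined in kernel/sched/pelt.h, a load_avg of 1024 keeps load_sum at or above 1024 * 46718 = 47839232 after any removal.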