@@ -3441,52 +3441,46 @@ static inline void
update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
	long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
+	/*
+	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+	 * See ___update_load_avg() for details.
+	 */
+	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;

	/* Nothing to update */
	if (!delta)
		return;

-	/*
-	 * The relation between sum and avg is:
-	 *
-	 *   LOAD_AVG_MAX - 1024 + sa->period_contrib
-	 *
-	 * however, the PELT windows are not aligned between grq and gse.
-	 */
-
	/* Set new sched_entity's utilization */
	se->avg.util_avg = gcfs_rq->avg.util_avg;
-	se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
+	se->avg.util_sum = se->avg.util_avg * divider;

	/* Update parent cfs_rq utilization */
	add_positive(&cfs_rq->avg.util_avg, delta);
-	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
+	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
}

static inline void
update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
	long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
+	/*
+	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+	 * See ___update_load_avg() for details.
+	 */
+	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;

	/* Nothing to update */
	if (!delta)
		return;

-	/*
-	 * The relation between sum and avg is:
-	 *
-	 *   LOAD_AVG_MAX - 1024 + sa->period_contrib
-	 *
-	 * however, the PELT windows are not aligned between grq and gse.
-	 */
-
	/* Set new sched_entity's runnable */
	se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
-	se->avg.runnable_sum = se->avg.runnable_avg * LOAD_AVG_MAX;
+	se->avg.runnable_sum = se->avg.runnable_avg * divider;

	/* Update parent cfs_rq runnable */
	add_positive(&cfs_rq->avg.runnable_avg, delta);
-	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * LOAD_AVG_MAX;
+	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
}

static inline void
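
The comment removed above stated the relation between a PELT running sum and its average: an up-to-date *_sum relates to its *_avg through LOAD_AVG_MAX - 1024 + period_contrib, which is exactly the divider the patch now uses instead of a bare LOAD_AVG_MAX. A minimal standalone sketch of that conversion, assuming LOAD_AVG_MAX keeps its usual generated value of 47742 and using a hypothetical helper name:

#include <stdint.h>

#define LOAD_AVG_MAX	47742	/* assumed value, as generated in sched-pelt.h */

/*
 * Hypothetical helper, not kernel code: rebuild a *_sum from a *_avg
 * using the divider the patch introduces.  period_contrib is the part
 * of the current, unfinished 1024us PELT window already accumulated
 * (0..1023).
 */
static inline uint64_t pelt_sum_from_avg(unsigned long avg, uint32_t period_contrib)
{
	uint32_t divider = LOAD_AVG_MAX - 1024 + period_contrib;

	return (uint64_t)avg * divider;
}
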
@@ -3496,19 +3490,26 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
	unsigned long load_avg;
	u64 load_sum = 0;
	s64 delta_sum;
+	u32 divider;

	if (!runnable_sum)
		return;

	gcfs_rq->prop_runnable_sum = 0;

+	/*
+	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+	 * See ___update_load_avg() for details.
+	 */
+	divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
+
	if (runnable_sum >= 0) {
		/*
		 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
		 * the CPU is saturated running == runnable.
		 */
		runnable_sum += se->avg.load_sum;
-		runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX);
+		runnable_sum = min_t(long, runnable_sum, divider);
	} else {
		/*
		 * Estimate the new unweighted runnable_sum of the gcfs_rq by

@@ -3533,7 +3534,7 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
	runnable_sum = max(runnable_sum, running_sum);

	load_sum = (s64)se_weight(se) * runnable_sum;
-	load_avg = div_s64(load_sum, LOAD_AVG_MAX);
+	load_avg = div_s64(load_sum, divider);

	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
	delta_avg = load_avg - se->avg.load_avg;
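
In update_tg_cfs_load() above, the same divider replaces LOAD_AVG_MAX both when clipping runnable_sum and when dividing load_sum down to load_avg, so the sum and the average stay consistent with each other. A rough standalone sketch of the positive path of that arithmetic follows; the helper name and flat parameter list are illustrative only and do not mirror the kernel's structures:

#include <stdint.h>

#define LOAD_AVG_MAX	47742	/* assumed value, as generated in sched-pelt.h */

/*
 * Hypothetical standalone rendering of the runnable_sum -> load_avg
 * step performed when propagating a group entity's load to its parent
 * (positive runnable_sum path only).
 */
static unsigned long propagated_load_avg(unsigned long weight,
					 uint64_t runnable_sum,
					 uint32_t period_contrib)
{
	uint32_t divider = LOAD_AVG_MAX - 1024 + period_contrib;
	uint64_t load_sum;

	/* Clip at the maximum a PELT sum can hold for this window offset. */
	if (runnable_sum > divider)
		runnable_sum = divider;

	load_sum = (uint64_t)weight * runnable_sum;

	/* Dividing by the same divider keeps load_sum and load_avg consistent. */
	return (unsigned long)(load_sum / divider);
}
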
@@ -3697,6 +3698,10 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 */
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
+	/*
+	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
+	 * See ___update_load_avg() for details.
+	 */
	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;

	/*
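
As a back-of-the-envelope check of why the divider matters (again assuming LOAD_AVG_MAX is 47742): right after a window rolls over, period_contrib is 0, the real ceiling of a sum is 46718, and rebuilding a sum with LOAD_AVG_MAX overshoots it by 1024 parts in 47742, a little over 2 percent. The toy program below, with made-up values, prints that mismatch:

#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_MAX	47742	/* assumed value, as generated in sched-pelt.h */

int main(void)
{
	unsigned long util_avg = 512;		/* made-up example average */
	uint32_t period_contrib = 0;		/* window just rolled over */
	uint32_t divider = LOAD_AVG_MAX - 1024 + period_contrib;

	/* sum rebuilt the old way (LOAD_AVG_MAX) vs. with the divider */
	uint64_t old_sum = (uint64_t)util_avg * LOAD_AVG_MAX;
	uint64_t new_sum = (uint64_t)util_avg * divider;

	printf("old_sum=%llu new_sum=%llu overshoot=%llu\n",
	       (unsigned long long)old_sum,
	       (unsigned long long)new_sum,
	       (unsigned long long)(old_sum - new_sum));
	return 0;
}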