
Commit 2d02fa8

vingu-linaro authored and Peter Zijlstra committed

sched/pelt: Relax the sync of load_sum with load_avg
Similarly to util_avg and util_sum, don't sync load_sum with the low bound of
load_avg but only ensure that load_sum stays in the correct range.

Signed-off-by: Vincent Guittot <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Dietmar Eggemann <[email protected]>
Tested-by: Sachin Sant <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
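For readers outside kernel/sched: a PELT _avg is the truncated quotient of its _sum over a divider that always lies between PELT_MIN_DIVIDER and LOAD_AVG_MAX, so a given load_avg implies a floor of load_avg * PELT_MIN_DIVIDER on load_sum. "Stays in the correct range" means enforcing only that floor instead of rewriting the sum from the average. Below is a minimal userspace sketch of the rule; the two constants mirror kernel/sched/pelt.h, while struct sa and relax_load_sum() are illustrative names, not kernel APIs.

#include <stdint.h>
#include <stdio.h>

#define LOAD_AVG_MAX        47742                   /* max of the PELT geometric series */
#define PELT_MIN_DIVIDER    (LOAD_AVG_MAX - 1024)   /* smallest divider PELT ever uses */

struct sa {
        uint64_t load_avg;      /* truncated load_sum / divider */
        uint64_t load_sum;
};

/* Fix load_sum only when it falls below the floor implied by load_avg. */
static void relax_load_sum(struct sa *sa)
{
        uint64_t floor = sa->load_avg * PELT_MIN_DIVIDER;

        if (sa->load_sum < floor)
                sa->load_sum = floor;
}

int main(void)
{
        struct sa sa = { .load_avg = 100, .load_sum = 10 };     /* inconsistent pair */

        relax_load_sum(&sa);
        printf("load_sum = %llu\n", (unsigned long long)sa.load_sum);
        return 0;
}

Clamping only on violation keeps whatever remainder the sum has legitimately accumulated, which a full re-sync from the truncated average would discard.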
1 parent 95246d1 commit 2d02fa8

File tree

1 file changed, +22 -14 lines changed


kernel/sched/fair.c

Lines changed: 22 additions & 14 deletions
@@ -3028,9 +3028,11 @@ enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-        u32 divider = get_pelt_divider(&se->avg);
         sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-        cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+        sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+        /* See update_cfs_rq_load_avg() */
+        cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+                                     cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
 #else
 static inline void
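Before this patch, dequeue_load_avg() rebuilt cfs_rq->avg.load_sum wholesale from the freshly reduced load_avg, throwing away the remainder the sum still carried. It now subtracts only the departing entity's own weighted contribution, se_weight(se) * se->avg.load_sum, and applies the low-bound clamp. A rough userspace rendering of the new flow, with uint64_t standing in for the kernel's mixed field types and sub_positive() reduced to a plain function:

#include <stdint.h>
#include <stdio.h>

#define LOAD_AVG_MAX        47742
#define PELT_MIN_DIVIDER    (LOAD_AVG_MAX - 1024)

struct pelt_avg {
        uint64_t load_avg;
        uint64_t load_sum;
};

/* Underflow-safe subtraction, modeled on the kernel's sub_positive() macro. */
static void sub_positive(uint64_t *ptr, uint64_t val)
{
        *ptr = (*ptr > val) ? *ptr - val : 0;
}

/* Remove one entity's contribution, then clamp load_sum to its floor. */
static void dequeue_load(struct pelt_avg *rq, const struct pelt_avg *se,
                         uint64_t se_weight)
{
        sub_positive(&rq->load_avg, se->load_avg);
        sub_positive(&rq->load_sum, se_weight * se->load_sum);
        if (rq->load_sum < rq->load_avg * PELT_MIN_DIVIDER)
                rq->load_sum = rq->load_avg * PELT_MIN_DIVIDER;
}

int main(void)
{
        struct pelt_avg rq = { .load_avg = 2048, .load_sum = 2048ULL * 47000 };
        struct pelt_avg se = { .load_avg = 1002, .load_sum = 46000 };

        dequeue_load(&rq, &se, 1024);
        printf("rq: load_avg=%llu load_sum=%llu\n",
               (unsigned long long)rq.load_avg,
               (unsigned long long)rq.load_sum);
        return 0;
}

The clamp matters because the two subtractions round independently; it guarantees that a non-zero load_avg can never be left sitting next to a load_sum of zero.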
@@ -3513,9 +3515,10 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 static inline void
 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-        long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+        long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
         unsigned long load_avg;
         u64 load_sum = 0;
+        s64 delta_sum;
         u32 divider;

         if (!runnable_sum)
@@ -3542,7 +3545,7 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
          * assuming all tasks are equally runnable.
          */
         if (scale_load_down(gcfs_rq->load.weight)) {
-                load_sum = div_s64(gcfs_rq->avg.load_sum,
+                load_sum = div_u64(gcfs_rq->avg.load_sum,
                                    scale_load_down(gcfs_rq->load.weight));
         }

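The div_s64() to div_u64() switch matches the operand: gcfs_rq->avg.load_sum is a u64, so the unsigned helper is the natural fit, whereas a signed division would misinterpret a value with the top bit set. PELT sums never actually grow that large, so this reads as type hygiene rather than a bug fix; a userspace illustration with stand-ins for the math64.h helpers:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's div_u64()/div_s64() (linux/math64.h). */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor) { return dividend / divisor; }
static int64_t div_s64(int64_t dividend, int32_t divisor) { return dividend / divisor; }

int main(void)
{
        /* Hypothetical value with the top bit set; the cast below wraps to
         * a negative number on common ABIs. */
        uint64_t sum = 1ULL << 63;

        printf("div_u64: %llu\n", (unsigned long long)div_u64(sum, 1024));
        printf("div_s64: %lld\n", (long long)div_s64((int64_t)sum, 1024));
        return 0;
}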
@@ -3559,19 +3562,22 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
         running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
         runnable_sum = max(runnable_sum, running_sum);

-        load_sum = (s64)se_weight(se) * runnable_sum;
-        load_avg = div_s64(load_sum, divider);
-
-        se->avg.load_sum = runnable_sum;
+        load_sum = se_weight(se) * runnable_sum;
+        load_avg = div_u64(load_sum, divider);

-        delta = load_avg - se->avg.load_avg;
-        if (!delta)
+        delta_avg = load_avg - se->avg.load_avg;
+        if (!delta_avg)
                 return;

-        se->avg.load_avg = load_avg;
+        delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;

-        add_positive(&cfs_rq->avg.load_avg, delta);
-        cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+        se->avg.load_sum = runnable_sum;
+        se->avg.load_avg = load_avg;
+        add_positive(&cfs_rq->avg.load_avg, delta_avg);
+        add_positive(&cfs_rq->avg.load_sum, delta_sum);
+        /* See update_cfs_rq_load_avg() */
+        cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+                                     cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }

 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
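One detail worth pausing on in the hunk above: delta_sum is computed against the entity's old contribution, (s64)se_weight(se) * se->avg.load_sum, which is why the se->avg.load_sum and se->avg.load_avg assignments moved below the delta computations. A compact userspace model of the reworked propagation step; add_positive() is reimplemented as a plain function and signed 64-bit fields stand in for the kernel's types:

#include <stdint.h>
#include <stdio.h>

#define LOAD_AVG_MAX        47742
#define PELT_MIN_DIVIDER    (LOAD_AVG_MAX - 1024)

struct pelt_avg {
        int64_t load_avg;
        int64_t load_sum;
};

/* Saturating add of a signed delta, modeled on the kernel's add_positive(). */
static void add_positive(int64_t *ptr, int64_t delta)
{
        *ptr += delta;
        if (*ptr < 0)
                *ptr = 0;
}

/* Propagate a group child's new unweighted sum into the parent cfs_rq. */
static void propagate_load(struct pelt_avg *cfs, struct pelt_avg *se,
                           int64_t weight, int64_t new_sum, int64_t divider)
{
        int64_t load_sum = weight * new_sum;
        int64_t load_avg = load_sum / divider;
        int64_t delta_avg = load_avg - se->load_avg;
        /* Must use the entity's old sum, before it is overwritten below. */
        int64_t delta_sum = load_sum - weight * se->load_sum;

        if (!delta_avg)
                return;

        se->load_sum = new_sum;
        se->load_avg = load_avg;
        add_positive(&cfs->load_avg, delta_avg);
        add_positive(&cfs->load_sum, delta_sum);
        if (cfs->load_sum < cfs->load_avg * PELT_MIN_DIVIDER)
                cfs->load_sum = cfs->load_avg * PELT_MIN_DIVIDER;
}

int main(void)
{
        struct pelt_avg cfs = { .load_avg = 500, .load_sum = 500LL * 47000 };
        struct pelt_avg se = { .load_avg = 102, .load_sum = 4700 };

        propagate_load(&cfs, &se, 1024, 6000, 47000);   /* child's sum grew to 6000 */
        printf("cfs: load_avg=%lld load_sum=%lld\n",
               (long long)cfs.load_avg, (long long)cfs.load_sum);
        return 0;
}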
@@ -3687,7 +3693,9 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)

                 r = removed_load;
                 sub_positive(&sa->load_avg, r);
-                sa->load_sum = sa->load_avg * divider;
+                sub_positive(&sa->load_sum, r * divider);
+                /* See sa->util_sum below */
+                sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);

                 r = removed_util;
                 sub_positive(&sa->util_avg, r);
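The removed-load path in update_cfs_rq_load_avg() gets the same treatment: subtract the removed contribution r * divider from load_sum instead of regenerating the sum from load_avg (the "See sa->util_sum below" comment points at the rationale documented beside the util_sum handling that follows). Since load_avg is itself the truncated quotient load_sum / divider, the old re-sync always rounded the sum down to that low bound. A self-contained demo of what a single re-sync could throw away, assuming for simplicity that the divider sits at its maximum:

#include <stdint.h>
#include <stdio.h>

#define LOAD_AVG_MAX        47742
#define PELT_MIN_DIVIDER    (LOAD_AVG_MAX - 1024)

int main(void)
{
        uint64_t divider = LOAD_AVG_MAX;        /* assume a full PELT window */
        uint64_t sum = 10 * divider + 40000;    /* avg of 10 with remainder 40000 */
        uint64_t avg = sum / divider;

        /* Old scheme: rewrite the sum from the truncated average. */
        uint64_t synced = avg * divider;

        /* New scheme: leave the sum alone unless it violates the floor. */
        uint64_t relaxed = sum;
        if (relaxed < avg * PELT_MIN_DIVIDER)
                relaxed = avg * PELT_MIN_DIVIDER;

        printf("synced=%llu relaxed=%llu discarded=%llu\n",
               (unsigned long long)synced, (unsigned long long)relaxed,
               (unsigned long long)(relaxed - synced));
        return 0;
}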
