
Commit 1ea6c46

Peter Zijlstra authored and Ingo Molnar committed
sched/fair: Propagate an effective runnable_load_avg
The load balancer uses runnable_load_avg as load indicator. For !cgroup this is:

  runnable_load_avg = \Sum se->avg.load_avg ; where se->on_rq

That is, a direct sum of all runnable tasks on that runqueue. As opposed to load_avg, which is a sum of all tasks on the runqueue, which includes a blocked component.

However, in the cgroup case, this comes apart since the group entities are always runnable, even if most of their constituent entities are blocked.

Therefore introduce a runnable_weight which for task entities is the same as the regular weight, but for group entities is a fraction of the entity weight and represents the runnable part of the group runqueue.

Then propagate this load through the PELT hierarchy to arrive at an effective runnable load average -- which we should not confuse with the canonical runnable load average.

Suggested-by: Tejun Heo <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
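To make the runnable_weight idea concrete, here is a minimal userspace sketch, not the kernel implementation: a group entity's runnable_weight is taken as its weight scaled by the runnable fraction of its group runqueue, while a task entity's runnable_weight is simply its weight. The names entity, grq, my_q and update_runnable_weight() are illustrative stand-ins for sched_entity/cfs_rq, not kernel symbols.

#include <stdio.h>

/* Simplified stand-in for the group runqueue (cfs_rq) averages. */
struct grq {
        unsigned long load_avg;           /* all entities, incl. blocked */
        unsigned long runnable_load_avg;  /* runnable entities only */
};

/* Simplified stand-in for a sched_entity. */
struct entity {
        unsigned long weight;
        unsigned long runnable_weight;
        int is_group;
        struct grq *my_q;                 /* group runqueue, NULL for tasks */
};

static void update_runnable_weight(struct entity *se)
{
        if (!se->is_group) {
                /* Task entities: runnable_weight is just the weight. */
                se->runnable_weight = se->weight;
                return;
        }

        /*
         * Group entities: scale the weight by the runnable fraction of
         * the group runqueue, guarding against a zero denominator.
         */
        if (se->my_q->load_avg)
                se->runnable_weight = se->weight * se->my_q->runnable_load_avg /
                                      se->my_q->load_avg;
        else
                se->runnable_weight = se->weight;
}

int main(void)
{
        struct grq q = { .load_avg = 2048, .runnable_load_avg = 512 };
        struct entity g = { .weight = 1024, .is_group = 1, .my_q = &q };

        update_runnable_weight(&g);
        /* 1/4 of the group's load is runnable -> 1/4 of the weight. */
        printf("runnable_weight = %lu\n", g.runnable_weight);
        return 0;
}

Compiled and run, this prints runnable_weight = 256: the group has weight 1024 but only a quarter of its runqueue load (512 of 2048) is runnable, so only a quarter of the weight counts as runnable.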
1 parent 0e2d2aa commit 1ea6c46

File tree

4 files changed: +124 -62 lines changed


include/linux/sched.h

Lines changed: 3 additions & 0 deletions
@@ -331,9 +331,11 @@ struct load_weight {
 struct sched_avg {
 	u64 last_update_time;
 	u64 load_sum;
+	u64 runnable_load_sum;
 	u32 util_sum;
 	u32 period_contrib;
 	unsigned long load_avg;
+	unsigned long runnable_load_avg;
 	unsigned long util_avg;
 };

@@ -376,6 +378,7 @@ struct sched_statistics {
 struct sched_entity {
 	/* For load-balancing: */
 	struct load_weight load;
+	unsigned long runnable_weight;
 	struct rb_node run_node;
 	struct list_head group_node;
 	unsigned int on_rq;
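The new sched_avg fields mirror the existing load_sum/load_avg pair. As a rough sketch of how such a pair can be maintained (a simplification under assumed names, not the kernel's PELT helpers), the same contribution is accumulated twice per update, once under the full weight and once under runnable_weight, and both sums are normalised by the same divider:

#include <stdio.h>

/* Simplified stand-in for struct sched_avg; decay handling is omitted. */
struct sched_avg_sketch {
        unsigned long long load_sum;
        unsigned long long runnable_load_sum;
        unsigned long load_avg;
        unsigned long runnable_load_avg;
};

/* Fold one (already decayed) contribution of 'contrib' time into both sums. */
void accumulate_contrib(struct sched_avg_sketch *sa,
                        unsigned long weight,
                        unsigned long runnable_weight,
                        unsigned long long contrib,
                        unsigned long long divider)
{
        sa->load_sum          += (unsigned long long)weight * contrib;
        sa->runnable_load_sum += (unsigned long long)runnable_weight * contrib;

        /* Both averages are their sums normalised by the same divider. */
        sa->load_avg          = sa->load_sum / divider;
        sa->runnable_load_avg = sa->runnable_load_sum / divider;
}

int main(void)
{
        struct sched_avg_sketch sa = { 0 };

        /* Weight 1024, but only a quarter of it runnable. */
        accumulate_contrib(&sa, 1024, 256, 1000, 47742);
        printf("load_avg=%lu runnable_load_avg=%lu\n",
               sa.load_avg, sa.runnable_load_avg);
        return 0;
}

Here 47742 stands in for the maximal attainable PELT sum (LOAD_AVG_MAX); in the kernel the contributions are also decayed geometrically per ~1 ms period, which this sketch leaves out.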

kernel/sched/debug.c

Lines changed: 7 additions & 1 deletion
@@ -441,9 +441,11 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 		P_SCHEDSTAT(se->statistics.wait_count);
 	}
 	P(se->load.weight);
+	P(se->runnable_weight);
 #ifdef CONFIG_SMP
 	P(se->avg.load_avg);
 	P(se->avg.util_avg);
+	P(se->avg.runnable_load_avg);
 #endif

 #undef PN_SCHEDSTAT

@@ -558,10 +560,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SMP
+	SEQ_printf(m, " .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
 	SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
 			cfs_rq->avg.load_avg);
 	SEQ_printf(m, " .%-30s: %lu\n", "runnable_load_avg",
-			cfs_rq->runnable_load_avg);
+			cfs_rq->avg.runnable_load_avg);
 	SEQ_printf(m, " .%-30s: %lu\n", "util_avg",
 			cfs_rq->avg.util_avg);
 	SEQ_printf(m, " .%-30s: %ld\n", "removed.load_avg",

@@ -1006,10 +1009,13 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 		"nr_involuntary_switches", (long long)p->nivcsw);

 	P(se.load.weight);
+	P(se.runnable_weight);
 #ifdef CONFIG_SMP
 	P(se.avg.load_sum);
+	P(se.avg.runnable_load_sum);
 	P(se.avg.util_sum);
 	P(se.avg.load_avg);
+	P(se.avg.runnable_load_avg);
 	P(se.avg.util_avg);
 	P(se.avg.last_update_time);
 #endif
