
Commit 677ea01

joshdon authored and Peter Zijlstra committed
sched: add throttled time stat for throttled children
We currently export the total throttled time for cgroups that are given a
bandwidth limit. This patch extends this accounting to also account the total
time that each child cgroup has been throttled.

This is useful to understand the degree to which children have been affected
by the throttling control. Children which are not runnable during the entire
throttled period, for example, will not show any self-throttling time during
this period.

Expose this in a new interface, 'cpu.stat.local', which is similar to how
non-hierarchical events are accounted in 'memory.events.local'.

Signed-off-by: Josh Don <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
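The new 'cpu.stat.local' file reports only the cgroup's own (non-hierarchical) throttled time. As a rough illustration of how the interface could be consumed from userspace, here is a minimal C sketch; it is not part of the patch, and the cgroup v2 mount point and the "mygroup" path are assumptions that depend on the local setup.

/*
 * Minimal sketch: read cpu.stat.local for one cgroup and print the
 * self-throttled time. Assumes cgroup v2 is mounted at /sys/fs/cgroup
 * and that a cgroup named "mygroup" exists (both are assumptions).
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/mygroup/cpu.stat.local";
	char key[64];
	unsigned long long val;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* The file exposes a single "key value" line. */
	if (fscanf(f, "%63s %llu", key, &val) == 2)
		printf("%s = %llu\n", key, val);

	fclose(f);
	return 0;
}

Note that, per the diff below, the cgroup v2 file prints "throttled_usec" in microseconds, while the v1 (legacy) cpu controller's "stat.local" file prints "throttled_time" in nanoseconds.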
1 parent 79462e8 commit 677ea01

File tree

5 files changed (+102, -1 lines)

include/linux/cgroup-defs.h

Lines changed: 2 additions & 0 deletions
@@ -661,6 +661,8 @@ struct cgroup_subsys {
 	void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
 	int (*css_extra_stat_show)(struct seq_file *seq,
 				   struct cgroup_subsys_state *css);
+	int (*css_local_stat_show)(struct seq_file *seq,
+				   struct cgroup_subsys_state *css);
 
 	int (*can_attach)(struct cgroup_taskset *tset);
 	void (*cancel_attach)(struct cgroup_taskset *tset);

kernel/cgroup/cgroup.c

Lines changed: 34 additions & 0 deletions
@@ -3685,6 +3685,36 @@ static int cpu_stat_show(struct seq_file *seq, void *v)
 	return ret;
 }
 
+static int __maybe_unused cgroup_local_stat_show(struct seq_file *seq,
+						 struct cgroup *cgrp, int ssid)
+{
+	struct cgroup_subsys *ss = cgroup_subsys[ssid];
+	struct cgroup_subsys_state *css;
+	int ret;
+
+	if (!ss->css_local_stat_show)
+		return 0;
+
+	css = cgroup_tryget_css(cgrp, ss);
+	if (!css)
+		return 0;
+
+	ret = ss->css_local_stat_show(seq, css);
+	css_put(css);
+	return ret;
+}
+
+static int cpu_local_stat_show(struct seq_file *seq, void *v)
+{
+	struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup;
+	int ret = 0;
+
+#ifdef CONFIG_CGROUP_SCHED
+	ret = cgroup_local_stat_show(seq, cgrp, cpu_cgrp_id);
+#endif
+	return ret;
+}
+
 #ifdef CONFIG_PSI
 static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
 {
@@ -5235,6 +5265,10 @@ static struct cftype cgroup_base_files[] = {
 		.name = "cpu.stat",
 		.seq_show = cpu_stat_show,
 	},
+	{
+		.name = "cpu.stat.local",
+		.seq_show = cpu_local_stat_show,
+	},
 	{ }	/* terminate */
 };
 

kernel/sched/core.c

Lines changed: 44 additions & 0 deletions
@@ -11139,6 +11139,27 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
 
 	return 0;
 }
+
+static u64 throttled_time_self(struct task_group *tg)
+{
+	int i;
+	u64 total = 0;
+
+	for_each_possible_cpu(i) {
+		total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
+	}
+
+	return total;
+}
+
+static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
+{
+	struct task_group *tg = css_tg(seq_css(sf));
+
+	seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
+
+	return 0;
+}
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
@@ -11215,6 +11236,10 @@ static struct cftype cpu_legacy_files[] = {
 		.name = "stat",
 		.seq_show = cpu_cfs_stat_show,
 	},
+	{
+		.name = "stat.local",
+		.seq_show = cpu_cfs_local_stat_show,
+	},
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 	{
@@ -11271,6 +11296,24 @@ static int cpu_extra_stat_show(struct seq_file *sf,
 	return 0;
 }
 
+static int cpu_local_stat_show(struct seq_file *sf,
+			       struct cgroup_subsys_state *css)
+{
+#ifdef CONFIG_CFS_BANDWIDTH
+	{
+		struct task_group *tg = css_tg(css);
+		u64 throttled_self_usec;
+
+		throttled_self_usec = throttled_time_self(tg);
+		do_div(throttled_self_usec, NSEC_PER_USEC);
+
+		seq_printf(sf, "throttled_usec %llu\n",
+			   throttled_self_usec);
+	}
+#endif
+	return 0;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
 			       struct cftype *cft)
@@ -11449,6 +11492,7 @@ struct cgroup_subsys cpu_cgrp_subsys = {
 	.css_released	= cpu_cgroup_css_released,
 	.css_free	= cpu_cgroup_css_free,
 	.css_extra_stat_show = cpu_extra_stat_show,
+	.css_local_stat_show = cpu_local_stat_show,
#ifdef CONFIG_RT_GROUP_SCHED
 	.can_attach	= cpu_cgroup_can_attach,
#endif

kernel/sched/fair.c

Lines changed: 20 additions & 1 deletion
@@ -4878,8 +4878,12 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		list_add_leaf_cfs_rq(cfs_rq);
 	} else {
 #ifdef CONFIG_CFS_BANDWIDTH
+		struct rq *rq = rq_of(cfs_rq);
+
 		if (cfs_rq_throttled(cfs_rq) && !cfs_rq->throttled_clock)
-			cfs_rq->throttled_clock = rq_clock(rq_of(cfs_rq));
+			cfs_rq->throttled_clock = rq_clock(rq);
+		if (!cfs_rq->throttled_clock_self)
+			cfs_rq->throttled_clock_self = rq_clock(rq);
 #endif
 	}
 }
@@ -5384,6 +5388,17 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 		/* Add cfs_rq with load or one or more already running entities to the list */
 		if (!cfs_rq_is_decayed(cfs_rq))
 			list_add_leaf_cfs_rq(cfs_rq);
+
+		if (cfs_rq->throttled_clock_self) {
+			u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;
+
+			cfs_rq->throttled_clock_self = 0;
+
+			if (SCHED_WARN_ON((s64)delta < 0))
+				delta = 0;
+
+			cfs_rq->throttled_clock_self_time += delta;
+		}
 	}
 
 	return 0;
@@ -5398,6 +5413,10 @@ static int tg_throttle_down(struct task_group *tg, void *data)
 	if (!cfs_rq->throttle_count) {
 		cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
 		list_del_leaf_cfs_rq(cfs_rq);
+
+		SCHED_WARN_ON(cfs_rq->throttled_clock_self);
+		if (cfs_rq->nr_running)
+			cfs_rq->throttled_clock_self = rq_clock(rq);
 	}
 	cfs_rq->throttle_count++;
 

kernel/sched/sched.h

Lines changed: 2 additions & 0 deletions
@@ -636,6 +636,8 @@ struct cfs_rq {
 	u64			throttled_clock;
 	u64			throttled_clock_pelt;
 	u64			throttled_clock_pelt_time;
+	u64			throttled_clock_self;
+	u64			throttled_clock_self_time;
 	int			throttled;
 	int			throttle_count;
 	struct list_head	throttled_list;
