
Commit a349834

vingu-linaro authored and Ingo Molnar committed
sched/fair: Rename sg_lb_stats::sum_nr_running to sum_h_nr_running
Rename sum_nr_running to sum_h_nr_running because it effectively tracks
cfs->h_nr_running, so we can use sum_nr_running to track rq->nr_running
when needed.

There are no functional changes.

Signed-off-by: Vincent Guittot <[email protected]>
Reviewed-by: Valentin Schneider <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: Ben Segall <[email protected]>
Cc: Dietmar Eggemann <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: [email protected]
Cc: Peter Zijlstra <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
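For context on the distinction the rename prepares for: rq->nr_running counts every runnable task on the CPU (CFS, RT and deadline), while rq->cfs.h_nr_running counts only CFS tasks, including those queued on child cfs_rqs of task groups. The sketch below is illustrative only; the struct layouts are heavily trimmed and the *_sketch names are made up, this is not code from the patch:

/* Illustrative sketch only -- trimmed, hypothetical layouts, not kernel code. */
struct cfs_rq_sketch {
	unsigned int h_nr_running;	/* CFS tasks in the hierarchy below this cfs_rq */
};

struct rq_sketch {
	unsigned int nr_running;	/* all runnable tasks on the CPU: CFS + RT + DL */
	struct cfs_rq_sketch cfs;
};

/*
 * update_sg_lb_stats() sums rq->cfs.h_nr_running over the group's CPUs,
 * so the field holding that sum is renamed to sum_h_nr_running; the plain
 * sum_nr_running name stays free for a later sum of rq->nr_running.
 */
struct sg_lb_stats_sketch {
	unsigned int sum_h_nr_running;	/* sum of rq->cfs.h_nr_running over the group */
};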
1 parent 490ba97 commit a349834

File tree: 1 file changed (+16, -16 lines)

kernel/sched/fair.c

Lines changed: 16 additions & 16 deletions
@@ -7660,7 +7660,7 @@ struct sg_lb_stats {
 	unsigned long load_per_task;
 	unsigned long group_capacity;
 	unsigned long group_util; /* Total utilization of the group */
-	unsigned int sum_nr_running; /* Nr tasks running in the group */
+	unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
 	unsigned int idle_cpus;
 	unsigned int group_weight;
 	enum group_type group_type;
@@ -7705,7 +7705,7 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
 		.total_capacity = 0UL,
 		.busiest_stat = {
 			.avg_load = 0UL,
-			.sum_nr_running = 0,
+			.sum_h_nr_running = 0,
 			.group_type = group_other,
 		},
 	};
@@ -7896,7 +7896,7 @@ static inline int sg_imbalanced(struct sched_group *group)
 static inline bool
 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
 {
-	if (sgs->sum_nr_running < sgs->group_weight)
+	if (sgs->sum_h_nr_running < sgs->group_weight)
 		return true;
 
 	if ((sgs->group_capacity * 100) >
@@ -7917,7 +7917,7 @@ group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
 static inline bool
 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
 {
-	if (sgs->sum_nr_running <= sgs->group_weight)
+	if (sgs->sum_h_nr_running <= sgs->group_weight)
 		return false;
 
 	if ((sgs->group_capacity * 100) <
@@ -8009,7 +8009,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
 		sgs->group_load += cpu_runnable_load(rq);
 		sgs->group_util += cpu_util(i);
-		sgs->sum_nr_running += rq->cfs.h_nr_running;
+		sgs->sum_h_nr_running += rq->cfs.h_nr_running;
 
 		nr_running = rq->nr_running;
 		if (nr_running > 1)
@@ -8039,8 +8039,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	sgs->group_capacity = group->sgc->capacity;
 	sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
 
-	if (sgs->sum_nr_running)
-		sgs->load_per_task = sgs->group_load / sgs->sum_nr_running;
+	if (sgs->sum_h_nr_running)
+		sgs->load_per_task = sgs->group_load / sgs->sum_h_nr_running;
 
 	sgs->group_weight = group->group_weight;
 
@@ -8097,7 +8097,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	 * capable CPUs may harm throughput. Maximize throughput,
 	 * power/energy consequences are not considered.
 	 */
-	if (sgs->sum_nr_running <= sgs->group_weight &&
+	if (sgs->sum_h_nr_running <= sgs->group_weight &&
 	    group_smaller_min_cpu_capacity(sds->local, sg))
 		return false;
 
@@ -8128,7 +8128,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	 * perform better since they share less core resources. Hence when we
 	 * have idle threads, we want them to be the higher ones.
 	 */
-	if (sgs->sum_nr_running &&
+	if (sgs->sum_h_nr_running &&
 	    sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
 		sgs->group_asym_packing = 1;
 		if (!sds->busiest)
@@ -8146,9 +8146,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 #ifdef CONFIG_NUMA_BALANCING
 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
 {
-	if (sgs->sum_nr_running > sgs->nr_numa_running)
+	if (sgs->sum_h_nr_running > sgs->nr_numa_running)
 		return regular;
-	if (sgs->sum_nr_running > sgs->nr_preferred_running)
+	if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
 		return remote;
 	return all;
 }
@@ -8223,7 +8223,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 		 */
 		if (prefer_sibling && sds->local &&
 		    group_has_capacity(env, local) &&
-		    (sgs->sum_nr_running > local->sum_nr_running + 1)) {
+		    (sgs->sum_h_nr_running > local->sum_h_nr_running + 1)) {
 			sgs->group_no_capacity = 1;
 			sgs->group_type = group_classify(sg, sgs);
 		}
@@ -8235,7 +8235,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 
 next_group:
 		/* Now, start updating sd_lb_stats */
-		sds->total_running += sgs->sum_nr_running;
+		sds->total_running += sgs->sum_h_nr_running;
 		sds->total_load += sgs->group_load;
 		sds->total_capacity += sgs->group_capacity;
 
@@ -8289,7 +8289,7 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 	local = &sds->local_stat;
 	busiest = &sds->busiest_stat;
 
-	if (!local->sum_nr_running)
+	if (!local->sum_h_nr_running)
 		local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
 	else if (busiest->load_per_task > local->load_per_task)
 		imbn = 1;
@@ -8387,7 +8387,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	 */
 	if (busiest->group_type == group_overloaded &&
 	    local->group_type == group_overloaded) {
-		load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
+		load_above_capacity = busiest->sum_h_nr_running * SCHED_CAPACITY_SCALE;
 		if (load_above_capacity > busiest->group_capacity) {
 			load_above_capacity -= busiest->group_capacity;
 			load_above_capacity *= scale_load_down(NICE_0_LOAD);
@@ -8468,7 +8468,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 		goto force_balance;
 
 	/* There is no busy sibling group to pull tasks from */
-	if (!sds.busiest || busiest->sum_nr_running == 0)
+	if (!sds.busiest || busiest->sum_h_nr_running == 0)
 		goto out_balanced;
 
 	/* XXX broken for overlapping NUMA groups */
