@@ -7660,7 +7660,7 @@ struct sg_lb_stats {
 	unsigned long load_per_task;
 	unsigned long group_capacity;
 	unsigned long group_util; /* Total utilization of the group */
-	unsigned int sum_nr_running; /* Nr tasks running in the group */
+	unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
 	unsigned int idle_cpus;
 	unsigned int group_weight;
 	enum group_type group_type;
@@ -7705,7 +7705,7 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
 		.total_capacity = 0UL,
 		.busiest_stat = {
 			.avg_load = 0UL,
-			.sum_nr_running = 0,
+			.sum_h_nr_running = 0,
 			.group_type = group_other,
 		},
 	};
@@ -7896,7 +7896,7 @@ static inline int sg_imbalanced(struct sched_group *group)
 static inline bool
 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
 {
-	if (sgs->sum_nr_running < sgs->group_weight)
+	if (sgs->sum_h_nr_running < sgs->group_weight)
 		return true;
 
 	if ((sgs->group_capacity * 100) >
@@ -7917,7 +7917,7 @@ group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
 static inline bool
 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
 {
-	if (sgs->sum_nr_running <= sgs->group_weight)
+	if (sgs->sum_h_nr_running <= sgs->group_weight)
 		return false;
 
 	if ((sgs->group_capacity * 100) <
@@ -8009,7 +8009,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
 		sgs->group_load += cpu_runnable_load(rq);
 		sgs->group_util += cpu_util(i);
-		sgs->sum_nr_running += rq->cfs.h_nr_running;
+		sgs->sum_h_nr_running += rq->cfs.h_nr_running;
 
 		nr_running = rq->nr_running;
 		if (nr_running > 1)
@@ -8039,8 +8039,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	sgs->group_capacity = group->sgc->capacity;
 	sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / sgs->group_capacity;
 
-	if (sgs->sum_nr_running)
-		sgs->load_per_task = sgs->group_load / sgs->sum_nr_running;
+	if (sgs->sum_h_nr_running)
+		sgs->load_per_task = sgs->group_load / sgs->sum_h_nr_running;
 
 	sgs->group_weight = group->group_weight;
 
@@ -8097,7 +8097,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	 * capable CPUs may harm throughput. Maximize throughput,
 	 * power/energy consequences are not considered.
 	 */
-	if (sgs->sum_nr_running <= sgs->group_weight &&
+	if (sgs->sum_h_nr_running <= sgs->group_weight &&
 	    group_smaller_min_cpu_capacity(sds->local, sg))
 		return false;
 
@@ -8128,7 +8128,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	 * perform better since they share less core resources. Hence when we
 	 * have idle threads, we want them to be the higher ones.
 	 */
-	if (sgs->sum_nr_running &&
+	if (sgs->sum_h_nr_running &&
 	    sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
 		sgs->group_asym_packing = 1;
 		if (!sds->busiest)
@@ -8146,9 +8146,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 #ifdef CONFIG_NUMA_BALANCING
 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
 {
-	if (sgs->sum_nr_running > sgs->nr_numa_running)
+	if (sgs->sum_h_nr_running > sgs->nr_numa_running)
 		return regular;
-	if (sgs->sum_nr_running > sgs->nr_preferred_running)
+	if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
 		return remote;
 	return all;
 }
@@ -8223,7 +8223,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 		 */
 		if (prefer_sibling && sds->local &&
 		    group_has_capacity(env, local) &&
-		    (sgs->sum_nr_running > local->sum_nr_running + 1)) {
+		    (sgs->sum_h_nr_running > local->sum_h_nr_running + 1)) {
 			sgs->group_no_capacity = 1;
 			sgs->group_type = group_classify(sg, sgs);
 		}
@@ -8235,7 +8235,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 
 next_group:
 		/* Now, start updating sd_lb_stats */
-		sds->total_running += sgs->sum_nr_running;
+		sds->total_running += sgs->sum_h_nr_running;
 		sds->total_load += sgs->group_load;
 		sds->total_capacity += sgs->group_capacity;
 
@@ -8289,7 +8289,7 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 	local = &sds->local_stat;
 	busiest = &sds->busiest_stat;
 
-	if (!local->sum_nr_running)
+	if (!local->sum_h_nr_running)
 		local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
 	else if (busiest->load_per_task > local->load_per_task)
 		imbn = 1;
@@ -8387,7 +8387,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	 */
 	if (busiest->group_type == group_overloaded &&
 	    local->group_type == group_overloaded) {
-		load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
+		load_above_capacity = busiest->sum_h_nr_running * SCHED_CAPACITY_SCALE;
 		if (load_above_capacity > busiest->group_capacity) {
 			load_above_capacity -= busiest->group_capacity;
 			load_above_capacity *= scale_load_down(NICE_0_LOAD);
@@ -8468,7 +8468,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 		goto force_balance;
 
 	/* There is no busy sibling group to pull tasks from */
-	if (!sds.busiest || busiest->sum_h_nr_running == 0)
+	if (!sds.busiest || busiest->sum_h_nr_running == 0)
 		goto out_balanced;
 
 	/* XXX broken for overlapping NUMA groups */
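For context, a minimal standalone sketch (not kernel code; fake_rq and fake_sg_stats are simplified, hypothetical stand-ins) of the distinction the rename captures: rq->cfs.h_nr_running counts only CFS tasks, counted hierarchically through task groups, while rq->nr_running counts every runnable task on the CPU regardless of scheduling class, so a group-level sum built from h_nr_running (as in the update_sg_lb_stats() hunk above) deliberately excludes realtime and deadline tasks.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures, for illustration only. */
struct fake_rq {
	unsigned int nr_running;       /* all runnable tasks: CFS + RT + DL */
	unsigned int cfs_h_nr_running; /* CFS tasks only, counted hierarchically */
};

struct fake_sg_stats {
	unsigned int sum_h_nr_running; /* what update_sg_lb_stats() accumulates */
	int overloaded;                /* derived from rq->nr_running, as in the diff */
};

int main(void)
{
	/* CPU 0 runs two CFS tasks; CPU 1 runs one CFS task plus one RT task. */
	struct fake_rq rqs[2] = {
		{ .nr_running = 2, .cfs_h_nr_running = 2 },
		{ .nr_running = 2, .cfs_h_nr_running = 1 },
	};
	struct fake_sg_stats sgs = { 0 };

	for (int i = 0; i < 2; i++) {
		/* Mirrors: sgs->sum_h_nr_running += rq->cfs.h_nr_running; */
		sgs.sum_h_nr_running += rqs[i].cfs_h_nr_running;
		/* Overload detection still looks at the total, rq->nr_running. */
		if (rqs[i].nr_running > 1)
			sgs.overloaded = 1;
	}

	/* Prints 3 CFS tasks even though 4 tasks are runnable in total. */
	printf("sum_h_nr_running=%u overloaded=%d\n",
	       sgs.sum_h_nr_running, sgs.overloaded);
	return 0;
}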