@@ -5380,18 +5380,6 @@ static unsigned long capacity_of(int cpu)
 	return cpu_rq(cpu)->cpu_capacity;
 }
 
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
-	unsigned long load_avg = cpu_runnable_load(rq);
-
-	if (nr_running)
-		return load_avg / nr_running;
-
-	return 0;
-}
-
 static void record_wakee(struct task_struct *p)
 {
 	/*
@@ -7657,7 +7645,6 @@ static unsigned long task_h_load(struct task_struct *p)
 struct sg_lb_stats {
 	unsigned long avg_load; /*Avg load across the CPUs of the group */
 	unsigned long group_load; /* Total load over the CPUs of the group */
-	unsigned long load_per_task;
 	unsigned long group_capacity;
 	unsigned long group_util; /* Total utilization of the group */
 	unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
@@ -8039,9 +8026,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	sgs->group_capacity = group->sgc->capacity;
 	sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / sgs->group_capacity;
 
-	if (sgs->sum_h_nr_running)
-		sgs->load_per_task = sgs->group_load / sgs->sum_h_nr_running;
-
 	sgs->group_weight = group->group_weight;
 
 	sgs->group_no_capacity = group_is_overloaded(env, sgs);
@@ -8271,76 +8255,6 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	}
 }
 
-/**
- * fix_small_imbalance - Calculate the minor imbalance that exists
- *			amongst the groups of a sched_domain, during
- *			load balancing.
- * @env: The load balancing environment.
- * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
- */
-static inline
-void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
-{
-	unsigned long tmp, capa_now = 0, capa_move = 0;
-	unsigned int imbn = 2;
-	unsigned long scaled_busy_load_per_task;
-	struct sg_lb_stats *local, *busiest;
-
-	local = &sds->local_stat;
-	busiest = &sds->busiest_stat;
-
-	if (!local->sum_h_nr_running)
-		local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
-	else if (busiest->load_per_task > local->load_per_task)
-		imbn = 1;
-
-	scaled_busy_load_per_task =
-		(busiest->load_per_task * SCHED_CAPACITY_SCALE) /
-		busiest->group_capacity;
-
-	if (busiest->avg_load + scaled_busy_load_per_task >=
-	    local->avg_load + (scaled_busy_load_per_task * imbn)) {
-		env->imbalance = busiest->load_per_task;
-		return;
-	}
-
-	/*
-	 * OK, we don't have enough imbalance to justify moving tasks,
-	 * however we may be able to increase total CPU capacity used by
-	 * moving them.
-	 */
-
-	capa_now += busiest->group_capacity *
-			min(busiest->load_per_task, busiest->avg_load);
-	capa_now += local->group_capacity *
-			min(local->load_per_task, local->avg_load);
-	capa_now /= SCHED_CAPACITY_SCALE;
-
-	/* Amount of load we'd subtract */
-	if (busiest->avg_load > scaled_busy_load_per_task) {
-		capa_move += busiest->group_capacity *
-			min(busiest->load_per_task,
-			    busiest->avg_load - scaled_busy_load_per_task);
-	}
-
-	/* Amount of load we'd add */
-	if (busiest->avg_load * busiest->group_capacity <
-	    busiest->load_per_task * SCHED_CAPACITY_SCALE) {
-		tmp = (busiest->avg_load * busiest->group_capacity) /
-			local->group_capacity;
-	} else {
-		tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
-			local->group_capacity;
-	}
-	capa_move += local->group_capacity *
-		min(local->load_per_task, local->avg_load + tmp);
-	capa_move /= SCHED_CAPACITY_SCALE;
-
-	/* Move if we gain throughput */
-	if (capa_move > capa_now)
-		env->imbalance = busiest->load_per_task;
-}
-
 /**
  * calculate_imbalance - Calculate the amount of imbalance present within the
  *			 groups of a given sched_domain during load balance.
@@ -8360,15 +8274,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 		return;
 	}
 
-	if (busiest->group_type == group_imbalanced) {
-		/*
-		 * In the group_imb case we cannot rely on group-wide averages
-		 * to ensure CPU-load equilibrium, look at wider averages. XXX
-		 */
-		busiest->load_per_task =
-			min(busiest->load_per_task, sds->avg_load);
-	}
-
 	/*
 	 * Avg load of busiest sg can be less and avg load of local sg can
 	 * be greater than avg load across all sgs of sd because avg load
@@ -8379,7 +8284,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	    (busiest->avg_load <= sds->avg_load ||
 	     local->avg_load >= sds->avg_load)) {
 		env->imbalance = 0;
-		return fix_small_imbalance(env, sds);
+		return;
 	}
 
 	/*
@@ -8417,14 +8322,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 					busiest->group_misfit_task_load);
 	}
 
-	/*
-	 * if *imbalance is less than the average load per runnable task
-	 * there is no guarantee that any tasks will be moved so we'll have
-	 * a think about bumping its value to force at least one task to be
-	 * moved
-	 */
-	if (env->imbalance < busiest->load_per_task)
-		return fix_small_imbalance(env, sds);
 }
 
 /******* find_busiest_group() helpers end here *********************/