@@ -1467,8 +1467,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 }

 static unsigned long weighted_cpuload(struct rq *rq);
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);

 /* Cached statistics for all CPUs within a node */
 struct numa_stats {
@@ -5333,45 +5331,11 @@ static struct {

 #endif /* CONFIG_NO_HZ_COMMON */

-/* Used instead of source_load when we know the type == 0 */
 static unsigned long weighted_cpuload(struct rq *rq)
 {
 	return cfs_rq_runnable_load_avg(&rq->cfs);
 }

-/*
- * Return a low guess at the load of a migration-source CPU weighted
- * according to the scheduling class and "nice" value.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static unsigned long source_load(int cpu, int type)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(rq);
-
-	if (type == 0 || !sched_feat(LB_BIAS))
-		return total;
-
-	return min(rq->cpu_load[type-1], total);
-}
-
-/*
- * Return a high guess at the load of a migration-target CPU weighted
- * according to the scheduling class and "nice" value.
- */
-static unsigned long target_load(int cpu, int type)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(rq);
-
-	if (type == 0 || !sched_feat(LB_BIAS))
-		return total;
-
-	return max(rq->cpu_load[type-1], total);
-}
-
 static unsigned long capacity_of(int cpu)
 {
 	return cpu_rq(cpu)->cpu_capacity;
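The helpers removed above only differ from weighted_cpuload() when a non-zero load index is passed and the LB_BIAS scheduler feature is enabled; otherwise both the conservative source estimate and the optimistic target estimate fall through to the same runnable load average. A minimal standalone sketch of that collapse, using hypothetical values rather than kernel code:

```c
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

int main(void)
{
	unsigned long runnable = 768;	/* what weighted_cpuload() would report */
	unsigned long history  = 1024;	/* a stale rq->cpu_load[] sample        */
	int lb_bias = 0;		/* sched_feat(LB_BIAS) not in effect    */

	/* Old behaviour: bias the estimate low for sources, high for targets. */
	unsigned long src = lb_bias ? min_ul(history, runnable) : runnable;
	unsigned long dst = lb_bias ? max_ul(history, runnable) : runnable;

	/* Without the bias both estimates are identical, so callers can read
	 * the runnable load directly, as the hunks below do. */
	printf("source=%lu target=%lu\n", src, dst);
	return 0;
}
```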
@@ -5479,7 +5443,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 	s64 this_eff_load, prev_eff_load;
 	unsigned long task_load;

-	this_eff_load = target_load(this_cpu, sd->wake_idx);
+	this_eff_load = weighted_cpuload(cpu_rq(this_cpu));

 	if (sync) {
 		unsigned long current_load = task_h_load(current);
@@ -5497,7 +5461,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 	this_eff_load *= 100;
 	this_eff_load *= capacity_of(prev_cpu);

-	prev_eff_load = source_load(prev_cpu, sd->wake_idx);
+	prev_eff_load = weighted_cpuload(cpu_rq(prev_cpu));
 	prev_eff_load -= task_load;
 	if (sched_feat(WA_BIAS))
 		prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
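With both call sites converted, wake_affine_weight() compares the plain runnable load of the waking CPU and the previous CPU. A rough standalone illustration of that comparison with made-up numbers; the capacity cross-scaling and the final comparison direction are assumptions about code outside these hunks, and WA_BIAS is assumed enabled:

```c
#include <stdio.h>

int main(void)
{
	long this_load = 512, prev_load = 1024;	/* weighted_cpuload() of each rq */
	long task_load = 256;			/* task_h_load(p)                */
	long this_cap = 1024, prev_cap = 1024;	/* capacity_of() of each CPU     */
	int imbalance_pct = 117;		/* a plausible sd->imbalance_pct */

	/* Waking side: add the task's load, scale by 100 and by the other
	 * CPU's capacity (cross-scaling assumed from surrounding code). */
	long this_eff = (this_load + task_load) * 100 * prev_cap;

	/* Previous side: subtract the task's load and apply the WA_BIAS
	 * imbalance_pct bonus, as in the hunk above. */
	long prev_eff = (prev_load - task_load)
			* (100 + (imbalance_pct - 100) / 2) * this_cap;

	printf("this_eff=%ld prev_eff=%ld -> prefer %s\n",
	       this_eff, prev_eff,
	       this_eff < prev_eff ? "this_cpu" : "prev_cpu");
	return 0;
}
```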
@@ -5558,14 +5522,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 	unsigned long this_runnable_load = ULONG_MAX;
 	unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
 	unsigned long most_spare = 0, this_spare = 0;
-	int load_idx = sd->forkexec_idx;
 	int imbalance_scale = 100 + (sd->imbalance_pct - 100)/2;
 	unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
 				(sd->imbalance_pct - 100) / 100;

-	if (sd_flag & SD_BALANCE_WAKE)
-		load_idx = sd->wake_idx;
-
 	do {
 		unsigned long load, avg_load, runnable_load;
 		unsigned long spare_cap, max_spare_cap;
@@ -5589,12 +5549,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		max_spare_cap = 0;

 		for_each_cpu(i, sched_group_span(group)) {
-			/* Bias balancing toward CPUs of our domain */
-			if (local_group)
-				load = source_load(i, load_idx);
-			else
-				load = target_load(i, load_idx);
-
+			load = weighted_cpuload(cpu_rq(i));
 			runnable_load += load;

 			avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
@@ -7676,34 +7631,6 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
 	};
 }

-/**
- * get_sd_load_idx - Obtain the load index for a given sched domain.
- * @sd: The sched_domain whose load_idx is to be obtained.
- * @idle: The idle status of the CPU for whose sd load_idx is obtained.
- *
- * Return: The load index.
- */
-static inline int get_sd_load_idx(struct sched_domain *sd,
-					enum cpu_idle_type idle)
-{
-	int load_idx;
-
-	switch (idle) {
-	case CPU_NOT_IDLE:
-		load_idx = sd->busy_idx;
-		break;
-
-	case CPU_NEWLY_IDLE:
-		load_idx = sd->newidle_idx;
-		break;
-	default:
-		load_idx = sd->idle_idx;
-		break;
-	}
-
-	return load_idx;
-}
-
 static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -7992,9 +7919,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 				      struct sg_lb_stats *sgs,
 				      int *sg_status)
 {
-	int local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
-	int load_idx = get_sd_load_idx(env->sd, env->idle);
-	unsigned long load;
 	int i, nr_running;

 	memset(sgs, 0, sizeof(*sgs));
@@ -8005,13 +7929,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
 			env->flags |= LBF_NOHZ_AGAIN;

-		/* Bias balancing toward CPUs of our domain: */
-		if (local_group)
-			load = target_load(i, load_idx);
-		else
-			load = source_load(i, load_idx);
-
-		sgs->group_load += load;
+		sgs->group_load += weighted_cpuload(rq);
 		sgs->group_util += cpu_util(i);
 		sgs->sum_nr_running += rq->cfs.h_nr_running;
