
Commit 1c1b8a7

deggeman authored and Ingo Molnar committed
sched/fair: Replace source_load() & target_load() with weighted_cpuload()
With LB_BIAS disabled, source_load() & target_load() return
weighted_cpuload(). Replace both with calls to weighted_cpuload().

The function to obtain the load index (sd->*_idx) for an sd,
get_sd_load_idx(), can be removed as well.

Finally, get rid of the sched feature LB_BIAS.

Signed-off-by: Dietmar Eggemann <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Morten Rasmussen <[email protected]>
Cc: Patrick Bellasi <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Quentin Perret <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Valentin Schneider <[email protected]>
Cc: Vincent Guittot <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 5e83eaf commit 1c1b8a7
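The removed helpers only differ from weighted_cpuload() when the LB_BIAS scheduler feature is enabled, and that feature is hard-wired to false (SCHED_FEAT(LB_BIAS, false)), so the biased low/high load guesses collapse to the plain weighted load. Below is a minimal standalone C sketch of that collapse, mirroring the shape of the removed source_load(); the stub struct rq, the lb_bias_enabled flag, and the example values are illustrative stand-ins, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's runqueue bookkeeping (not kernel code). */
struct rq {
	unsigned long runnable_load_avg;
	unsigned long cpu_load[5];
};

static const bool lb_bias_enabled = false;	/* mirrors SCHED_FEAT(LB_BIAS, false) */

static unsigned long weighted_cpuload(const struct rq *rq)
{
	return rq->runnable_load_avg;
}

/* Shape of the removed source_load(): a "low guess" only when LB_BIAS is on. */
static unsigned long source_load(const struct rq *rq, int type)
{
	unsigned long total = weighted_cpuload(rq);

	if (type == 0 || !lb_bias_enabled)
		return total;	/* LB_BIAS off: identical to weighted_cpuload() */

	return rq->cpu_load[type - 1] < total ? rq->cpu_load[type - 1] : total;
}

int main(void)
{
	struct rq rq = {
		.runnable_load_avg = 1024,
		.cpu_load = { 2048, 2048, 2048, 2048, 2048 },
	};

	/* With LB_BIAS disabled, the biased helper and weighted_cpuload() agree. */
	printf("source_load = %lu, weighted_cpuload = %lu\n",
	       source_load(&rq, 2), weighted_cpuload(&rq));
	return 0;
}

With the feature flag false, the min()/max() clamp against rq->cpu_load[] is never reached, which is why every caller touched by this diff can use weighted_cpuload() directly.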

2 files changed: +4 -87 lines changed

kernel/sched/fair.c

Lines changed: 4 additions & 86 deletions
@@ -1467,8 +1467,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 }
 
 static unsigned long weighted_cpuload(struct rq *rq);
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
 
 /* Cached statistics for all CPUs within a node */
 struct numa_stats {
@@ -5333,45 +5331,11 @@ static struct {
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
-/* Used instead of source_load when we know the type == 0 */
 static unsigned long weighted_cpuload(struct rq *rq)
 {
 	return cfs_rq_runnable_load_avg(&rq->cfs);
 }
 
-/*
- * Return a low guess at the load of a migration-source CPU weighted
- * according to the scheduling class and "nice" value.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static unsigned long source_load(int cpu, int type)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(rq);
-
-	if (type == 0 || !sched_feat(LB_BIAS))
-		return total;
-
-	return min(rq->cpu_load[type-1], total);
-}
-
-/*
- * Return a high guess at the load of a migration-target CPU weighted
- * according to the scheduling class and "nice" value.
- */
-static unsigned long target_load(int cpu, int type)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(rq);
-
-	if (type == 0 || !sched_feat(LB_BIAS))
-		return total;
-
-	return max(rq->cpu_load[type-1], total);
-}
-
 static unsigned long capacity_of(int cpu)
 {
 	return cpu_rq(cpu)->cpu_capacity;
@@ -5479,7 +5443,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 	s64 this_eff_load, prev_eff_load;
 	unsigned long task_load;
 
-	this_eff_load = target_load(this_cpu, sd->wake_idx);
+	this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
 
 	if (sync) {
 		unsigned long current_load = task_h_load(current);
@@ -5497,7 +5461,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 	this_eff_load *= 100;
 	this_eff_load *= capacity_of(prev_cpu);
 
-	prev_eff_load = source_load(prev_cpu, sd->wake_idx);
+	prev_eff_load = weighted_cpuload(cpu_rq(prev_cpu));
 	prev_eff_load -= task_load;
 	if (sched_feat(WA_BIAS))
 		prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5558,14 +5522,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 	unsigned long this_runnable_load = ULONG_MAX;
 	unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
 	unsigned long most_spare = 0, this_spare = 0;
-	int load_idx = sd->forkexec_idx;
 	int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
 	unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
 				(sd->imbalance_pct-100) / 100;
 
-	if (sd_flag & SD_BALANCE_WAKE)
-		load_idx = sd->wake_idx;
-
 	do {
 		unsigned long load, avg_load, runnable_load;
 		unsigned long spare_cap, max_spare_cap;
@@ -5589,12 +5549,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		max_spare_cap = 0;
 
 		for_each_cpu(i, sched_group_span(group)) {
-			/* Bias balancing toward CPUs of our domain */
-			if (local_group)
-				load = source_load(i, load_idx);
-			else
-				load = target_load(i, load_idx);
-
+			load = weighted_cpuload(cpu_rq(i));
 			runnable_load += load;
 
 			avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
@@ -7676,34 +7631,6 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
 	};
 }
 
-/**
- * get_sd_load_idx - Obtain the load index for a given sched domain.
- * @sd: The sched_domain whose load_idx is to be obtained.
- * @idle: The idle status of the CPU for whose sd load_idx is obtained.
- *
- * Return: The load index.
- */
-static inline int get_sd_load_idx(struct sched_domain *sd,
-					enum cpu_idle_type idle)
-{
-	int load_idx;
-
-	switch (idle) {
-	case CPU_NOT_IDLE:
-		load_idx = sd->busy_idx;
-		break;
-
-	case CPU_NEWLY_IDLE:
-		load_idx = sd->newidle_idx;
-		break;
-	default:
-		load_idx = sd->idle_idx;
-		break;
-	}
-
-	return load_idx;
-}
-
 static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -7992,9 +7919,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 				      struct sg_lb_stats *sgs,
 				      int *sg_status)
 {
-	int local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
-	int load_idx = get_sd_load_idx(env->sd, env->idle);
-	unsigned long load;
 	int i, nr_running;
 
 	memset(sgs, 0, sizeof(*sgs));
@@ -8005,13 +7929,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
 			env->flags |= LBF_NOHZ_AGAIN;
 
-		/* Bias balancing toward CPUs of our domain: */
-		if (local_group)
-			load = target_load(i, load_idx);
-		else
-			load = source_load(i, load_idx);
-
-		sgs->group_load += load;
+		sgs->group_load += weighted_cpuload(rq);
 		sgs->group_util += cpu_util(i);
 		sgs->sum_nr_running += rq->cfs.h_nr_running;
 
kernel/sched/features.h

Lines changed: 0 additions & 1 deletion
@@ -39,7 +39,6 @@ SCHED_FEAT(WAKEUP_PREEMPTION, true)
 
 SCHED_FEAT(HRTICK, false)
 SCHED_FEAT(DOUBLE_TICK, false)
-SCHED_FEAT(LB_BIAS, false)
 
 /*
  * Decrement CPU capacity based on time not spent running tasks
