Commit fcf0553

vingu-linaro authored and Ingo Molnar committed
sched/fair: Remove meaningless imbalance calculation
Clean up load_balance() and remove meaningless calculation and fields before adding a new algorithm.

Signed-off-by: Vincent Guittot <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: Ben Segall <[email protected]>
Cc: Dietmar Eggemann <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: [email protected]
Cc: Peter Zijlstra <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent a349834 commit fcf0553
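For context, the calculation being removed is the per-group load_per_task heuristic visible in the diff below: a group's load divided by its number of runnable CFS tasks, which calculate_imbalance() (via fix_small_imbalance()) used to bump a small imbalance up so that at least one task would be worth moving. The following is a minimal standalone sketch of that idea, not the kernel code; the struct and helper names are illustrative, modelled on the sg_lb_stats fields shown in the diff.

/*
 * Minimal, simplified sketch (not kernel code) of the heuristic this patch
 * removes. Names are illustrative, modelled on the sg_lb_stats fields in
 * the diff: load_per_task = group_load / sum_h_nr_running, and a too-small
 * imbalance is bumped so at least one "average" task is worth moving.
 */
#include <stdio.h>

struct sg_stats {
        unsigned long group_load;       /* total load over the group's CPUs */
        unsigned int  sum_h_nr_running; /* runnable CFS tasks in the group  */
};

static unsigned long load_per_task(const struct sg_stats *sgs)
{
        if (sgs->sum_h_nr_running)
                return sgs->group_load / sgs->sum_h_nr_running;
        return 0;
}

int main(void)
{
        struct sg_stats busiest = { .group_load = 3072, .sum_h_nr_running = 3 };
        unsigned long imbalance = 512;

        /* Removed fixup, simplified: if the computed imbalance is smaller
         * than one average task's load, bump it so a task can move. */
        if (imbalance < load_per_task(&busiest))
                imbalance = load_per_task(&busiest);

        printf("imbalance = %lu\n", imbalance); /* prints 1024 */
        return 0;
}

This is only meant to make the removed logic easier to follow; the code actually being deleted appears in full in the diff below.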

File tree

1 file changed: +1, -104 lines changed


kernel/sched/fair.c

Lines changed: 1 addition & 104 deletions
@@ -5380,18 +5380,6 @@ static unsigned long capacity_of(int cpu)
 	return cpu_rq(cpu)->cpu_capacity;
 }
 
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
-	unsigned long load_avg = cpu_runnable_load(rq);
-
-	if (nr_running)
-		return load_avg / nr_running;
-
-	return 0;
-}
-
 static void record_wakee(struct task_struct *p)
 {
 	/*
@@ -7657,7 +7645,6 @@ static unsigned long task_h_load(struct task_struct *p)
 struct sg_lb_stats {
 	unsigned long avg_load; /* Avg load across the CPUs of the group */
 	unsigned long group_load; /* Total load over the CPUs of the group */
-	unsigned long load_per_task;
 	unsigned long group_capacity;
 	unsigned long group_util; /* Total utilization of the group */
 	unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
@@ -8039,9 +8026,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	sgs->group_capacity = group->sgc->capacity;
 	sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
 
-	if (sgs->sum_h_nr_running)
-		sgs->load_per_task = sgs->group_load / sgs->sum_h_nr_running;
-
 	sgs->group_weight = group->group_weight;
 
 	sgs->group_no_capacity = group_is_overloaded(env, sgs);
@@ -8271,76 +8255,6 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	}
 }
 
-/**
- * fix_small_imbalance - Calculate the minor imbalance that exists
- *			amongst the groups of a sched_domain, during
- *			load balancing.
- * @env: The load balancing environment.
- * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
- */
-static inline
-void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
-{
-	unsigned long tmp, capa_now = 0, capa_move = 0;
-	unsigned int imbn = 2;
-	unsigned long scaled_busy_load_per_task;
-	struct sg_lb_stats *local, *busiest;
-
-	local = &sds->local_stat;
-	busiest = &sds->busiest_stat;
-
-	if (!local->sum_h_nr_running)
-		local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
-	else if (busiest->load_per_task > local->load_per_task)
-		imbn = 1;
-
-	scaled_busy_load_per_task =
-		(busiest->load_per_task * SCHED_CAPACITY_SCALE) /
-		busiest->group_capacity;
-
-	if (busiest->avg_load + scaled_busy_load_per_task >=
-	    local->avg_load + (scaled_busy_load_per_task * imbn)) {
-		env->imbalance = busiest->load_per_task;
-		return;
-	}
-
-	/*
-	 * OK, we don't have enough imbalance to justify moving tasks,
-	 * however we may be able to increase total CPU capacity used by
-	 * moving them.
-	 */
-
-	capa_now += busiest->group_capacity *
-			min(busiest->load_per_task, busiest->avg_load);
-	capa_now += local->group_capacity *
-			min(local->load_per_task, local->avg_load);
-	capa_now /= SCHED_CAPACITY_SCALE;
-
-	/* Amount of load we'd subtract */
-	if (busiest->avg_load > scaled_busy_load_per_task) {
-		capa_move += busiest->group_capacity *
-			    min(busiest->load_per_task,
-				busiest->avg_load - scaled_busy_load_per_task);
-	}
-
-	/* Amount of load we'd add */
-	if (busiest->avg_load * busiest->group_capacity <
-	    busiest->load_per_task * SCHED_CAPACITY_SCALE) {
-		tmp = (busiest->avg_load * busiest->group_capacity) /
-		      local->group_capacity;
-	} else {
-		tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
-		      local->group_capacity;
-	}
-	capa_move += local->group_capacity *
-		    min(local->load_per_task, local->avg_load + tmp);
-	capa_move /= SCHED_CAPACITY_SCALE;
-
-	/* Move if we gain throughput */
-	if (capa_move > capa_now)
-		env->imbalance = busiest->load_per_task;
-}
-
 /**
  * calculate_imbalance - Calculate the amount of imbalance present within the
  *			 groups of a given sched_domain during load balance.
@@ -8360,15 +8274,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 		return;
 	}
 
-	if (busiest->group_type == group_imbalanced) {
-		/*
-		 * In the group_imb case we cannot rely on group-wide averages
-		 * to ensure CPU-load equilibrium, look at wider averages. XXX
-		 */
-		busiest->load_per_task =
-			min(busiest->load_per_task, sds->avg_load);
-	}
-
 	/*
 	 * Avg load of busiest sg can be less and avg load of local sg can
 	 * be greater than avg load across all sgs of sd because avg load
@@ -8379,7 +8284,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	    (busiest->avg_load <= sds->avg_load ||
 	     local->avg_load >= sds->avg_load)) {
 		env->imbalance = 0;
-		return fix_small_imbalance(env, sds);
+		return;
 	}
 
 	/*
@@ -8417,14 +8322,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 		     busiest->group_misfit_task_load);
 	}
 
-	/*
-	 * if *imbalance is less than the average load per runnable task
-	 * there is no guarantee that any tasks will be moved so we'll have
-	 * a think about bumping its value to force at least one task to be
-	 * moved
-	 */
-	if (env->imbalance < busiest->load_per_task)
-		return fix_small_imbalance(env, sds);
 }
 
 /******* find_busiest_group() helpers end here *********************/
