Skip to content

Commit 03153dc

Browse files
Brendan Jackman authored and Dan Duval committed
sched/fair: Move select_task_rq_fair() slow-path into its own function
Orabug: 28088230

In preparation for changes that would otherwise require adding a new level
of indentation to the while(sd) loop, create a new function
find_idlest_cpu() which contains this loop, and rename the existing
find_idlest_cpu() to find_idlest_group_cpu().

Code inside the while(sd) loop is unchanged. @new_cpu is added as a
variable in the new function, with the same initial value as the @new_cpu
in select_task_rq_fair().

Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Brendan Jackman <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Josef Bacik <[email protected]>
Reviewed-by: Vincent Guittot <[email protected]>
Cc: Dietmar Eggemann <[email protected]>
Cc: Josef Bacik <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Morten Rasmussen <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 18bd1b4)
Signed-off-by: Dan Duval <[email protected]>
Reviewed-by: Chuck Anderson <[email protected]>
1 parent e4caf8f commit 03153dc

File tree

1 file changed

+48
-35
lines changed

1 file changed

+48
-35
lines changed

kernel/sched/fair.c

Lines changed: 48 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -5576,10 +5576,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
55765576
}
55775577

55785578
/*
5579-
* find_idlest_cpu - find the idlest cpu among the cpus in group.
5579+
* find_idlest_group_cpu - find the idlest cpu among the cpus in group.
55805580
*/
55815581
static int
5582-
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
5582+
find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
55835583
{
55845584
unsigned long load, min_load = ULONG_MAX;
55855585
unsigned int min_exit_latency = UINT_MAX;
@@ -5628,6 +5628,50 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
56285628
return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
56295629
}
56305630

5631+
static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
5632+
int cpu, int prev_cpu, int sd_flag)
5633+
{
5634+
int new_cpu = prev_cpu;
5635+
5636+
while (sd) {
5637+
struct sched_group *group;
5638+
struct sched_domain *tmp;
5639+
int weight;
5640+
5641+
if (!(sd->flags & sd_flag)) {
5642+
sd = sd->child;
5643+
continue;
5644+
}
5645+
5646+
group = find_idlest_group(sd, p, cpu, sd_flag);
5647+
if (!group) {
5648+
sd = sd->child;
5649+
continue;
5650+
}
5651+
5652+
new_cpu = find_idlest_group_cpu(group, p, cpu);
5653+
if (new_cpu == -1 || new_cpu == cpu) {
5654+
/* Now try balancing at a lower domain level of cpu */
5655+
sd = sd->child;
5656+
continue;
5657+
}
5658+
5659+
/* Now try balancing at a lower domain level of new_cpu */
5660+
cpu = new_cpu;
5661+
weight = sd->span_weight;
5662+
sd = NULL;
5663+
for_each_domain(cpu, tmp) {
5664+
if (weight <= tmp->span_weight)
5665+
break;
5666+
if (tmp->flags & sd_flag)
5667+
sd = tmp;
5668+
}
5669+
/* while loop will break here if sd == NULL */
5670+
}
5671+
5672+
return new_cpu;
5673+
}
5674+
56315675
#ifdef CONFIG_SCHED_SMT
56325676
DEFINE_STATIC_KEY_FALSE(sched_smt_present);
56335677

@@ -6001,39 +6045,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
60016045
if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
60026046
new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
60036047

6004-
} else while (sd) {
6005-
struct sched_group *group;
6006-
int weight;
6007-
6008-
if (!(sd->flags & sd_flag)) {
6009-
sd = sd->child;
6010-
continue;
6011-
}
6012-
6013-
group = find_idlest_group(sd, p, cpu, sd_flag);
6014-
if (!group) {
6015-
sd = sd->child;
6016-
continue;
6017-
}
6018-
6019-
new_cpu = find_idlest_cpu(group, p, cpu);
6020-
if (new_cpu == -1 || new_cpu == cpu) {
6021-
/* Now try balancing at a lower domain level of cpu */
6022-
sd = sd->child;
6023-
continue;
6024-
}
6025-
6026-
/* Now try balancing at a lower domain level of new_cpu */
6027-
cpu = new_cpu;
6028-
weight = sd->span_weight;
6029-
sd = NULL;
6030-
for_each_domain(cpu, tmp) {
6031-
if (weight <= tmp->span_weight)
6032-
break;
6033-
if (tmp->flags & sd_flag)
6034-
sd = tmp;
6035-
}
6036-
/* while loop will break here if sd == NULL */
6048+
} else {
6049+
new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
60376050
}
60386051
rcu_read_unlock();
60396052

0 commit comments

Comments (0)