@@ -5576,10 +5576,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 }
 
 /*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
+ * find_idlest_group_cpu - find the idlest cpu among the cpus in group.
  */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
	unsigned long load, min_load = ULONG_MAX;
	unsigned int min_exit_latency = UINT_MAX;
@@ -5628,6 +5628,50 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
+static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
+				  int cpu, int prev_cpu, int sd_flag)
+{
+	int new_cpu = prev_cpu;
+
+	while (sd) {
+		struct sched_group *group;
+		struct sched_domain *tmp;
+		int weight;
+
+		if (!(sd->flags & sd_flag)) {
+			sd = sd->child;
+			continue;
+		}
+
+		group = find_idlest_group(sd, p, cpu, sd_flag);
+		if (!group) {
+			sd = sd->child;
+			continue;
+		}
+
+		new_cpu = find_idlest_group_cpu(group, p, cpu);
+		if (new_cpu == -1 || new_cpu == cpu) {
+			/* Now try balancing at a lower domain level of cpu */
+			sd = sd->child;
+			continue;
+		}
+
+		/* Now try balancing at a lower domain level of new_cpu */
+		cpu = new_cpu;
+		weight = sd->span_weight;
+		sd = NULL;
+		for_each_domain(cpu, tmp) {
+			if (weight <= tmp->span_weight)
+				break;
+			if (tmp->flags & sd_flag)
+				sd = tmp;
+		}
+		/* while loop will break here if sd == NULL */
+	}
+
+	return new_cpu;
+}
+
 #ifdef CONFIG_SCHED_SMT
 DEFINE_STATIC_KEY_FALSE(sched_smt_present);
 
@@ -6001,39 +6045,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
		if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 
-	} else while (sd) {
-		struct sched_group *group;
-		int weight;
-
-		if (!(sd->flags & sd_flag)) {
-			sd = sd->child;
-			continue;
-		}
-
-		group = find_idlest_group(sd, p, cpu, sd_flag);
-		if (!group) {
-			sd = sd->child;
-			continue;
-		}
-
-		new_cpu = find_idlest_cpu(group, p, cpu);
-		if (new_cpu == -1 || new_cpu == cpu) {
-			/* Now try balancing at a lower domain level of cpu */
-			sd = sd->child;
-			continue;
-		}
-
-		/* Now try balancing at a lower domain level of new_cpu */
-		cpu = new_cpu;
-		weight = sd->span_weight;
-		sd = NULL;
-		for_each_domain(cpu, tmp) {
-			if (weight <= tmp->span_weight)
-				break;
-			if (tmp->flags & sd_flag)
-				sd = tmp;
-		}
-		/* while loop will break here if sd == NULL */
+	} else {
+		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
	}
	rcu_read_unlock();
 