@@ -6381,7 +6381,8 @@ static int sched_domains_curr_level;
 			 SD_SHARE_POWERDOMAIN)
 
 static struct sched_domain *
-sd_init(struct sched_domain_topology_level *tl, int cpu)
+sd_init(struct sched_domain_topology_level *tl,
+	struct sched_domain *child, int cpu)
 {
 	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
 	int sd_weight, sd_flags = 0;
@@ -6433,6 +6434,7 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
 		.smt_gain		= 0,
 		.max_newidle_lb_cost	= 0,
 		.next_decay_max_lb_cost	= jiffies,
+		.child			= child,
 #ifdef CONFIG_SCHED_DEBUG
 		.name			= tl->name,
 #endif
@@ -6857,14 +6859,13 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
 		struct sched_domain *child, int cpu)
 {
-	struct sched_domain *sd = sd_init(tl, cpu);
+	struct sched_domain *sd = sd_init(tl, child, cpu);
 
 	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
 	if (child) {
 		sd->level = child->level + 1;
 		sched_domain_level_max = max(sched_domain_level_max, sd->level);
 		child->parent = sd;
-		sd->child = child;
 
 		if (!cpumask_subset(sched_domain_span(child),
 				    sched_domain_span(sd))) {
0 commit comments