@@ -5947,6 +5947,8 @@ static void destroy_sched_domain(struct sched_domain *sd)
 		kfree(sd->groups->sgc);
 		kfree(sd->groups);
 	}
+	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
+		kfree(sd->shared);
 	kfree(sd);
 }
 
@@ -6385,6 +6387,9 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
 
+	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
+		*per_cpu_ptr(sdd->sds, cpu) = NULL;
+
 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
 
@@ -6429,10 +6434,12 @@ static int sched_domains_curr_level;
 
 static struct sched_domain *
 sd_init(struct sched_domain_topology_level *tl,
+	const struct cpumask *cpu_map,
 	struct sched_domain *child, int cpu)
 {
-	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
-	int sd_weight, sd_flags = 0;
+	struct sd_data *sdd = &tl->data;
+	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
+	int sd_id, sd_weight, sd_flags = 0;
 
 #ifdef CONFIG_NUMA
 	/*
@@ -6487,6 +6494,9 @@ sd_init(struct sched_domain_topology_level *tl,
 #endif
 	};
 
+	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
+	sd_id = cpumask_first(sched_domain_span(sd));
+
 	/*
 	 * Convert topological properties into behaviour.
 	 */
@@ -6529,7 +6539,16 @@ sd_init(struct sched_domain_topology_level *tl,
 		sd->idle_idx = 1;
 	}
 
-	sd->private = &tl->data;
+	/*
+	 * For all levels sharing cache; connect a sched_domain_shared
+	 * instance.
+	 */
+	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
+		atomic_inc(&sd->shared->ref);
+	}
+
+	sd->private = sdd;
 
 	return sd;
 }
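
Taken together with the destroy_sched_domain() hunk at the top, the sd_init() changes above implement a simple shared-object refcount: every domain at a SD_SHARE_PKG_RESOURCES level points at the sched_domain_shared allocated for the first CPU of its span (sd_id) and takes a reference, and the last domain torn down drops the final reference and frees it. Below is a minimal userspace sketch of that attach/release pattern, using C11 atomics in place of the kernel's atomic_t; the struct and function names here are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative stand-ins for sched_domain_shared / sched_domain. */
struct shared {
	atomic_int ref;
};

struct domain {
	struct shared *shared;
};

/* Attach side: as in sd_init(), point the domain at the shared instance and take a ref. */
static void domain_attach_shared(struct domain *d, struct shared *s)
{
	d->shared = s;
	atomic_fetch_add(&s->ref, 1);
}

/* Release side: as in destroy_sched_domain(), whoever drops the last ref frees it. */
static void domain_destroy(struct domain *d)
{
	if (d->shared && atomic_fetch_sub(&d->shared->ref, 1) == 1)
		free(d->shared);
	free(d);
}

int main(void)
{
	struct shared *s = calloc(1, sizeof(*s));
	struct domain *a = calloc(1, sizeof(*a));
	struct domain *b = calloc(1, sizeof(*b));

	if (!s || !a || !b)
		return 1;
	atomic_init(&s->ref, 0);

	domain_attach_shared(a, s);
	domain_attach_shared(b, s);

	domain_destroy(a);	/* ref 2 -> 1, shared instance survives */
	domain_destroy(b);	/* ref 1 -> 0, shared instance freed here */
	return 0;
}
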
@@ -6839,6 +6858,10 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sd)
 			return -ENOMEM;
 
+		sdd->sds = alloc_percpu(struct sched_domain_shared *);
+		if (!sdd->sds)
+			return -ENOMEM;
+
 		sdd->sg = alloc_percpu(struct sched_group *);
 		if (!sdd->sg)
 			return -ENOMEM;
@@ -6849,6 +6872,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
+			struct sched_domain_shared *sds;
 			struct sched_group *sg;
 			struct sched_group_capacity *sgc;
 
@@ -6859,6 +6883,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 
 			*per_cpu_ptr(sdd->sd, j) = sd;
 
+			sds = kzalloc_node(sizeof(struct sched_domain_shared),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sds)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sds, j) = sds;
+
 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
 			if (!sg)
@@ -6898,13 +6929,17 @@ static void __sdt_free(const struct cpumask *cpu_map)
 				kfree(*per_cpu_ptr(sdd->sd, j));
 			}
 
+			if (sdd->sds)
+				kfree(*per_cpu_ptr(sdd->sds, j));
 			if (sdd->sg)
 				kfree(*per_cpu_ptr(sdd->sg, j));
 			if (sdd->sgc)
 				kfree(*per_cpu_ptr(sdd->sgc, j));
 		}
 		free_percpu(sdd->sd);
 		sdd->sd = NULL;
+		free_percpu(sdd->sds);
+		sdd->sds = NULL;
 		free_percpu(sdd->sg);
 		sdd->sg = NULL;
 		free_percpu(sdd->sgc);
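
On the allocation side, __sdt_alloc() gives every CPU in the map its own sched_domain_shared up front; claim_allocations() (second hunk above) then clears the per-CPU slot of any instance that actually picked up a reference, so __sdt_free() only frees the unreferenced leftovers (kfree(NULL) is a no-op for slots that were claimed). A rough userspace analog of that claim/free split follows; the array stands in for the sdd->sds per-CPU pointers and the names are again illustrative.

#include <stdatomic.h>
#include <stdlib.h>

#define NR_CPUS 4

struct shared {
	atomic_int ref;
};

static struct shared *per_cpu_slot[NR_CPUS];	/* stand-in for sdd->sds */

/* Allocate one instance per CPU, as __sdt_alloc() does. */
static int sdt_alloc(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		per_cpu_slot[cpu] = calloc(1, sizeof(struct shared));
		if (!per_cpu_slot[cpu])
			return -1;
		atomic_init(&per_cpu_slot[cpu]->ref, 0);
	}
	return 0;
}

/* Referenced instances now belong to the domain tree: drop our pointer. */
static void claim_allocation(int cpu)
{
	if (atomic_load(&per_cpu_slot[cpu]->ref))
		per_cpu_slot[cpu] = NULL;
}

/* Free whatever was never claimed; free(NULL) is a no-op. */
static void sdt_free(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		free(per_cpu_slot[cpu]);
		per_cpu_slot[cpu] = NULL;
	}
}

int main(void)
{
	if (sdt_alloc()) {
		sdt_free();
		return 1;
	}

	/* Pretend only CPU 0's instance was attached to a domain. */
	struct shared *claimed = per_cpu_slot[0];
	atomic_fetch_add(&claimed->ref, 1);

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		claim_allocation(cpu);

	sdt_free();	/* frees the three unclaimed instances */

	/* The claimed instance is released by its last user, as in the sketch above. */
	if (atomic_fetch_sub(&claimed->ref, 1) == 1)
		free(claimed);
	return 0;
}
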
@@ -6916,9 +6951,8 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
 		struct sched_domain *child, int cpu)
 {
-	struct sched_domain *sd = sd_init(tl, child, cpu);
+	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
 
-	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
 	if (child) {
 		sd->level = child->level + 1;
 		sched_domain_level_max = max(sched_domain_level_max, sd->level);