
Commit 24fc7ed

Peter Zijlstra authored and Ingo Molnar committed
sched/core: Introduce 'struct sched_domain_shared'
Since struct sched_domain is strictly per CPU, introduce a structure that is shared between all 'identical' sched_domains.

Limit it to SD_SHARE_PKG_RESOURCES domains for now, as we'll only use it for shared cache state; if another use comes up later we can easily relax this.

While the sched_groups are normally shared between CPUs, they are not natural to use when we need some shared state on a domain level -- since that would require the domain to have a parent, which is not a given.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 16f3ef4 commit 24fc7ed
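The message above describes a refcounted object shared by all 'identical' domains: each attached domain takes a reference, and the last detach frees the shared state. A minimal userspace sketch of that lifecycle, assuming hypothetical names, with C11 atomics standing in for the kernel's atomic_t and free() for kfree():

/* Hypothetical userspace model of the sched_domain_shared lifecycle;
 * not the kernel code itself. */
#include <stdatomic.h>
#include <stdlib.h>

struct shared_state {
	atomic_int ref;			/* like sched_domain_shared::ref */
};

struct domain {
	struct shared_state *shared;	/* like sched_domain::shared */
};

static void domain_attach(struct domain *d, struct shared_state *s)
{
	atomic_fetch_add(&s->ref, 1);	/* like atomic_inc(&sd->shared->ref) */
	d->shared = s;
}

static void domain_destroy(struct domain *d)
{
	/* The last reference frees the shared state, mirroring the
	 * atomic_dec_and_test() added to destroy_sched_domain() below. */
	if (d->shared && atomic_fetch_sub(&d->shared->ref, 1) == 1)
		free(d->shared);
	free(d);
}

int main(void)
{
	struct shared_state *s = calloc(1, sizeof(*s));
	struct domain *a = calloc(1, sizeof(*a));
	struct domain *b = calloc(1, sizeof(*b));

	atomic_init(&s->ref, 0);

	domain_attach(a, s);		/* two "identical" domains ... */
	domain_attach(b, s);		/* ... share one instance */
	domain_destroy(a);		/* ref 2 -> 1: shared state survives */
	domain_destroy(b);		/* ref 1 -> 0: shared state freed */
	return 0;
}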

File tree

2 files changed: +45 -5 lines changed

include/linux/sched.h

Lines changed: 6 additions & 0 deletions

@@ -1067,6 +1067,10 @@ extern int sched_domain_level_max;
 
 struct sched_group;
 
+struct sched_domain_shared {
+	atomic_t	ref;
+};
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
@@ -1135,6 +1139,7 @@ struct sched_domain {
 		void *private;		/* used during construction */
 		struct rcu_head rcu;	/* used during destruction */
 	};
+	struct sched_domain_shared *shared;
 
 	unsigned int span_weight;
 	/*
@@ -1168,6 +1173,7 @@ typedef int (*sched_domain_flags_f)(void);
 
 struct sd_data {
 	struct sched_domain **__percpu sd;
+	struct sched_domain_shared **__percpu sds;
 	struct sched_group **__percpu sg;
 	struct sched_group_capacity **__percpu sgc;
 };

kernel/sched/core.c

Lines changed: 39 additions & 5 deletions

@@ -5947,6 +5947,8 @@ static void destroy_sched_domain(struct sched_domain *sd)
 		kfree(sd->groups->sgc);
 		kfree(sd->groups);
 	}
+	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
+		kfree(sd->shared);
 	kfree(sd);
 }
 
@@ -6385,6 +6387,9 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
 
+	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
+		*per_cpu_ptr(sdd->sds, cpu) = NULL;
+
 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
 
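claim_allocations() works by side effect of the refcount, as in the sg/sgc cases it sits next to: a slot whose object was ever referenced is NULLed out of sdd's table, so the bulk teardown in __sdt_free() skips it and ownership passes to the domain. A hedged sketch of that claim-by-refcount idea, with hypothetical names:

/* Sketch of claim-by-refcount; not the kernel code itself. */
#include <stdatomic.h>
#include <stdlib.h>

#define NSLOTS 4

struct obj { atomic_int ref; };

static struct obj *table[NSLOTS];	/* like the sdd->sds percpu table */

/* Claim: a referenced object leaves the table, so the duty to free it
 * passes to whoever took the reference (its own put path). */
static void claim(int slot)
{
	if (atomic_load(&table[slot]->ref))
		table[slot] = NULL;
}

/* Bulk teardown frees only what was never claimed; free(NULL) is a
 * no-op, mirroring kfree(NULL) in __sdt_free(). */
static void free_unclaimed(void)
{
	for (int i = 0; i < NSLOTS; i++) {
		free(table[i]);
		table[i] = NULL;
	}
}

int main(void)
{
	for (int i = 0; i < NSLOTS; i++)
		table[i] = calloc(1, sizeof(struct obj));

	atomic_fetch_add(&table[2]->ref, 1);	/* slot 2 gets attached */
	claim(2);	/* ... and leaves the table; its owner frees it later */
	free_unclaimed();			/* slots 0, 1, 3 freed here */
	return 0;
}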
@@ -6429,10 +6434,12 @@ static int sched_domains_curr_level;
 
 static struct sched_domain *
 sd_init(struct sched_domain_topology_level *tl,
+	const struct cpumask *cpu_map,
 	struct sched_domain *child, int cpu)
 {
-	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
-	int sd_weight, sd_flags = 0;
+	struct sd_data *sdd = &tl->data;
+	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
+	int sd_id, sd_weight, sd_flags = 0;
 
 #ifdef CONFIG_NUMA
 	/*
@@ -6487,6 +6494,9 @@ sd_init(struct sched_domain_topology_level *tl,
 #endif
 	};
 
+	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
+	sd_id = cpumask_first(sched_domain_span(sd));
+
 	/*
 	 * Convert topological properties into behaviour.
 	 */
@@ -6529,7 +6539,16 @@ sd_init(struct sched_domain_topology_level *tl,
 		sd->idle_idx = 1;
 	}
 
-	sd->private = &tl->data;
+	/*
+	 * For all levels sharing cache; connect a sched_domain_shared
+	 * instance.
+	 */
+	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
+		atomic_inc(&sd->shared->ref);
+	}
+
+	sd->private = sdd;
 
 	return sd;
 }
@@ -6839,6 +6858,10 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sd)
 			return -ENOMEM;
 
+		sdd->sds = alloc_percpu(struct sched_domain_shared *);
+		if (!sdd->sds)
+			return -ENOMEM;
+
 		sdd->sg = alloc_percpu(struct sched_group *);
 		if (!sdd->sg)
 			return -ENOMEM;
@@ -6849,6 +6872,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
+			struct sched_domain_shared *sds;
 			struct sched_group *sg;
 			struct sched_group_capacity *sgc;
 
@@ -6859,6 +6883,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 
 			*per_cpu_ptr(sdd->sd, j) = sd;
 
+			sds = kzalloc_node(sizeof(struct sched_domain_shared),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sds)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sds, j) = sds;
+
 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
 			if (!sg)
@@ -6898,13 +6929,17 @@ static void __sdt_free(const struct cpumask *cpu_map)
 				kfree(*per_cpu_ptr(sdd->sd, j));
 			}
 
+			if (sdd->sds)
+				kfree(*per_cpu_ptr(sdd->sds, j));
 			if (sdd->sg)
 				kfree(*per_cpu_ptr(sdd->sg, j));
 			if (sdd->sgc)
 				kfree(*per_cpu_ptr(sdd->sgc, j));
 		}
 		free_percpu(sdd->sd);
 		sdd->sd = NULL;
+		free_percpu(sdd->sds);
+		sdd->sds = NULL;
 		free_percpu(sdd->sg);
 		sdd->sg = NULL;
 		free_percpu(sdd->sgc);
@@ -6916,9 +6951,8 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
 		struct sched_domain *child, int cpu)
 {
-	struct sched_domain *sd = sd_init(tl, child, cpu);
+	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
 
-	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
 	if (child) {
 		sd->level = child->level + 1;
 		sched_domain_level_max = max(sched_domain_level_max, sd->level);
