Skip to content

Commit 2264d9c

Browse files
aegl authored and KAGA-KOKO committed
x86/intel_rdt: Build structures for each resource based on cache topology
We use the cpu hotplug notifier to catch each cpu in turn and look at its cache topology w.r.t. each of the resource groups. As we discover new resources, we initialize the bitmask array for each to the default (full access) value.

Signed-off-by: Tony Luck <[email protected]>
Signed-off-by: Fenghua Yu <[email protected]>
Cc: "Ravi V Shankar" <[email protected]>
Cc: "Shaohua Li" <[email protected]>
Cc: "Sai Prakhya" <[email protected]>
Cc: "Peter Zijlstra" <[email protected]>
Cc: "Stephane Eranian" <[email protected]>
Cc: "Dave Hansen" <[email protected]>
Cc: "David Carrillo-Cisneros" <[email protected]>
Cc: "Nilay Vaish" <[email protected]>
Cc: "Vikas Shivappa" <[email protected]>
Cc: "Ingo Molnar" <[email protected]>
Cc: "Borislav Petkov" <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
1 parent f20e578 commit 2264d9c

File tree

2 files changed

+224
-0
lines changed

2 files changed

+224
-0
lines changed

arch/x86/include/asm/intel_rdt.h

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,34 @@ struct rdt_resource {
3939
int cbm_idx_offset;
4040
};
4141

42+
/**
 * struct rdt_domain - group of cpus sharing an RDT resource
 * @list:	all instances of this resource (sorted by @id in
 *		ascending order, see rdt_find_domain())
 * @id:	unique id for this instance (the cache instance id from
 *		cacheinfo for the cache level this resource is attached to)
 * @cpu_mask:	which cpus share this resource
 * @cbm:	array of cache bit masks (indexed by CLOSID)
 */
struct rdt_domain {
	struct list_head list;
	int id;
	struct cpumask cpu_mask;
	u32 *cbm;
};
55+
56+
/**
 * struct msr_param - set a range of MSRs from a domain
 * @res:	The resource to use
 * @low:	Beginning index from base MSR
 * @high:	End index (exclusive; rdt_cbm_update() writes
 *		indices @low .. @high - 1)
 */
struct msr_param {
	struct rdt_resource *res;
	int low;
	int high;
};
67+
68+
extern struct mutex rdtgroup_mutex;
69+
4270
extern struct rdt_resource rdt_resources_all[];
4371

4472
enum {
@@ -56,6 +84,11 @@ enum {
5684
r++) \
5785
if (r->capable)
5886

87+
/*
 * Walk every entry of rdt_resources_all[], executing the attached
 * statement only for resources that are currently enabled.
 */
#define for_each_enabled_rdt_resource(r) \
	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
	     r++) \
		if (r->enabled)
91+
5992
/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
6093
union cpuid_0x10_1_eax {
6194
struct {
@@ -71,4 +104,6 @@ union cpuid_0x10_1_edx {
71104
} split;
72105
unsigned int full;
73106
};
107+
108+
void rdt_cbm_update(void *arg);
74109
#endif /* _ASM_X86_INTEL_RDT_H */

arch/x86/kernel/cpu/intel_rdt.c

Lines changed: 189 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,11 +26,16 @@
2626

2727
#include <linux/slab.h>
2828
#include <linux/err.h>
29+
#include <linux/cacheinfo.h>
30+
#include <linux/cpuhotplug.h>
2931

3032
#include <asm/intel_rdt_common.h>
3133
#include <asm/intel-family.h>
3234
#include <asm/intel_rdt.h>
3335

36+
/* Mutex to protect rdtgroup access. */
37+
DEFINE_MUTEX(rdtgroup_mutex);
38+
3439
#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
3540

3641
struct rdt_resource rdt_resources_all[] = {
@@ -72,6 +77,11 @@ struct rdt_resource rdt_resources_all[] = {
7277
},
7378
};
7479

80+
static int cbm_idx(struct rdt_resource *r, int closid)
81+
{
82+
return closid * r->cbm_idx_multi + r->cbm_idx_offset;
83+
}
84+
7585
/*
7686
* cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
7787
* as they do not have CPUID enumeration support for Cache allocation.
@@ -176,13 +186,192 @@ static inline bool get_rdt_resources(void)
176186
return ret;
177187
}
178188

189+
static int get_cache_id(int cpu, int level)
190+
{
191+
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
192+
int i;
193+
194+
for (i = 0; i < ci->num_leaves; i++) {
195+
if (ci->info_list[i].level == level)
196+
return ci->info_list[i].id;
197+
}
198+
199+
return -1;
200+
}
201+
202+
void rdt_cbm_update(void *arg)
203+
{
204+
struct msr_param *m = (struct msr_param *)arg;
205+
struct rdt_resource *r = m->res;
206+
int i, cpu = smp_processor_id();
207+
struct rdt_domain *d;
208+
209+
list_for_each_entry(d, &r->domains, list) {
210+
/* Find the domain that contains this CPU */
211+
if (cpumask_test_cpu(cpu, &d->cpu_mask))
212+
goto found;
213+
}
214+
pr_info_once("cpu %d not found in any domain for resource %s\n",
215+
cpu, r->name);
216+
217+
return;
218+
219+
found:
220+
for (i = m->low; i < m->high; i++) {
221+
int idx = cbm_idx(r, i);
222+
223+
wrmsrl(r->msr_base + idx, d->cbm[i]);
224+
}
225+
}
226+
227+
/*
228+
* rdt_find_domain - Find a domain in a resource that matches input resource id
229+
*
230+
* Search resource r's domain list to find the resource id. If the resource
231+
* id is found in a domain, return the domain. Otherwise, if requested by
232+
* caller, return the first domain whose id is bigger than the input id.
233+
* The domain list is sorted by id in ascending order.
234+
*/
235+
static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
236+
struct list_head **pos)
237+
{
238+
struct rdt_domain *d;
239+
struct list_head *l;
240+
241+
if (id < 0)
242+
return ERR_PTR(id);
243+
244+
list_for_each(l, &r->domains) {
245+
d = list_entry(l, struct rdt_domain, list);
246+
/* When id is found, return its domain. */
247+
if (id == d->id)
248+
return d;
249+
/* Stop searching when finding id's position in sorted list. */
250+
if (id < d->id)
251+
break;
252+
}
253+
254+
if (pos)
255+
*pos = l;
256+
257+
return NULL;
258+
}
259+
260+
/*
261+
* domain_add_cpu - Add a cpu to a resource's domain list.
262+
*
263+
* If an existing domain in the resource r's domain list matches the cpu's
264+
* resource id, add the cpu in the domain.
265+
*
266+
* Otherwise, a new domain is allocated and inserted into the right position
267+
* in the domain list sorted by id in ascending order.
268+
*
269+
* The order in the domain list is visible to users when we print entries
270+
* in the schemata file and schemata input is validated to have the same order
271+
* as this list.
272+
*/
273+
static void domain_add_cpu(int cpu, struct rdt_resource *r)
274+
{
275+
int i, id = get_cache_id(cpu, r->cache_level);
276+
struct list_head *add_pos = NULL;
277+
struct rdt_domain *d;
278+
279+
d = rdt_find_domain(r, id, &add_pos);
280+
if (IS_ERR(d)) {
281+
pr_warn("Could't find cache id for cpu %d\n", cpu);
282+
return;
283+
}
284+
285+
if (d) {
286+
cpumask_set_cpu(cpu, &d->cpu_mask);
287+
return;
288+
}
289+
290+
d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
291+
if (!d)
292+
return;
293+
294+
d->id = id;
295+
296+
d->cbm = kmalloc_array(r->num_closid, sizeof(*d->cbm), GFP_KERNEL);
297+
if (!d->cbm) {
298+
kfree(d);
299+
return;
300+
}
301+
302+
for (i = 0; i < r->num_closid; i++) {
303+
int idx = cbm_idx(r, i);
304+
305+
d->cbm[i] = r->max_cbm;
306+
wrmsrl(r->msr_base + idx, d->cbm[i]);
307+
}
308+
309+
cpumask_set_cpu(cpu, &d->cpu_mask);
310+
list_add_tail(&d->list, add_pos);
311+
r->num_domains++;
312+
}
313+
314+
static void domain_remove_cpu(int cpu, struct rdt_resource *r)
315+
{
316+
int id = get_cache_id(cpu, r->cache_level);
317+
struct rdt_domain *d;
318+
319+
d = rdt_find_domain(r, id, NULL);
320+
if (IS_ERR_OR_NULL(d)) {
321+
pr_warn("Could't find cache id for cpu %d\n", cpu);
322+
return;
323+
}
324+
325+
cpumask_clear_cpu(cpu, &d->cpu_mask);
326+
if (cpumask_empty(&d->cpu_mask)) {
327+
r->num_domains--;
328+
kfree(d->cbm);
329+
list_del(&d->list);
330+
kfree(d);
331+
}
332+
}
333+
334+
/*
 * CPU hotplug online callback: register the new cpu with the domain of
 * every capable resource, then put it in the default group (closid 0)
 * by writing its PQR assoc MSR.
 *
 * NOTE(review): statement order looks deliberate — domains (and their
 * CBM MSRs, via domain_add_cpu()) are set up before PQR_ASSOC selects
 * closid 0 on this cpu; preserve the order.
 */
static int intel_rdt_online_cpu(unsigned int cpu)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	/* This cpu starts in the default resource group */
	state->closid = 0;
	wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}
348+
349+
static int intel_rdt_offline_cpu(unsigned int cpu)
350+
{
351+
struct rdt_resource *r;
352+
353+
mutex_lock(&rdtgroup_mutex);
354+
for_each_capable_rdt_resource(r)
355+
domain_remove_cpu(cpu, r);
356+
mutex_unlock(&rdtgroup_mutex);
357+
358+
return 0;
359+
}
360+
179361
static int __init intel_rdt_late_init(void)
180362
{
181363
struct rdt_resource *r;
364+
int state;
182365

183366
if (!get_rdt_resources())
184367
return -ENODEV;
185368

369+
state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
370+
"x86/rdt/cat:online:",
371+
intel_rdt_online_cpu, intel_rdt_offline_cpu);
372+
if (state < 0)
373+
return state;
374+
186375
for_each_capable_rdt_resource(r)
187376
pr_info("Intel RDT %s allocation detected\n", r->name);
188377

0 commit comments

Comments
 (0)