@@ -26,11 +26,16 @@
 
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpuhotplug.h>
 
 #include <asm/intel_rdt_common.h>
 #include <asm/intel-family.h>
 #include <asm/intel_rdt.h>
 
+/* Mutex to protect rdtgroup access. */
+DEFINE_MUTEX(rdtgroup_mutex);
+
 #define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
 
 struct rdt_resource rdt_resources_all[] = {
@@ -72,6 +77,11 @@ struct rdt_resource rdt_resources_all[] = {
 	},
 };
 
+static int cbm_idx(struct rdt_resource *r, int closid)
+{
+	return closid * r->cbm_idx_multi + r->cbm_idx_offset;
+}
+
 /*
  * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
  * as they do not have CPUID enumeration support for Cache allocation.
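
The cbm_idx() helper in the hunk above turns a CLOSID into an index into the resource's array of cache-mask MSRs. The multiplier and offset let the same arithmetic serve plain CAT (one mask per CLOSID) and, presumably, code/data-split configurations where two masks interleave per CLOSID. A minimal userspace sketch of the arithmetic; the (multi, offset) pairs below are illustrative assumptions, not values taken from this patch:

#include <stdio.h>

/* Same arithmetic as the kernel's cbm_idx(), with the struct fields
 * passed directly so the sketch is self-contained. */
static int cbm_idx(int multi, int offset, int closid)
{
	return closid * multi + offset;
}

int main(void)
{
	int closid;

	for (closid = 0; closid < 4; closid++)
		printf("closid %d: CAT idx %d, data idx %d, code idx %d\n",
		       closid,
		       cbm_idx(1, 0, closid),  /* plain CAT: one MSR per CLOSID */
		       cbm_idx(2, 0, closid),  /* assumed data mask: even slots */
		       cbm_idx(2, 1, closid)); /* assumed code mask: odd slots */
	return 0;
}
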
@@ -176,13 +186,192 @@ static inline bool get_rdt_resources(void)
 	return ret;
 }
 
+static int get_cache_id(int cpu, int level)
+{
+	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+	int i;
+
+	for (i = 0; i < ci->num_leaves; i++) {
+		if (ci->info_list[i].level == level)
+			return ci->info_list[i].id;
+	}
+
+	return -1;
+}
+
+void rdt_cbm_update(void *arg)
+{
+	struct msr_param *m = (struct msr_param *)arg;
+	struct rdt_resource *r = m->res;
+	int i, cpu = smp_processor_id();
+	struct rdt_domain *d;
+
+	list_for_each_entry(d, &r->domains, list) {
+		/* Find the domain that contains this CPU */
+		if (cpumask_test_cpu(cpu, &d->cpu_mask))
+			goto found;
+	}
+	pr_info_once("cpu %d not found in any domain for resource %s\n",
+		     cpu, r->name);
+
+	return;
+
+found:
+	for (i = m->low; i < m->high; i++) {
+		int idx = cbm_idx(r, i);
+
+		wrmsrl(r->msr_base + idx, d->cbm[i]);
+	}
+}
+
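
rdt_cbm_update() derives its domain from whichever CPU it happens to run on, so it is only meaningful as a cross-call target. Below is a hedged sketch of how a caller might drive it, one CPU per domain; the helper name and its locking assumption are hypothetical, and only struct msr_param and rdt_cbm_update() come from this patch:

/* Hypothetical caller: propagate CBMs for closids [low, high) to every
 * domain of a resource. Assumes rdtgroup_mutex is held so the domain
 * list is stable. */
static void sketch_cbm_update_all(struct rdt_resource *r, int low, int high)
{
	struct msr_param m = { .res = r, .low = low, .high = high };
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list)
		/* Any one CPU in the domain can write the shared MSRs. */
		smp_call_function_any(&d->cpu_mask, rdt_cbm_update, &m, 1);
}
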
+/*
+ * rdt_find_domain - Find a domain in a resource that matches input resource id
+ *
+ * Search resource r's domain list to find the resource id. If the resource
+ * id is found in a domain, return the domain. Otherwise, if requested by the
+ * caller, set *pos to the list position where a domain with that id would
+ * belong, i.e. just before the first domain whose id is bigger than the
+ * input id. The domain list is sorted by id in ascending order.
+ */
+static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
+					  struct list_head **pos)
+{
+	struct rdt_domain *d;
+	struct list_head *l;
+
+	if (id < 0)
+		return ERR_PTR(id);
+
+	list_for_each(l, &r->domains) {
+		d = list_entry(l, struct rdt_domain, list);
+		/* When id is found, return its domain. */
+		if (id == d->id)
+			return d;
+		/* Stop searching when finding id's position in sorted list. */
+		if (id < d->id)
+			break;
+	}
+
+	if (pos)
+		*pos = l;
+
+	return NULL;
+}
+
+/*
+ * domain_add_cpu - Add a cpu to a resource's domain list.
+ *
+ * If an existing domain in the resource r's domain list matches the cpu's
+ * resource id, add the cpu in the domain.
+ *
+ * Otherwise, a new domain is allocated and inserted into the right position
+ * in the domain list sorted by id in ascending order.
+ *
+ * The order in the domain list is visible to users when we print entries
+ * in the schemata file and schemata input is validated to have the same order
+ * as this list.
+ */
+static void domain_add_cpu(int cpu, struct rdt_resource *r)
+{
+	int i, id = get_cache_id(cpu, r->cache_level);
+	struct list_head *add_pos = NULL;
+	struct rdt_domain *d;
+
+	d = rdt_find_domain(r, id, &add_pos);
+	if (IS_ERR(d)) {
+		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
+		return;
+	}
+
+	if (d) {
+		cpumask_set_cpu(cpu, &d->cpu_mask);
+		return;
+	}
+
+	d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
+	if (!d)
+		return;
+
+	d->id = id;
+
+	d->cbm = kmalloc_array(r->num_closid, sizeof(*d->cbm), GFP_KERNEL);
+	if (!d->cbm) {
+		kfree(d);
+		return;
+	}
+
+	for (i = 0; i < r->num_closid; i++) {
+		int idx = cbm_idx(r, i);
+
+		d->cbm[i] = r->max_cbm;
+		wrmsrl(r->msr_base + idx, d->cbm[i]);
+	}
+
+	cpumask_set_cpu(cpu, &d->cpu_mask);
+	list_add_tail(&d->list, add_pos);
+	r->num_domains++;
+}
+
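
A worked example with hypothetical cache ids makes the insertion logic concrete: suppose resource r already has domains with ids {0, 2} and a CPU whose cache id is 1 comes online. rdt_find_domain(r, 1, &add_pos) returns NULL with add_pos pointing at the id-2 domain's list node, and since list_add_tail(&d->list, add_pos) links the new node immediately before add_pos, the list stays sorted as {0, 1, 2}. When the new id is larger than every existing id, the walk runs off the end and add_pos is the list head, so the insert lands at the tail.
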
+static void domain_remove_cpu(int cpu, struct rdt_resource *r)
+{
+	int id = get_cache_id(cpu, r->cache_level);
+	struct rdt_domain *d;
+
+	d = rdt_find_domain(r, id, NULL);
+	if (IS_ERR_OR_NULL(d)) {
+		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
+		return;
+	}
+
+	cpumask_clear_cpu(cpu, &d->cpu_mask);
+	if (cpumask_empty(&d->cpu_mask)) {
+		r->num_domains--;
+		kfree(d->cbm);
+		list_del(&d->list);
+		kfree(d);
+	}
+}
+
+static int intel_rdt_online_cpu(unsigned int cpu)
+{
+	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct rdt_resource *r;
+
+	mutex_lock(&rdtgroup_mutex);
+	for_each_capable_rdt_resource(r)
+		domain_add_cpu(cpu, r);
+	state->closid = 0;
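+	/*
+	 * PQR_ASSOC carries the RMID in its low word and the CLOSID in its
+	 * high word, so writing (rmid, 0) starts this CPU in CLOSID 0
+	 * without disturbing cache monitoring.
+	 */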
+	wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
+	mutex_unlock(&rdtgroup_mutex);
+
+	return 0;
+}
+
+static int intel_rdt_offline_cpu(unsigned int cpu)
+{
+	struct rdt_resource *r;
+
+	mutex_lock(&rdtgroup_mutex);
+	for_each_capable_rdt_resource(r)
+		domain_remove_cpu(cpu, r);
+	mutex_unlock(&rdtgroup_mutex);
+
+	return 0;
+}
+
 static int __init intel_rdt_late_init(void)
 {
 	struct rdt_resource *r;
+	int state;
 
 	if (!get_rdt_resources())
 		return -ENODEV;
 
+	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+				  "x86/rdt/cat:online:",
+				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
+	if (state < 0)
+		return state;
+
 	for_each_capable_rdt_resource(r)
 		pr_info("Intel RDT %s allocation detected\n", r->name);
 
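
Since CPUHP_AP_ONLINE_DYN is a dynamically allocated hotplug state, cpuhp_setup_state() returns the allocated state number on success (hence the `state < 0` check) and invokes intel_rdt_online_cpu() on every CPU that is already online, so the domain lists are fully populated before the detection messages print. This patch adds no teardown; a minimal sketch of what a matching cleanup could look like (the rdt_online variable is hypothetical, not part of this diff):

/* Hypothetical cleanup: removing the dynamic state first runs the
 * offline callback, intel_rdt_offline_cpu(), on each online CPU. */
static enum cpuhp_state rdt_online;	/* would save cpuhp_setup_state()'s return */

static void intel_rdt_exit(void)
{
	cpuhp_remove_state(rdt_online);
}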