Skip to content

Commit 39621c5

Browse files
sandip4n and Peter Zijlstra
authored and committed
perf/x86/amd/uncore: Use dynamic events array
If AMD Performance Monitoring Version 2 (PerfMonV2) is supported, the number of available counters for a given uncore PMU may not be fixed across families and models and has to be determined at runtime. The per-cpu uncore PMU data currently uses a fixed-sized array for event information. Make it dynamic based on the number of available counters. Signed-off-by: Sandipan Das <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Link: https://lore.kernel.org/r/21eea0cb6de9d14f78d52d1d62637ae02bc900f5.1652954372.git.sandipan.das@amd.com
1 parent e60b7cb commit 39621c5

File tree

1 file changed

+31
-7
lines changed

1 file changed

+31
-7
lines changed

arch/x86/events/amd/uncore.c

Lines changed: 31 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121
#define NUM_COUNTERS_NB 4
2222
#define NUM_COUNTERS_L2 4
2323
#define NUM_COUNTERS_L3 6
24-
#define MAX_COUNTERS 6
2524

2625
#define RDPMC_BASE_NB 6
2726
#define RDPMC_BASE_LLC 10
@@ -46,7 +45,7 @@ struct amd_uncore {
4645
u32 msr_base;
4746
cpumask_t *active_mask;
4847
struct pmu *pmu;
49-
struct perf_event *events[MAX_COUNTERS];
48+
struct perf_event **events;
5049
struct hlist_node node;
5150
};
5251

@@ -370,11 +369,19 @@ static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
370369
cpu_to_node(cpu));
371370
}
372371

372+
static inline struct perf_event **
373+
amd_uncore_events_alloc(unsigned int num, unsigned int cpu)
374+
{
375+
return kzalloc_node(sizeof(struct perf_event *) * num, GFP_KERNEL,
376+
cpu_to_node(cpu));
377+
}
378+
373379
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
374380
{
375-
struct amd_uncore *uncore_nb = NULL, *uncore_llc;
381+
struct amd_uncore *uncore_nb = NULL, *uncore_llc = NULL;
376382

377383
if (amd_uncore_nb) {
384+
*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
378385
uncore_nb = amd_uncore_alloc(cpu);
379386
if (!uncore_nb)
380387
goto fail;
@@ -384,11 +391,15 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
384391
uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
385392
uncore_nb->active_mask = &amd_nb_active_mask;
386393
uncore_nb->pmu = &amd_nb_pmu;
394+
uncore_nb->events = amd_uncore_events_alloc(num_counters_nb, cpu);
395+
if (!uncore_nb->events)
396+
goto fail;
387397
uncore_nb->id = -1;
388398
*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
389399
}
390400

391401
if (amd_uncore_llc) {
402+
*per_cpu_ptr(amd_uncore_llc, cpu) = NULL;
392403
uncore_llc = amd_uncore_alloc(cpu);
393404
if (!uncore_llc)
394405
goto fail;
@@ -398,16 +409,26 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
398409
uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
399410
uncore_llc->active_mask = &amd_llc_active_mask;
400411
uncore_llc->pmu = &amd_llc_pmu;
412+
uncore_llc->events = amd_uncore_events_alloc(num_counters_llc, cpu);
413+
if (!uncore_llc->events)
414+
goto fail;
401415
uncore_llc->id = -1;
402416
*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
403417
}
404418

405419
return 0;
406420

407421
fail:
408-
if (amd_uncore_nb)
409-
*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
410-
kfree(uncore_nb);
422+
if (uncore_nb) {
423+
kfree(uncore_nb->events);
424+
kfree(uncore_nb);
425+
}
426+
427+
if (uncore_llc) {
428+
kfree(uncore_llc->events);
429+
kfree(uncore_llc);
430+
}
431+
411432
return -ENOMEM;
412433
}
413434

@@ -540,8 +561,11 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
540561
if (cpu == uncore->cpu)
541562
cpumask_clear_cpu(cpu, uncore->active_mask);
542563

543-
if (!--uncore->refcnt)
564+
if (!--uncore->refcnt) {
565+
kfree(uncore->events);
544566
kfree(uncore);
567+
}
568+
545569
*per_cpu_ptr(uncores, cpu) = NULL;
546570
}
547571

0 commit comments

Comments (0)