Skip to content

Commit b0560bf

Browse files
Kan Liang and Ingo Molnar
authored and committed
perf/x86/intel: Clean up the hybrid CPU type handling code
There is a fairly long list of grievances about the current code. The main beefs: 1. hybrid_big_small assumes that the *HARDWARE* (CPUID) provided core types are a bitmap. They are not. If Intel happened to make a core type of 0xff, hilarity would ensue. 2. adl_get_hybrid_cpu_type() is utterly inscrutable. There are precisely zero comments and zero changelog about what it is attempting to do. According to Kan, the adl_get_hybrid_cpu_type() is there because some Alder Lake (ADL) CPUs can do some silly things. Some ADL models are *supposed* to be hybrid CPUs with big and little cores, but there are some SKUs that only have big cores. CPUID(0x1a) on those CPUs does not say that the CPUs are big cores. It apparently just returns 0x0. It confuses perf because it expects to see either 0x40 (Core) or 0x20 (Atom). The perf workaround for this is to watch for a CPU core saying it is type 0x0. If that happens on an Alder Lake, it calls x86_pmu.get_hybrid_cpu_type() and just assumes that the core is a Core (0x40) CPU. To fix up the mess, separate out the CPU types and the 'pmu' types. This allows 'hybrid_pmu_type' bitmaps without worrying that some future CPU type will set multiple bits. Since the types are now separate, add a function to glue them back together again. There is an actual comment on the situation in the glue function (find_hybrid_pmu_for_cpu()). Also, give ->get_hybrid_cpu_type() a real return type and make it clear that it is overriding the *CPU* type, not the PMU type. Rename cpu_type to pmu_type in the struct x86_hybrid_pmu to reflect the change. Originally-by: Dave Hansen <[email protected]> Signed-off-by: Kan Liang <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 299a5fc commit b0560bf

File tree

4 files changed

+72
-40
lines changed

4 files changed

+72
-40
lines changed

arch/x86/events/core.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1887,9 +1887,9 @@ ssize_t events_hybrid_sysfs_show(struct device *dev,
18871887

18881888
str = pmu_attr->event_str;
18891889
for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
1890-
if (!(x86_pmu.hybrid_pmu[i].cpu_type & pmu_attr->pmu_type))
1890+
if (!(x86_pmu.hybrid_pmu[i].pmu_type & pmu_attr->pmu_type))
18911891
continue;
1892-
if (x86_pmu.hybrid_pmu[i].cpu_type & pmu->cpu_type) {
1892+
if (x86_pmu.hybrid_pmu[i].pmu_type & pmu->pmu_type) {
18931893
next_str = strchr(str, ';');
18941894
if (next_str)
18951895
return snprintf(page, next_str - str + 1, "%s", str);
@@ -2169,7 +2169,7 @@ static int __init init_hw_perf_events(void)
21692169
hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_EXTENDED_HW_TYPE;
21702170

21712171
err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name,
2172-
(hybrid_pmu->cpu_type == hybrid_big) ? PERF_TYPE_RAW : -1);
2172+
(hybrid_pmu->pmu_type == hybrid_big) ? PERF_TYPE_RAW : -1);
21732173
if (err)
21742174
break;
21752175
}

arch/x86/events/intel/core.c

Lines changed: 47 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -3849,7 +3849,7 @@ static inline bool require_mem_loads_aux_event(struct perf_event *event)
38493849
return false;
38503850

38513851
if (is_hybrid())
3852-
return hybrid_pmu(event->pmu)->cpu_type == hybrid_big;
3852+
return hybrid_pmu(event->pmu)->pmu_type == hybrid_big;
38533853

38543854
return true;
38553855
}
@@ -4341,9 +4341,9 @@ adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
43414341
{
43424342
struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
43434343

4344-
if (pmu->cpu_type == hybrid_big)
4344+
if (pmu->pmu_type == hybrid_big)
43454345
return glc_get_event_constraints(cpuc, idx, event);
4346-
else if (pmu->cpu_type == hybrid_small)
4346+
else if (pmu->pmu_type == hybrid_small)
43474347
return tnt_get_event_constraints(cpuc, idx, event);
43484348

43494349
WARN_ON(1);
@@ -4413,9 +4413,9 @@ mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
44134413
{
44144414
struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
44154415

4416-
if (pmu->cpu_type == hybrid_big)
4416+
if (pmu->pmu_type == hybrid_big)
44174417
return rwc_get_event_constraints(cpuc, idx, event);
4418-
if (pmu->cpu_type == hybrid_small)
4418+
if (pmu->pmu_type == hybrid_small)
44194419
return cmt_get_event_constraints(cpuc, idx, event);
44204420

44214421
WARN_ON(1);
@@ -4426,18 +4426,18 @@ static int adl_hw_config(struct perf_event *event)
44264426
{
44274427
struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
44284428

4429-
if (pmu->cpu_type == hybrid_big)
4429+
if (pmu->pmu_type == hybrid_big)
44304430
return hsw_hw_config(event);
4431-
else if (pmu->cpu_type == hybrid_small)
4431+
else if (pmu->pmu_type == hybrid_small)
44324432
return intel_pmu_hw_config(event);
44334433

44344434
WARN_ON(1);
44354435
return -EOPNOTSUPP;
44364436
}
44374437

4438-
static u8 adl_get_hybrid_cpu_type(void)
4438+
static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void)
44394439
{
4440-
return hybrid_big;
4440+
return HYBRID_INTEL_CORE;
44414441
}
44424442

44434443
/*
@@ -4613,22 +4613,47 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
46134613
}
46144614
}
46154615

4616-
static bool init_hybrid_pmu(int cpu)
4616+
static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
46174617
{
4618-
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
46194618
u8 cpu_type = get_this_hybrid_cpu_type();
4620-
struct x86_hybrid_pmu *pmu = NULL;
46214619
int i;
46224620

4623-
if (!cpu_type && x86_pmu.get_hybrid_cpu_type)
4624-
cpu_type = x86_pmu.get_hybrid_cpu_type();
4621+
/*
4622+
* This is running on a CPU model that is known to have hybrid
4623+
* configurations. But the CPU told us it is not hybrid, shame
4624+
* on it. There should be a fixup function provided for these
4625+
* troublesome CPUs (->get_hybrid_cpu_type).
4626+
*/
4627+
if (cpu_type == HYBRID_INTEL_NONE) {
4628+
if (x86_pmu.get_hybrid_cpu_type)
4629+
cpu_type = x86_pmu.get_hybrid_cpu_type();
4630+
else
4631+
return NULL;
4632+
}
46254633

4634+
/*
4635+
* This essentially just maps between the 'hybrid_cpu_type'
4636+
* and 'hybrid_pmu_type' enums:
4637+
*/
46264638
for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
4627-
if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) {
4628-
pmu = &x86_pmu.hybrid_pmu[i];
4629-
break;
4630-
}
4639+
enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type;
4640+
4641+
if (cpu_type == HYBRID_INTEL_CORE &&
4642+
pmu_type == hybrid_big)
4643+
return &x86_pmu.hybrid_pmu[i];
4644+
if (cpu_type == HYBRID_INTEL_ATOM &&
4645+
pmu_type == hybrid_small)
4646+
return &x86_pmu.hybrid_pmu[i];
46314647
}
4648+
4649+
return NULL;
4650+
}
4651+
4652+
static bool init_hybrid_pmu(int cpu)
4653+
{
4654+
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4655+
struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu();
4656+
46324657
if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
46334658
cpuc->pmu = NULL;
46344659
return false;
@@ -5679,7 +5704,7 @@ static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
56795704
struct perf_pmu_events_hybrid_attr *pmu_attr =
56805705
container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
56815706

5682-
return pmu->cpu_type & pmu_attr->pmu_type;
5707+
return pmu->pmu_type & pmu_attr->pmu_type;
56835708
}
56845709

56855710
static umode_t hybrid_events_is_visible(struct kobject *kobj,
@@ -5716,7 +5741,7 @@ static umode_t hybrid_format_is_visible(struct kobject *kobj,
57165741
container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
57175742
int cpu = hybrid_find_supported_cpu(pmu);
57185743

5719-
return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0;
5744+
return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0;
57205745
}
57215746

57225747
static struct attribute_group hybrid_group_events_td = {
@@ -6607,7 +6632,7 @@ __init int intel_pmu_init(void)
66076632
/* Initialize big core specific PerfMon capabilities.*/
66086633
pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
66096634
pmu->name = "cpu_core";
6610-
pmu->cpu_type = hybrid_big;
6635+
pmu->pmu_type = hybrid_big;
66116636
intel_pmu_init_glc(&pmu->pmu);
66126637
pmu->late_ack = true;
66136638
if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
@@ -6643,7 +6668,7 @@ __init int intel_pmu_init(void)
66436668
/* Initialize Atom core specific PerfMon capabilities.*/
66446669
pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
66456670
pmu->name = "cpu_atom";
6646-
pmu->cpu_type = hybrid_small;
6671+
pmu->pmu_type = hybrid_small;
66476672
intel_pmu_init_grt(&pmu->pmu);
66486673
pmu->mid_ack = true;
66496674
pmu->num_counters = x86_pmu.num_counters;

arch/x86/events/intel/ds.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -261,7 +261,7 @@ static u64 __adl_latency_data_small(struct perf_event *event, u64 status,
261261
{
262262
u64 val;
263263

264-
WARN_ON_ONCE(hybrid_pmu(event->pmu)->cpu_type == hybrid_big);
264+
WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big);
265265

266266
dse &= PERF_PEBS_DATA_SOURCE_MASK;
267267
val = hybrid_var(event->pmu, pebs_data_source)[dse];

arch/x86/events/perf_event.h

Lines changed: 21 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -652,10 +652,29 @@ enum {
652652
#define PERF_PEBS_DATA_SOURCE_MAX 0x10
653653
#define PERF_PEBS_DATA_SOURCE_MASK (PERF_PEBS_DATA_SOURCE_MAX - 1)
654654

655+
enum hybrid_cpu_type {
656+
HYBRID_INTEL_NONE,
657+
HYBRID_INTEL_ATOM = 0x20,
658+
HYBRID_INTEL_CORE = 0x40,
659+
};
660+
661+
enum hybrid_pmu_type {
662+
not_hybrid,
663+
hybrid_small = BIT(0),
664+
hybrid_big = BIT(1),
665+
666+
hybrid_big_small = hybrid_big | hybrid_small, /* only used for matching */
667+
};
668+
669+
#define X86_HYBRID_PMU_ATOM_IDX 0
670+
#define X86_HYBRID_PMU_CORE_IDX 1
671+
672+
#define X86_HYBRID_NUM_PMUS 2
673+
655674
struct x86_hybrid_pmu {
656675
struct pmu pmu;
657676
const char *name;
658-
u8 cpu_type;
677+
enum hybrid_pmu_type pmu_type;
659678
cpumask_t supported_cpus;
660679
union perf_capabilities intel_cap;
661680
u64 intel_ctrl;
@@ -721,18 +740,6 @@ extern struct static_key_false perf_is_hybrid;
721740
__Fp; \
722741
})
723742

724-
enum hybrid_pmu_type {
725-
hybrid_big = 0x40,
726-
hybrid_small = 0x20,
727-
728-
hybrid_big_small = hybrid_big | hybrid_small,
729-
};
730-
731-
#define X86_HYBRID_PMU_ATOM_IDX 0
732-
#define X86_HYBRID_PMU_CORE_IDX 1
733-
734-
#define X86_HYBRID_NUM_PMUS 2
735-
736743
/*
737744
* struct x86_pmu - generic x86 pmu
738745
*/
@@ -940,7 +947,7 @@ struct x86_pmu {
940947
*/
941948
int num_hybrid_pmus;
942949
struct x86_hybrid_pmu *hybrid_pmu;
943-
u8 (*get_hybrid_cpu_type) (void);
950+
enum hybrid_cpu_type (*get_hybrid_cpu_type) (void);
944951
};
945952

946953
struct x86_perf_task_context_opt {

0 commit comments

Comments
 (0)