
Commit 299a5fc

Kan Liang authored and Ingo Molnar committed
perf/x86/intel: Apply the common initialization code for ADL
Use intel_pmu_init_glc() and intel_pmu_init_grt() to replace the duplicated initialization code for ADL.

The current code already checks the PERF_X86_EVENT_TOPDOWN flag before invoking the Topdown metrics functions. (The PERF_X86_EVENT_TOPDOWN flag indicates the Topdown metrics feature, which is only available on the p-core.) Drop the now-unnecessary adl_set_topdown_event_period() and adl_update_topdown_event().

Signed-off-by: Kan Liang <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent d87d221 commit 299a5fc
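The redundancy the commit message describes is visible in the dispatch path. Below is a minimal sketch of that gating: is_topdown_count() follows the kernel source, while read_event_sketch() is a hypothetical condensation of the real read path, not code from this file. The Topdown static calls are reached only for events whose hw flags include PERF_X86_EVENT_TOPDOWN, and only p-core events ever receive that flag, so the hybrid_big re-check inside the deleted adl_*() wrappers could never fail.

/* Gating helper, as in arch/x86/events/perf_event.h. */
static inline bool is_topdown_count(struct perf_event *event)
{
	return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
}

/*
 * Hypothetical condensation of the read path: the Topdown static
 * call is reached only when the event already carries the
 * p-core-only PERF_X86_EVENT_TOPDOWN flag.
 */
static void read_event_sketch(struct perf_event *event)
{
	if (is_topdown_count(event))
		static_call(intel_pmu_update_topdown_event)(event);
	else
		x86_perf_event_update(event);
}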


arch/x86/events/intel/core.c

Lines changed: 2 additions & 51 deletions
@@ -2556,16 +2556,6 @@ static int icl_set_topdown_event_period(struct perf_event *event)
 	return 0;
 }
 
-static int adl_set_topdown_event_period(struct perf_event *event)
-{
-	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
-
-	if (pmu->cpu_type != hybrid_big)
-		return 0;
-
-	return icl_set_topdown_event_period(event);
-}
-
 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period);
 
 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
@@ -2708,16 +2698,6 @@ static u64 icl_update_topdown_event(struct perf_event *event)
 						 x86_pmu.num_topdown_events - 1);
 }
 
-static u64 adl_update_topdown_event(struct perf_event *event)
-{
-	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
-
-	if (pmu->cpu_type != hybrid_big)
-		return 0;
-
-	return icl_update_topdown_event(event);
-}
-
 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
 
 static void intel_pmu_read_topdown_event(struct perf_event *event)
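Both deleted wrappers existed only as retarget destinations for the static calls whose defaults are declared above. For readers unfamiliar with the mechanism, a generic sketch of the kernel static-call API follows (my_update, default_update, and override_update are hypothetical names, not code from this file):

#include <linux/perf_event.h>
#include <linux/static_call.h>

/* The second argument is the default target of the static call. */
static u64 default_update(struct perf_event *event)
{
	return 0;
}
DEFINE_STATIC_CALL(my_update, default_update);

static u64 override_update(struct perf_event *event)
{
	return 1;
}

static void select_handler(void)
{
	/* Patch every call site to jump to the override instead. */
	static_call_update(my_update, &override_update);
}

static u64 invoke(struct perf_event *event)
{
	/* Invoked with near-direct-call overhead. */
	return static_call(my_update)(event);
}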
@@ -6612,32 +6592,11 @@ __init int intel_pmu_init(void)
 		static_branch_enable(&perf_is_hybrid);
 		x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
 
-		x86_pmu.pebs_aliases = NULL;
-		x86_pmu.pebs_prec_dist = true;
-		x86_pmu.pebs_block = true;
-		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
-		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
-		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
-		x86_pmu.lbr_pt_coexist = true;
 		x86_pmu.pebs_latency_data = adl_latency_data_small;
-		x86_pmu.num_topdown_events = 8;
-		static_call_update(intel_pmu_update_topdown_event,
-				   &adl_update_topdown_event);
-		static_call_update(intel_pmu_set_topdown_event_period,
-				   &adl_set_topdown_event_period);
-
 		x86_pmu.filter = intel_pmu_filter;
 		x86_pmu.get_event_constraints = adl_get_event_constraints;
 		x86_pmu.hw_config = adl_hw_config;
-		x86_pmu.limit_period = glc_limit_period;
 		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
-		/*
-		 * The rtm_abort_event is used to check whether to enable GPRs
-		 * for the RTM abort event. Atom doesn't have the RTM abort
-		 * event. There is no harmful to set it in the common
-		 * x86_pmu.rtm_abort_event.
-		 */
-		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
 
 		td_attr = adl_hybrid_events_attrs;
 		mem_attr = adl_hybrid_mem_attrs;
@@ -6649,6 +6608,7 @@ __init int intel_pmu_init(void)
 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
 		pmu->name = "cpu_core";
 		pmu->cpu_type = hybrid_big;
+		intel_pmu_init_glc(&pmu->pmu);
 		pmu->late_ack = true;
 		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
 			pmu->num_counters = x86_pmu.num_counters + 2;
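The new intel_pmu_init_glc() call consolidates the p-core setup deleted in the hunks above. The real helper was presumably introduced by the parent commit (d87d221); what follows is a reconstruction from the deleted lines only, with the hybrid()/hybrid_var() per-PMU accessors assumed rather than confirmed:

static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
{
	/* Global capabilities formerly open-coded in the ADL branch. */
	x86_pmu.pebs_aliases = NULL;
	x86_pmu.pebs_prec_dist = true;
	x86_pmu.pebs_block = true;
	x86_pmu.flags |= PMU_FL_HAS_RSP_1;
	x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
	x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
	x86_pmu.lbr_pt_coexist = true;
	x86_pmu.limit_period = glc_limit_period;
	x86_pmu.num_topdown_events = 8;
	x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);

	/*
	 * With the adl_*() wrappers gone, the icl_*() Topdown handlers
	 * are installed directly.
	 */
	static_call_update(intel_pmu_update_topdown_event,
			   &icl_update_topdown_event);
	static_call_update(intel_pmu_set_topdown_event_period,
			   &icl_set_topdown_event_period);

	/* Per-PMU tables formerly copied/assigned inline. */
	memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));
	memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs,
	       sizeof(hw_cache_extra_regs));
	hybrid(pmu, event_constraints) = intel_glc_event_constraints;
	hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;
}

Because the helper takes a struct pmu *, the same setup can be shared by hybrid and non-hybrid (e.g. SPR-class) init paths, which is what lets the ADL branch collapse to a single call.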
@@ -6678,16 +6638,13 @@ __init int intel_pmu_init(void)
 		pmu->intel_cap.perf_metrics = 1;
 		pmu->intel_cap.pebs_output_pt_available = 0;
 
-		memcpy(pmu->hw_cache_event_ids, glc_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
-		memcpy(pmu->hw_cache_extra_regs, glc_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
-		pmu->event_constraints = intel_glc_event_constraints;
-		pmu->pebs_constraints = intel_glc_pebs_event_constraints;
 		pmu->extra_regs = intel_glc_extra_regs;
 
 		/* Initialize Atom core specific PerfMon capabilities.*/
 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
 		pmu->name = "cpu_atom";
 		pmu->cpu_type = hybrid_small;
+		intel_pmu_init_grt(&pmu->pmu);
 		pmu->mid_ack = true;
 		pmu->num_counters = x86_pmu.num_counters;
 		pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
@@ -6699,12 +6656,6 @@ __init int intel_pmu_init(void)
 		pmu->intel_cap.perf_metrics = 0;
 		pmu->intel_cap.pebs_output_pt_available = 1;
 
-		memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
-		memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
-		pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
-		pmu->event_constraints = intel_slm_event_constraints;
-		pmu->pebs_constraints = intel_grt_pebs_event_constraints;
-		pmu->extra_regs = intel_grt_extra_regs;
 		if (is_mtl(boot_cpu_data.x86_model)) {
 			x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_rwc_extra_regs;
 			x86_pmu.pebs_latency_data = mtl_latency_data_small;
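Likewise, intel_pmu_init_grt() stands in for the Atom-side setup deleted in the last hunk. A reconstruction from those deleted lines under the same assumptions (per-PMU hybrid accessors; the helper's exact contents live in the parent commit):

static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
{
	memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));
	memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs,
	       sizeof(hw_cache_extra_regs));
	/* The ITLB read-access cache event is unsupported; mark it invalid. */
	hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
	hybrid(pmu, event_constraints) = intel_slm_event_constraints;
	hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
	hybrid(pmu, extra_regs) = intel_grt_extra_regs;
}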
