@@ -2556,16 +2556,6 @@ static int icl_set_topdown_event_period(struct perf_event *event)
 	return 0;
 }
 
-static int adl_set_topdown_event_period(struct perf_event *event)
-{
-	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
-
-	if (pmu->cpu_type != hybrid_big)
-		return 0;
-
-	return icl_set_topdown_event_period(event);
-}
-
 DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period);
 
 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
@@ -2708,16 +2698,6 @@ static u64 icl_update_topdown_event(struct perf_event *event)
 						 x86_pmu.num_topdown_events - 1);
 }
 
-static u64 adl_update_topdown_event(struct perf_event *event)
-{
-	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
-
-	if (pmu->cpu_type != hybrid_big)
-		return 0;
-
-	return icl_update_topdown_event(event);
-}
-
 DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
 
 static void intel_pmu_read_topdown_event(struct perf_event *event)
@@ -6612,32 +6592,11 @@ __init int intel_pmu_init(void)
 		static_branch_enable(&perf_is_hybrid);
 		x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
 
-		x86_pmu.pebs_aliases = NULL;
-		x86_pmu.pebs_prec_dist = true;
-		x86_pmu.pebs_block = true;
-		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
-		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
-		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
-		x86_pmu.lbr_pt_coexist = true;
 		x86_pmu.pebs_latency_data = adl_latency_data_small;
-		x86_pmu.num_topdown_events = 8;
-		static_call_update(intel_pmu_update_topdown_event,
-				   &adl_update_topdown_event);
-		static_call_update(intel_pmu_set_topdown_event_period,
-				   &adl_set_topdown_event_period);
-
 		x86_pmu.filter = intel_pmu_filter;
 		x86_pmu.get_event_constraints = adl_get_event_constraints;
 		x86_pmu.hw_config = adl_hw_config;
-		x86_pmu.limit_period = glc_limit_period;
 		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
-		/*
-		 * The rtm_abort_event is used to check whether to enable GPRs
-		 * for the RTM abort event. Atom doesn't have the RTM abort
-		 * event. There is no harmful to set it in the common
-		 * x86_pmu.rtm_abort_event.
-		 */
-		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
 
 		td_attr = adl_hybrid_events_attrs;
 		mem_attr = adl_hybrid_mem_attrs;
@@ -6649,6 +6608,7 @@ __init int intel_pmu_init(void)
 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
 		pmu->name = "cpu_core";
 		pmu->cpu_type = hybrid_big;
+		intel_pmu_init_glc(&pmu->pmu);
 		pmu->late_ack = true;
 		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
 			pmu->num_counters = x86_pmu.num_counters + 2;
@@ -6678,16 +6638,13 @@ __init int intel_pmu_init(void)
 		pmu->intel_cap.perf_metrics = 1;
 		pmu->intel_cap.pebs_output_pt_available = 0;
 
-		memcpy(pmu->hw_cache_event_ids, glc_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
-		memcpy(pmu->hw_cache_extra_regs, glc_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
-		pmu->event_constraints = intel_glc_event_constraints;
-		pmu->pebs_constraints = intel_glc_pebs_event_constraints;
 		pmu->extra_regs = intel_glc_extra_regs;
 
 		/* Initialize Atom core specific PerfMon capabilities.*/
 		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
 		pmu->name = "cpu_atom";
 		pmu->cpu_type = hybrid_small;
+		intel_pmu_init_grt(&pmu->pmu);
 		pmu->mid_ack = true;
 		pmu->num_counters = x86_pmu.num_counters;
 		pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
@@ -6699,12 +6656,6 @@ __init int intel_pmu_init(void)
 		pmu->intel_cap.perf_metrics = 0;
 		pmu->intel_cap.pebs_output_pt_available = 1;
 
-		memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
-		memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
-		pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
-		pmu->event_constraints = intel_slm_event_constraints;
-		pmu->pebs_constraints = intel_grt_pebs_event_constraints;
-		pmu->extra_regs = intel_grt_extra_regs;
 		if (is_mtl(boot_cpu_data.x86_model)) {
 			x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_rwc_extra_regs;
 			x86_pmu.pebs_latency_data = mtl_latency_data_small;
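
For context: the ADL-specific settings removed in the hunks above are presumably re-applied through the new intel_pmu_init_glc()/intel_pmu_init_grt() helpers that the cpu_core and cpu_atom branches now call. Their definitions are not part of these hunks; what follows is only a rough sketch of what the big-core helper might consolidate, inferred from the deleted lines, not the patch's actual code.

/*
 * Hypothetical sketch only -- inferred from the lines deleted above.
 * The real intel_pmu_init_glc() is defined elsewhere in the patch and
 * may differ in detail.
 */
static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
{
	struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu);

	/* Golden Cove core settings formerly set inline in intel_pmu_init(). */
	x86_pmu.pebs_aliases = NULL;
	x86_pmu.pebs_prec_dist = true;
	x86_pmu.pebs_block = true;
	x86_pmu.flags |= PMU_FL_HAS_RSP_1;
	x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
	x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
	x86_pmu.lbr_pt_coexist = true;
	x86_pmu.num_topdown_events = 8;
	x86_pmu.limit_period = glc_limit_period;
	x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);

	/*
	 * With the adl_* wrappers gone, the icl_* top-down handlers are
	 * presumably installed directly; Atom never schedules top-down
	 * events, so the hybrid_big check becomes unnecessary.
	 */
	static_call_update(intel_pmu_update_topdown_event,
			   &icl_update_topdown_event);
	static_call_update(intel_pmu_set_topdown_event_period,
			   &icl_set_topdown_event_period);

	/* Per-PMU event tables and constraints for the big core. */
	memcpy(hpmu->hw_cache_event_ids, glc_hw_cache_event_ids,
	       sizeof(hpmu->hw_cache_event_ids));
	memcpy(hpmu->hw_cache_extra_regs, glc_hw_cache_extra_regs,
	       sizeof(hpmu->hw_cache_extra_regs));
	hpmu->event_constraints = intel_glc_event_constraints;
	hpmu->pebs_constraints = intel_glc_pebs_event_constraints;
}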