@@ -14,7 +14,7 @@ struct pci_driver *uncore_pci_driver;
 DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
 struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
 struct pci_extra_dev *uncore_extra_pci_dev;
-static int max_packages;
+static int max_dies;

 /* mask of cpus that collect uncore events */
 static cpumask_t uncore_cpu_mask;
@@ -100,13 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,

 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-	unsigned int pkgid = topology_logical_die_id(cpu);
+	unsigned int dieid = topology_logical_die_id(cpu);

 	/*
 	 * The unsigned check also catches the '-1' return value for non
 	 * existent mappings in the topology map.
 	 */
-	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
+	return dieid < max_dies ? pmu->boxes[dieid] : NULL;
 }

 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -311,7 +311,7 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
 	uncore_pmu_init_hrtimer(box);
 	box->cpu = -1;
 	box->pci_phys_id = -1;
-	box->pkgid = -1;
+	box->dieid = -1;

 	/* set default hrtimer timeout */
 	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
@@ -826,10 +826,10 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)

 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
-	int pkg;
+	int die;

-	for (pkg = 0; pkg < max_packages; pkg++)
-		kfree(pmu->boxes[pkg]);
+	for (die = 0; die < max_dies; die++)
+		kfree(pmu->boxes[die]);
 	kfree(pmu->boxes);
 }

@@ -866,7 +866,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
 	if (!pmus)
 		return -ENOMEM;

-	size = max_packages * sizeof(struct intel_uncore_box *);
+	size = max_dies * sizeof(struct intel_uncore_box *);

 	for (i = 0; i < type->num_boxes; i++) {
 		pmus[i].func_id = setid ? i : -1;
@@ -936,21 +936,21 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu = NULL;
 	struct intel_uncore_box *box;
-	int phys_id, pkg, ret;
+	int phys_id, die, ret;

 	phys_id = uncore_pcibus_to_physid(pdev->bus);
 	if (phys_id < 0)
 		return -ENODEV;

-	pkg = (topology_max_die_per_package() > 1) ? phys_id :
+	die = (topology_max_die_per_package() > 1) ? phys_id :
 			topology_phys_to_logical_pkg(phys_id);
-	if (pkg < 0)
+	if (die < 0)
 		return -EINVAL;

 	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
 		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

-		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
+		uncore_extra_pci_dev[die].dev[idx] = pdev;
 		pci_set_drvdata(pdev, NULL);
 		return 0;
 	}
@@ -989,7 +989,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
 		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
 	}

-	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
+	if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
 		return -EINVAL;

 	box = uncore_alloc_box(type, NUMA_NO_NODE);
@@ -1003,21 +1003,21 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id

 	atomic_inc(&box->refcnt);
 	box->pci_phys_id = phys_id;
-	box->pkgid = pkg;
+	box->dieid = die;
 	box->pci_dev = pdev;
 	box->pmu = pmu;
 	uncore_box_init(box);
 	pci_set_drvdata(pdev, box);

-	pmu->boxes[pkg] = box;
+	pmu->boxes[die] = box;
 	if (atomic_inc_return(&pmu->activeboxes) > 1)
 		return 0;

 	/* First active box registers the pmu */
 	ret = uncore_pmu_register(pmu);
 	if (ret) {
 		pci_set_drvdata(pdev, NULL);
-		pmu->boxes[pkg] = NULL;
+		pmu->boxes[die] = NULL;
 		uncore_box_exit(box);
 		kfree(box);
 	}
@@ -1028,17 +1028,17 @@ static void uncore_pci_remove(struct pci_dev *pdev)
 {
 	struct intel_uncore_box *box;
 	struct intel_uncore_pmu *pmu;
-	int i, phys_id, pkg;
+	int i, phys_id, die;

 	phys_id = uncore_pcibus_to_physid(pdev->bus);

 	box = pci_get_drvdata(pdev);
 	if (!box) {
-		pkg = (topology_max_die_per_package() > 1) ? phys_id :
+		die = (topology_max_die_per_package() > 1) ? phys_id :
 				topology_phys_to_logical_pkg(phys_id);
 		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
-			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
-				uncore_extra_pci_dev[pkg].dev[i] = NULL;
+			if (uncore_extra_pci_dev[die].dev[i] == pdev) {
+				uncore_extra_pci_dev[die].dev[i] = NULL;
 				break;
 			}
 		}
@@ -1051,7 +1051,7 @@ static void uncore_pci_remove(struct pci_dev *pdev)
 		return;

 	pci_set_drvdata(pdev, NULL);
-	pmu->boxes[box->pkgid] = NULL;
+	pmu->boxes[box->dieid] = NULL;
 	if (atomic_dec_return(&pmu->activeboxes) == 0)
 		uncore_pmu_unregister(pmu);
 	uncore_box_exit(box);
@@ -1063,7 +1063,7 @@ static int __init uncore_pci_init(void)
 	size_t size;
 	int ret;

-	size = max_packages * sizeof(struct pci_extra_dev);
+	size = max_dies * sizeof(struct pci_extra_dev);
 	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
 	if (!uncore_extra_pci_dev) {
 		ret = -ENOMEM;
@@ -1110,11 +1110,11 @@ static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 {
 	struct intel_uncore_pmu *pmu = type->pmus;
 	struct intel_uncore_box *box;
-	int i, pkg;
+	int i, die;

-	pkg = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
+	die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
 	for (i = 0; i < type->num_boxes; i++, pmu++) {
-		box = pmu->boxes[pkg];
+		box = pmu->boxes[die];
 		if (!box)
 			continue;

@@ -1147,7 +1147,7 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
-	int i, pkg, target;
+	int i, die, target;

 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
@@ -1166,12 +1166,12 @@ static int uncore_event_cpu_offline(unsigned int cpu)

 unref:
 	/* Clear the references */
-	pkg = topology_logical_die_id(cpu);
+	die = topology_logical_die_id(cpu);
 	for (; *types; types++) {
 		type = *types;
 		pmu = type->pmus;
 		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
+			box = pmu->boxes[die];
 			if (box && atomic_dec_return(&box->refcnt) == 0)
 				uncore_box_exit(box);
 		}
@@ -1180,7 +1180,7 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 }

 static int allocate_boxes(struct intel_uncore_type **types,
-			  unsigned int pkg, unsigned int cpu)
+			  unsigned int die, unsigned int cpu)
 {
 	struct intel_uncore_box *box, *tmp;
 	struct intel_uncore_type *type;
@@ -1193,20 +1193,20 @@ static int allocate_boxes(struct intel_uncore_type **types,
 		type = *types;
 		pmu = type->pmus;
 		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			if (pmu->boxes[pkg])
+			if (pmu->boxes[die])
 				continue;
 			box = uncore_alloc_box(type, cpu_to_node(cpu));
 			if (!box)
 				goto cleanup;
 			box->pmu = pmu;
-			box->pkgid = pkg;
+			box->dieid = die;
 			list_add(&box->active_list, &allocated);
 		}
 	}
 	/* Install them in the pmus */
 	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
 		list_del_init(&box->active_list);
-		box->pmu->boxes[pkg] = box;
+		box->pmu->boxes[die] = box;
 	}
 	return 0;

@@ -1223,18 +1223,18 @@ static int uncore_event_cpu_online(unsigned int cpu)
 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
-	int i, ret, pkg, target;
+	int i, ret, die, target;

-	pkg = topology_logical_die_id(cpu);
-	ret = allocate_boxes(types, pkg, cpu);
+	die = topology_logical_die_id(cpu);
+	ret = allocate_boxes(types, die, cpu);
 	if (ret)
 		return ret;

 	for (; *types; types++) {
 		type = *types;
 		pmu = type->pmus;
 		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
+			box = pmu->boxes[die];
 			if (box && atomic_inc_return(&box->refcnt) == 1)
 				uncore_box_init(box);
 		}
@@ -1419,7 +1419,7 @@ static int __init intel_uncore_init(void)
 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return -ENODEV;

-	max_packages = topology_max_packages() * topology_max_die_per_package();
+	max_dies = topology_max_packages() * topology_max_die_per_package();

 	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
 	if (uncore_init->pci_init) {
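
The rename tracks a real change of scope: the per-PMU box arrays are now sized by max_dies (packages times dies per package) and indexed by logical die id instead of package id. The stand-alone C sketch below illustrates that indexing and the unsigned bounds check used in uncore_pmu_to_box(); it is not kernel code, and the topology helpers are hypothetical stand-ins for topology_max_packages(), topology_max_die_per_package() and topology_logical_die_id().

/* Minimal user-space sketch of per-die indexing, under the assumptions above. */
#include <stdio.h>
#include <stdlib.h>

static int max_packages_in_system(void)   { return 2; }       /* stand-in */
static int max_dies_per_package(void)     { return 2; }       /* stand-in */
static int logical_die_id_of_cpu(int cpu) { return cpu / 4; } /* stand-in: 4 CPUs per die */

struct box { int dieid; };

int main(void)
{
	/* one array slot per logical die, mirroring max_dies in the patch */
	int max_dies = max_packages_in_system() * max_dies_per_package();
	struct box **boxes = calloc(max_dies, sizeof(*boxes));
	int cpu = 5;
	unsigned int die = logical_die_id_of_cpu(cpu);
	struct box *b;

	/* the unsigned compare also rejects a -1 "no mapping" id, as in uncore_pmu_to_box() */
	b = (die < (unsigned int)max_dies) ? boxes[die] : NULL;

	printf("cpu %d -> die %u -> box %p\n", cpu, die, (void *)b);
	free(boxes);
	return 0;
}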