@@ -52,7 +52,7 @@ static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
         return pmu->counter_bitmask[pmc->type];
 }
 
-static inline bool pmc_enabled(struct kvm_pmc *pmc)
+static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
 {
         struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
         return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
@@ -87,20 +87,20 @@ static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
         return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
 }
 
-void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
+void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
         if (vcpu->arch.apic)
                 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 }
 
-static void trigger_pmi(struct irq_work *irq_work)
+static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
 {
         struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
                         irq_work);
         struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
                         arch.pmu);
 
-        kvm_deliver_pmi(vcpu);
+        kvm_pmu_deliver_pmi(vcpu);
 }
 
 static void kvm_perf_overflow(struct perf_event *perf_event,
@@ -138,7 +138,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
         }
 }
 
-static u64 read_pmc(struct kvm_pmc *pmc)
+static u64 pmc_read_counter(struct kvm_pmc *pmc)
 {
         u64 counter, enabled, running;
 
@@ -153,16 +153,16 @@ static u64 read_pmc(struct kvm_pmc *pmc)
         return counter & pmc_bitmask(pmc);
 }
 
-static void stop_counter(struct kvm_pmc *pmc)
+static void pmc_stop_counter(struct kvm_pmc *pmc)
 {
         if (pmc->perf_event) {
-                pmc->counter = read_pmc(pmc);
+                pmc->counter = pmc_read_counter(pmc);
                 perf_event_release_kernel(pmc->perf_event);
                 pmc->perf_event = NULL;
         }
 }
 
-static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
+static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                 unsigned config, bool exclude_user, bool exclude_kernel,
                 bool intr, bool in_tx, bool in_tx_cp)
 {
@@ -224,9 +224,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 
         pmc->eventsel = eventsel;
 
-        stop_counter(pmc);
+        pmc_stop_counter(pmc);
 
-        if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
+        if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
                 return;
 
         event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
@@ -246,7 +246,7 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
         if (type == PERF_TYPE_RAW)
                 config = eventsel & X86_RAW_EVENT_MASK;
 
-        reprogram_counter(pmc, type, config,
+        pmc_reprogram_counter(pmc, type, config,
                         !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                         !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
                         eventsel & ARCH_PERFMON_EVENTSEL_INT,
@@ -259,19 +259,19 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
         unsigned en = en_pmi & 0x3;
         bool pmi = en_pmi & 0x8;
 
-        stop_counter(pmc);
+        pmc_stop_counter(pmc);
 
-        if (!en || !pmc_enabled(pmc))
+        if (!en || !pmc_is_enabled(pmc))
                 return;
 
-        reprogram_counter(pmc, PERF_TYPE_HARDWARE,
+        pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
                         arch_events[fixed_pmc_events[idx]].event_type,
                         !(en & 0x2), /* exclude user */
                         !(en & 0x1), /* exclude kernel */
                         pmi, false, false);
 }
 
-static inline u8 fixed_en_pmi(u64 ctrl, int idx)
+static inline u8 fixed_ctrl_field(u64 ctrl, int idx)
 {
         return (ctrl >> (idx * 4)) & 0xf;
 }
@@ -281,10 +281,10 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
         int i;
 
         for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-                u8 en_pmi = fixed_en_pmi(data, i);
+                u8 en_pmi = fixed_ctrl_field(data, i);
                 struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
 
-                if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
+                if (fixed_ctrl_field(pmu->fixed_ctr_ctrl, i) == en_pmi)
                         continue;
 
                 reprogram_fixed_counter(pmc, en_pmi, i);
@@ -293,7 +293,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
         pmu->fixed_ctr_ctrl = data;
 }
 
-static void reprogram_idx(struct kvm_pmu *pmu, int idx)
+static void reprogram_counter(struct kvm_pmu *pmu, int idx)
 {
         struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
 
@@ -305,7 +305,7 @@ static void reprogram_idx(struct kvm_pmu *pmu, int idx)
         else {
                 int fidx = idx - INTEL_PMC_IDX_FIXED;
                 reprogram_fixed_counter(pmc,
                                 fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
-                                fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
+                                fixed_ctrl_field(pmu->fixed_ctr_ctrl, fidx), fidx);
         }
 }
 
@@ -317,10 +317,10 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
         pmu->global_ctrl = data;
 
         for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
-                reprogram_idx(pmu, bit);
+                reprogram_counter(pmu, bit);
 }
 
-bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
+bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
         struct kvm_pmu *pmu = &vcpu->arch.pmu;
         int ret;
@@ -362,7 +362,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
         default:
                 if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
                     (pmc = get_fixed_pmc(pmu, index))) {
-                        *data = read_pmc(pmc);
+                        *data = pmc_read_counter(pmc);
                         return 0;
                 } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
                         *data = pmc->eventsel;
@@ -415,7 +415,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                     (pmc = get_fixed_pmc(pmu, index))) {
                         if (!msr_info->host_initiated)
                                 data = (s64)(s32)data;
-                        pmc->counter += data - read_pmc(pmc);
+                        pmc->counter += data - pmc_read_counter(pmc);
                         return 0;
                 } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
                         if (data == pmc->eventsel)
@@ -429,7 +429,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         return 1;
 }
 
-int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc)
+int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned pmc)
 {
         struct kvm_pmu *pmu = &vcpu->arch.pmu;
         bool fixed = pmc & (1u << 30);
@@ -438,7 +438,7 @@ int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc)
                 (fixed && pmc >= pmu->nr_arch_fixed_counters);
 }
 
-int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
 {
         struct kvm_pmu *pmu = &vcpu->arch.pmu;
         bool fast_mode = pmc & (1u << 31);
@@ -452,15 +452,15 @@ int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
         if (fixed && pmc >= pmu->nr_arch_fixed_counters)
                 return 1;
         counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
-        ctr = read_pmc(&counters[pmc]);
+        ctr = pmc_read_counter(&counters[pmc]);
         if (fast_mode)
                 ctr = (u32)ctr;
         *data = ctr;
 
         return 0;
 }
 
-void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
+void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
         struct kvm_pmu *pmu = &vcpu->arch.pmu;
         struct kvm_cpuid_entry2 *entry;
@@ -527,8 +527,8 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
                 pmu->fixed_counters[i].vcpu = vcpu;
                 pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
         }
-        init_irq_work(&pmu->irq_work, trigger_pmi);
-        kvm_pmu_cpuid_update(vcpu);
+        init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
+        kvm_pmu_refresh(vcpu);
 }
 
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
@@ -539,12 +539,12 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
         irq_work_sync(&pmu->irq_work);
         for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                 struct kvm_pmc *pmc = &pmu->gp_counters[i];
-                stop_counter(pmc);
+                pmc_stop_counter(pmc);
                 pmc->counter = pmc->eventsel = 0;
         }
 
         for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
-                stop_counter(&pmu->fixed_counters[i]);
+                pmc_stop_counter(&pmu->fixed_counters[i]);
 
         pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
                 pmu->global_ovf_ctrl = 0;
@@ -555,7 +555,7 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
         kvm_pmu_reset(vcpu);
 }
 
-void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
+void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 {
         struct kvm_pmu *pmu = &vcpu->arch.pmu;
         u64 bitmask;
@@ -571,6 +571,6 @@ void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
                         continue;
                 }
 
-                reprogram_idx(pmu, bit);
+                reprogram_counter(pmu, bit);
         }
 }