@@ -103,25 +103,31 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
 {
         struct kvm_pmc *pmc = perf_event->overflow_handler_context;
         struct kvm_pmu *pmu = pmc_to_pmu(pmc);
-        if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+
+        if (!test_and_set_bit(pmc->idx,
+                              (unsigned long *)&pmu->reprogram_pmi)) {
                 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                 kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
         }
 }
 
 static void kvm_perf_overflow_intr(struct perf_event *perf_event,
-                struct perf_sample_data *data, struct pt_regs *regs)
+                                   struct perf_sample_data *data,
+                                   struct pt_regs *regs)
 {
         struct kvm_pmc *pmc = perf_event->overflow_handler_context;
         struct kvm_pmu *pmu = pmc_to_pmu(pmc);
-        if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+
+        if (!test_and_set_bit(pmc->idx,
+                              (unsigned long *)&pmu->reprogram_pmi)) {
                 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                 kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+
                 /*
                  * Inject PMI. If vcpu was in a guest mode during NMI PMI
                  * can be ejected on a guest mode re-entry. Otherwise we can't
                  * be sure that vcpu wasn't executing hlt instruction at the
-                 * time of vmexit and is not going to re-enter guest mode until,
+                 * time of vmexit and is not going to re-enter guest mode until
                  * woken up. So we should wake it, but this is impossible from
                  * NMI context. Do it from irq work instead.
                  */
@@ -157,8 +163,9 @@ static void pmc_stop_counter(struct kvm_pmc *pmc)
 }
 
 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
-                unsigned config, bool exclude_user, bool exclude_kernel,
-                bool intr, bool in_tx, bool in_tx_cp)
+                                  unsigned config, bool exclude_user,
+                                  bool exclude_kernel, bool intr,
+                                  bool in_tx, bool in_tx_cp)
 {
         struct perf_event *event;
         struct perf_event_attr attr = {
@@ -171,6 +178,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                 .exclude_kernel = exclude_kernel,
                 .config = config,
         };
+
         if (in_tx)
                 attr.config |= HSW_IN_TX;
         if (in_tx_cp)
@@ -182,8 +190,8 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                                                  intr ? kvm_perf_overflow_intr :
                                                  kvm_perf_overflow, pmc);
         if (IS_ERR(event)) {
-                printk_once("kvm: pmu event creation failed %ld\n",
-                                PTR_ERR(event));
+                printk_once("kvm_pmu: event creation failed %ld\n",
+                            PTR_ERR(event));
                 return;
         }
 
@@ -227,10 +235,10 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
         unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 
         if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
-                                ARCH_PERFMON_EVENTSEL_INV |
-                                ARCH_PERFMON_EVENTSEL_CMASK |
-                                HSW_IN_TX |
-                                HSW_IN_TX_CHECKPOINTED))) {
+                          ARCH_PERFMON_EVENTSEL_INV |
+                          ARCH_PERFMON_EVENTSEL_CMASK |
+                          HSW_IN_TX |
+                          HSW_IN_TX_CHECKPOINTED))) {
                 config = find_arch_event(pmc_to_pmu(pmc), event_select,
                                          unit_mask);
                 if (config != PERF_COUNT_HW_MAX)
@@ -241,28 +249,28 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
                 config = eventsel & X86_RAW_EVENT_MASK;
 
         pmc_reprogram_counter(pmc, type, config,
-                        !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
-                        !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-                        eventsel & ARCH_PERFMON_EVENTSEL_INT,
-                        (eventsel & HSW_IN_TX),
-                        (eventsel & HSW_IN_TX_CHECKPOINTED));
+                              !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+                              !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+                              eventsel & ARCH_PERFMON_EVENTSEL_INT,
+                              (eventsel & HSW_IN_TX),
+                              (eventsel & HSW_IN_TX_CHECKPOINTED));
 }
 
-static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
+static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
 {
-        unsigned en = en_pmi & 0x3;
-        bool pmi = en_pmi & 0x8;
+        unsigned en_field = ctrl & 0x3;
+        bool pmi = ctrl & 0x8;
 
         pmc_stop_counter(pmc);
 
-        if (!en || !pmc_is_enabled(pmc))
+        if (!en_field || !pmc_is_enabled(pmc))
                 return;
 
         pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
-                        arch_events[fixed_pmc_events[idx]].event_type,
-                        !(en & 0x2), /* exclude user */
-                        !(en & 0x1), /* exclude kernel */
-                        pmi, false, false);
+                              arch_events[fixed_pmc_events[idx]].event_type,
+                              !(en_field & 0x2), /* exclude user */
+                              !(en_field & 0x1), /* exclude kernel */
+                              pmi, false, false);
 }
 
 static inline u8 fixed_ctrl_field(u64 ctrl, int idx)
@@ -275,31 +283,33 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
         int i;
 
         for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-                u8 en_pmi = fixed_ctrl_field(data, i);
+                u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
+                u8 new_ctrl = fixed_ctrl_field(data, i);
                 struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
 
-                if (fixed_ctrl_field(pmu->fixed_ctr_ctrl, i) == en_pmi)
+                if (old_ctrl == new_ctrl)
                         continue;
 
-                reprogram_fixed_counter(pmc, en_pmi, i);
+                reprogram_fixed_counter(pmc, new_ctrl, i);
         }
 
         pmu->fixed_ctr_ctrl = data;
 }
 
-static void reprogram_counter(struct kvm_pmu *pmu, int idx)
+static void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
 {
-        struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
+        struct kvm_pmc *pmc = global_idx_to_pmc(pmu, pmc_idx);
 
         if (!pmc)
                 return;
 
         if (pmc_is_gp(pmc))
                 reprogram_gp_counter(pmc, pmc->eventsel);
         else {
-                int fidx = idx - INTEL_PMC_IDX_FIXED;
-                reprogram_fixed_counter(pmc,
-                                fixed_ctrl_field(pmu->fixed_ctr_ctrl, fidx), fidx);
+                int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
+                u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);
+
+                reprogram_fixed_counter(pmc, ctrl, idx);
         }
 }
 
@@ -423,37 +433,43 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         return 1;
 }
 
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned pmc)
+/* check if idx is a valid index to access PMU */
+int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
 {
         struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-        bool fixed = pmc & (1u << 30);
-        pmc &= ~(3u << 30);
-        return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
-                (fixed && pmc >= pmu->nr_arch_fixed_counters);
+        bool fixed = idx & (1u << 30);
+        idx &= ~(3u << 30);
+        return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
+                (fixed && idx >= pmu->nr_arch_fixed_counters);
 }
 
-int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 {
         struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-        bool fast_mode = pmc & (1u << 31);
-        bool fixed = pmc & (1u << 30);
+        bool fast_mode = idx & (1u << 31);
+        bool fixed = idx & (1u << 30);
         struct kvm_pmc *counters;
-        u64 ctr;
+        u64 ctr_val;
 
-        pmc &= ~(3u << 30);
-        if (!fixed && pmc >= pmu->nr_arch_gp_counters)
+        idx &= ~(3u << 30);
+        if (!fixed && idx >= pmu->nr_arch_gp_counters)
                 return 1;
-        if (fixed && pmc >= pmu->nr_arch_fixed_counters)
+        if (fixed && idx >= pmu->nr_arch_fixed_counters)
                 return 1;
         counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
-        ctr = pmc_read_counter(&counters[pmc]);
+
+        ctr_val = pmc_read_counter(&counters[idx]);
         if (fast_mode)
-                ctr = (u32)ctr;
-        *data = ctr;
+                ctr_val = (u32)ctr_val;
 
+        *data = ctr_val;
         return 0;
 }
 
+/* refresh PMU settings. This function generally is called when underlying
+ * settings are changed (such as changes of PMU CPUID by guest VMs), which
+ * should rarely happen.
+ */
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
         struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
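Not part of the patch: a minimal stand-alone sketch of the fixed-counter control decoding that the renamed reprogram_fixed_counter() and fixed_ctrl_field() above perform, assuming the usual IA32_FIXED_CTR_CTRL layout (4 bits per counter, bits 1:0 enable/ring level, bit 3 PMI on overflow). The example MSR value and the use of plain C types instead of the kernel's u8/u64 are illustrative only.

/*
 * Illustrative sketch only -- not part of the patch above.
 * Decodes one 4-bit field of IA32_FIXED_CTR_CTRL the way
 * reprogram_fixed_counter() does after this rename.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned char fixed_ctrl_field(unsigned long long ctrl, int idx)
{
        return (ctrl >> (idx * 4)) & 0xf;       /* 4 control bits per fixed counter */
}

int main(void)
{
        unsigned long long fixed_ctr_ctrl = 0x0b0;      /* example value: counter 1 field = 0xb */
        int idx = 1;

        unsigned char ctrl = fixed_ctrl_field(fixed_ctr_ctrl, idx);
        unsigned int en_field = ctrl & 0x3;     /* 0 = disabled, 1 = OS, 2 = user, 3 = all rings */
        bool pmi = ctrl & 0x8;                  /* raise a PMI on overflow */

        printf("fixed counter %d: en_field=%u exclude_user=%d exclude_kernel=%d pmi=%d\n",
               idx, en_field, !(en_field & 0x2), !(en_field & 0x1), pmi);
        return 0;
}

With the example value 0x0b0, counter 1 counts in all rings and requests a PMI, which is why reprogram_fixed_counter() passes !(en_field & 0x2) and !(en_field & 0x1) as the exclude_user/exclude_kernel arguments.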