@@ -83,12 +83,6 @@ static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
 	return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
 }
 
-void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.apic)
-		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
-}
-
 static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
 {
 	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
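Note: the context above shows global_idx_to_pmc() splitting one global index space into two counter classes: indices below INTEL_PMC_IDX_FIXED are general-purpose counters, indices at or above it are fixed counters. A minimal user-space sketch of that split, assuming the x86 value INTEL_PMC_IDX_FIXED == 32 from asm/perf_event.h (illustrative only, not the kernel code):

    #include <stdio.h>

    #define INTEL_PMC_IDX_FIXED 32	/* assumed x86 value */

    /* mirrors the split performed by global_idx_to_pmc() */
    static void classify(int idx)
    {
    	if (idx >= INTEL_PMC_IDX_FIXED)
    		printf("global idx %d -> fixed counter %d\n",
    		       idx, idx - INTEL_PMC_IDX_FIXED);
    	else
    		printf("global idx %d -> gp counter %d\n", idx, idx);
    }

    int main(void)
    {
    	classify(0);	/* gp counter 0 */
    	classify(33);	/* fixed counter 1 */
    	return 0;
    }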
@@ -324,6 +318,65 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 		reprogram_counter(pmu, bit);
 }
 
+void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	u64 bitmask;
+	int bit;
+
+	bitmask = pmu->reprogram_pmi;
+
+	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
+		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
+
+		if (unlikely(!pmc || !pmc->perf_event)) {
+			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+			continue;
+		}
+
+		reprogram_counter(pmu, bit);
+	}
+}
+
+/* check if idx is a valid index to access PMU */
+int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	bool fixed = idx & (1u << 30);
+	idx &= ~(3u << 30);
+	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
+		(fixed && idx >= pmu->nr_arch_fixed_counters);
+}
+
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	bool fast_mode = idx & (1u << 31);
+	bool fixed = idx & (1u << 30);
+	struct kvm_pmc *counters;
+	u64 ctr_val;
+
+	idx &= ~(3u << 30);
+	if (!fixed && idx >= pmu->nr_arch_gp_counters)
+		return 1;
+	if (fixed && idx >= pmu->nr_arch_fixed_counters)
+		return 1;
+	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
+
+	ctr_val = pmc_read_counter(&counters[idx]);
+	if (fast_mode)
+		ctr_val = (u32)ctr_val;
+
+	*data = ctr_val;
+	return 0;
+}
+
+void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.apic)
+		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
+}
+
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
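Note: kvm_pmu_rdpmc() and kvm_pmu_is_valid_msr_idx() decode the guest's RDPMC index the same way: bit 30 selects the fixed-counter class, bit 31 requests a fast (32-bit) read, and the remaining low bits index a counter within the class. A standalone sketch of that decode (plain C illustration, not the kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* decodes the RDPMC index as kvm_pmu_rdpmc() interprets it */
    static void decode_rdpmc_idx(unsigned idx)
    {
    	bool fast_mode = idx & (1u << 31);
    	bool fixed = idx & (1u << 30);

    	idx &= ~(3u << 30);	/* strip both flag bits */
    	printf("%s counter %u, %s read\n",
    	       fixed ? "fixed" : "gp", idx,
    	       fast_mode ? "32-bit" : "full-width");
    }

    int main(void)
    {
    	decode_rdpmc_idx(0);			/* gp counter 0, full-width */
    	decode_rdpmc_idx((1u << 30) | 1);	/* fixed counter 1 */
    	decode_rdpmc_idx((1u << 31) | 2);	/* gp counter 2, fast */
    	return 0;
    }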
@@ -433,39 +486,6 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	return 1;
 }
 
-/* check if idx is a valid index to access PMU */
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
-{
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	bool fixed = idx & (1u << 30);
-	idx &= ~(3u << 30);
-	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
-		(fixed && idx >= pmu->nr_arch_fixed_counters);
-}
-
-int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
-{
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	bool fast_mode = idx & (1u << 31);
-	bool fixed = idx & (1u << 30);
-	struct kvm_pmc *counters;
-	u64 ctr_val;
-
-	idx &= ~(3u << 30);
-	if (!fixed && idx >= pmu->nr_arch_gp_counters)
-		return 1;
-	if (fixed && idx >= pmu->nr_arch_fixed_counters)
-		return 1;
-	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
-
-	ctr_val = pmc_read_counter(&counters[idx]);
-	if (fast_mode)
-		ctr_val = (u32)ctr_val;
-
-	*data = ctr_val;
-	return 0;
-}
-
 /* refresh PMU settings. This function generally is called when underlying
  * settings are changed (such as changes of PMU CPUID by guest VMs), which
  * should rarely happen.
@@ -521,26 +541,6 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
 }
 
-void kvm_pmu_init(struct kvm_vcpu *vcpu)
-{
-	int i;
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-
-	memset(pmu, 0, sizeof(*pmu));
-	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
-		pmu->gp_counters[i].type = KVM_PMC_GP;
-		pmu->gp_counters[i].vcpu = vcpu;
-		pmu->gp_counters[i].idx = i;
-	}
-	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
-		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
-		pmu->fixed_counters[i].vcpu = vcpu;
-		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
-	}
-	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
-	kvm_pmu_refresh(vcpu);
-}
-
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -560,27 +560,27 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 	pmu->global_ovf_ctrl = 0;
 }
 
-void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
-{
-	kvm_pmu_reset(vcpu);
-}
-
-void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
+void kvm_pmu_init(struct kvm_vcpu *vcpu)
 {
+	int i;
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	u64 bitmask;
-	int bit;
-
-	bitmask = pmu->reprogram_pmi;
-
-	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
-		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
 
-		if (unlikely(!pmc || !pmc->perf_event)) {
-			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
-			continue;
-		}
-
-		reprogram_counter(pmu, bit);
+	memset(pmu, 0, sizeof(*pmu));
+	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
+		pmu->gp_counters[i].type = KVM_PMC_GP;
+		pmu->gp_counters[i].vcpu = vcpu;
+		pmu->gp_counters[i].idx = i;
 	}
+	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
+		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
+		pmu->fixed_counters[i].vcpu = vcpu;
+		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
+	}
+	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
+	kvm_pmu_refresh(vcpu);
+}
+
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
+{
+	kvm_pmu_reset(vcpu);
 }
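Note: kvm_pmu_handle_event(), which this diff moves earlier in the file, snapshots pmu->reprogram_pmi before walking it, so bits set concurrently by a PMI are left for a later pass, while stale bits whose counter lost its perf_event are cleared from the live mask. A user-space sketch of that snapshot-then-walk pattern, with plain C stand-ins for the kernel's for_each_set_bit()/clear_bit() (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t reprogram_pmi;	/* stands in for pmu->reprogram_pmi */

    static void handle_pending(void)
    {
    	uint64_t bitmask = reprogram_pmi;	/* snapshot, as in kvm_pmu_handle_event() */
    	int bit;

    	for (bit = 0; bit < 64; bit++) {
    		if (!(bitmask & (1ull << bit)))
    			continue;
    		/* the kernel either clears a stale bit here or reprograms the counter */
    		printf("reprogram counter at global idx %d\n", bit);
    		reprogram_pmi &= ~(1ull << bit);
    	}
    }

    int main(void)
    {
    	reprogram_pmi = (1ull << 0) | (1ull << 33);	/* gp 0 and fixed 1 pending */
    	handle_pending();
    	return 0;
    }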