Skip to content

Commit e5af058

Browse files
Wei Huang authored and bonzini committed
KVM: x86/vPMU: reorder PMU functions
Keep called functions closer to their callers, and init/destroy functions next to each other.

Signed-off-by: Paolo Bonzini <[email protected]>
1 parent e84cfe4 commit e5af058

File tree

1 file changed

+78
-78
lines changed

1 file changed

+78
-78
lines changed

arch/x86/kvm/pmu.c

Lines changed: 78 additions & 78 deletions
Original file line numberDiff line numberDiff line change
@@ -83,12 +83,6 @@ static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
8383
return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
8484
}
8585

/*
 * Inject a performance-monitoring interrupt into the guest through the
 * local APIC's LVTPC entry.  A no-op when the vCPU has no in-kernel APIC.
 */
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.apic)
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}
9286
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
9387
{
9488
struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
@@ -324,6 +318,65 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
324318
reprogram_counter(pmu, bit);
325319
}
326320

321+
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
322+
{
323+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
324+
u64 bitmask;
325+
int bit;
326+
327+
bitmask = pmu->reprogram_pmi;
328+
329+
for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
330+
struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
331+
332+
if (unlikely(!pmc || !pmc->perf_event)) {
333+
clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
334+
continue;
335+
}
336+
337+
reprogram_counter(pmu, bit);
338+
}
339+
}
340+
341+
/* check if idx is a valid index to access PMU */
342+
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
343+
{
344+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
345+
bool fixed = idx & (1u << 30);
346+
idx &= ~(3u << 30);
347+
return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
348+
(fixed && idx >= pmu->nr_arch_fixed_counters);
349+
}
350+
351+
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
352+
{
353+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
354+
bool fast_mode = idx & (1u << 31);
355+
bool fixed = idx & (1u << 30);
356+
struct kvm_pmc *counters;
357+
u64 ctr_val;
358+
359+
idx &= ~(3u << 30);
360+
if (!fixed && idx >= pmu->nr_arch_gp_counters)
361+
return 1;
362+
if (fixed && idx >= pmu->nr_arch_fixed_counters)
363+
return 1;
364+
counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
365+
366+
ctr_val = pmc_read_counter(&counters[idx]);
367+
if (fast_mode)
368+
ctr_val = (u32)ctr_val;
369+
370+
*data = ctr_val;
371+
return 0;
372+
}
373+
374+
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
375+
{
376+
if (vcpu->arch.apic)
377+
kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
378+
}
379+
327380
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
328381
{
329382
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -433,39 +486,6 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
433486
return 1;
434487
}
435488

/* check if idx is a valid index to access PMU */
/*
 * NOTE(review): inverted sense — returns non-zero when @idx is out of
 * range for this vPMU, 0 when it names an existing counter.  Bit 30 of
 * the raw index selects the fixed-counter bank.
 */
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	idx &= ~(3u << 30);	/* strip the type/mode selector bits */
	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}
/*
 * Read a PMU counter for a guest RDPMC.  Bit 31 of @idx requests fast
 * mode (low 32 bits only); bit 30 selects the fixed-counter bank.
 * Returns 0 with the value in *data, or 1 on an out-of-range index.
 */
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fast_mode = idx & (1u << 31);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	u64 ctr_val;

	idx &= ~(3u << 30);	/* strip selector bits before bounds check */
	if (!fixed && idx >= pmu->nr_arch_gp_counters)
		return 1;
	if (fixed && idx >= pmu->nr_arch_fixed_counters)
		return 1;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;

	ctr_val = pmc_read_counter(&counters[idx]);
	if (fast_mode)
		ctr_val = (u32)ctr_val;	/* fast mode truncates to 32 bits */

	*data = ctr_val;
	return 0;
}
469489
/* refresh PMU settings. This function generally is called when underlying
470490
* settings are changed (such as changes of PMU CPUID by guest VMs), which
471491
* should rarely happen.
@@ -521,26 +541,6 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
521541
pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
522542
}
523543

/*
 * One-time construction of a vCPU's PMU state: zero the structure, stamp
 * every GP and fixed counter with its type, owning vCPU and global index,
 * set up the PMI irq_work, then apply current settings via
 * kvm_pmu_refresh().
 */
void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		/* fixed counters live above the GP range in the global index */
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
	}
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	kvm_pmu_refresh(vcpu);
}
544544
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
545545
{
546546
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -560,27 +560,27 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
560560
pmu->global_ovf_ctrl = 0;
561561
}
562562

/* Tear down a vCPU's PMU; currently identical to a reset. */
void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}
568-
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
563+
void kvm_pmu_init(struct kvm_vcpu *vcpu)
569564
{
565+
int i;
570566
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
571-
u64 bitmask;
572-
int bit;
573-
574-
bitmask = pmu->reprogram_pmi;
575-
576-
for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
577-
struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
578567

579-
if (unlikely(!pmc || !pmc->perf_event)) {
580-
clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
581-
continue;
582-
}
583-
584-
reprogram_counter(pmu, bit);
568+
memset(pmu, 0, sizeof(*pmu));
569+
for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
570+
pmu->gp_counters[i].type = KVM_PMC_GP;
571+
pmu->gp_counters[i].vcpu = vcpu;
572+
pmu->gp_counters[i].idx = i;
585573
}
574+
for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
575+
pmu->fixed_counters[i].type = KVM_PMC_FIXED;
576+
pmu->fixed_counters[i].vcpu = vcpu;
577+
pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
578+
}
579+
init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
580+
kvm_pmu_refresh(vcpu);
581+
}
/* Tear down a vCPU's PMU; currently identical to a reset. */
void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

0 commit comments

Comments
 (0)