Skip to content

Commit 212dba1

Browse files
Wei Huang authored and bonzini (Paolo Bonzini) committed
KVM: x86/vPMU: use the new macros to go between PMC, PMU and VCPU
Signed-off-by: Wei Huang <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 474a5bb commit 212dba1

File tree

1 file changed

+18
-20
lines changed

1 file changed

+18
-20
lines changed

arch/x86/kvm/pmu.c

Lines changed: 18 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -43,14 +43,14 @@ static bool pmc_is_gp(struct kvm_pmc *pmc)
4343

4444
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
4545
{
46-
struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
46+
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
4747

4848
return pmu->counter_bitmask[pmc->type];
4949
}
5050

5151
static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
5252
{
53-
struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
53+
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
5454
return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
5555
}
5656

@@ -91,10 +91,8 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
9191

9292
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
9393
{
94-
struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
95-
irq_work);
96-
struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
97-
arch.pmu);
94+
struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
95+
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
9896

9997
kvm_pmu_deliver_pmi(vcpu);
10098
}
@@ -104,7 +102,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
104102
struct pt_regs *regs)
105103
{
106104
struct kvm_pmc *pmc = perf_event->overflow_handler_context;
107-
struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
105+
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
108106
if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
109107
__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
110108
kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
@@ -115,7 +113,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
115113
struct perf_sample_data *data, struct pt_regs *regs)
116114
{
117115
struct kvm_pmc *pmc = perf_event->overflow_handler_context;
118-
struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
116+
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
119117
if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
120118
__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
121119
kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
@@ -128,7 +126,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
128126
* NMI context. Do it from irq work instead.
129127
*/
130128
if (!kvm_is_in_guest())
131-
irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
129+
irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
132130
else
133131
kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
134132
}
@@ -190,7 +188,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
190188
}
191189

192190
pmc->perf_event = event;
193-
clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
191+
clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
194192
}
195193

196194
static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
@@ -233,7 +231,7 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
233231
ARCH_PERFMON_EVENTSEL_CMASK |
234232
HSW_IN_TX |
235233
HSW_IN_TX_CHECKPOINTED))) {
236-
config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
234+
config = find_arch_event(pmc_to_pmu(pmc), event_select,
237235
unit_mask);
238236
if (config != PERF_COUNT_HW_MAX)
239237
type = PERF_TYPE_HARDWARE;
@@ -318,7 +316,7 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
318316

319317
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
320318
{
321-
struct kvm_pmu *pmu = &vcpu->arch.pmu;
319+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
322320
int ret;
323321

324322
switch (msr) {
@@ -339,7 +337,7 @@ bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
339337

340338
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
341339
{
342-
struct kvm_pmu *pmu = &vcpu->arch.pmu;
340+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
343341
struct kvm_pmc *pmc;
344342

345343
switch (index) {
@@ -370,7 +368,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
370368

371369
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
372370
{
373-
struct kvm_pmu *pmu = &vcpu->arch.pmu;
371+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
374372
struct kvm_pmc *pmc;
375373
u32 index = msr_info->index;
376374
u64 data = msr_info->data;
@@ -427,7 +425,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
427425

428426
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned pmc)
429427
{
430-
struct kvm_pmu *pmu = &vcpu->arch.pmu;
428+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
431429
bool fixed = pmc & (1u << 30);
432430
pmc &= ~(3u << 30);
433431
return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
@@ -436,7 +434,7 @@ int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned pmc)
436434

437435
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
438436
{
439-
struct kvm_pmu *pmu = &vcpu->arch.pmu;
437+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
440438
bool fast_mode = pmc & (1u << 31);
441439
bool fixed = pmc & (1u << 30);
442440
struct kvm_pmc *counters;
@@ -458,7 +456,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
458456

459457
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
460458
{
461-
struct kvm_pmu *pmu = &vcpu->arch.pmu;
459+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
462460
struct kvm_cpuid_entry2 *entry;
463461
union cpuid10_eax eax;
464462
union cpuid10_edx edx;
@@ -510,7 +508,7 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
510508
void kvm_pmu_init(struct kvm_vcpu *vcpu)
511509
{
512510
int i;
513-
struct kvm_pmu *pmu = &vcpu->arch.pmu;
511+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
514512

515513
memset(pmu, 0, sizeof(*pmu));
516514
for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
@@ -529,7 +527,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
529527

530528
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
531529
{
532-
struct kvm_pmu *pmu = &vcpu->arch.pmu;
530+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
533531
int i;
534532

535533
irq_work_sync(&pmu->irq_work);
@@ -553,7 +551,7 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
553551

554552
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
555553
{
556-
struct kvm_pmu *pmu = &vcpu->arch.pmu;
554+
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
557555
u64 bitmask;
558556
int bit;
559557

0 commit comments

Comments (0)