@@ -43,14 +43,14 @@ static bool pmc_is_gp(struct kvm_pmc *pmc)
 
 static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
 {
-	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 
 	return pmu->counter_bitmask[pmc->type];
 }
 
 static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
 {
-	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
 }
 
@@ -91,10 +91,8 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 
 static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
 {
-	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
-			irq_work);
-	struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
-			arch.pmu);
+	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
+	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
 
 	kvm_pmu_deliver_pmi(vcpu);
 }
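Note how the removed lines spell out the relationship the new helper encapsulates: the first container_of() (irq_work to PMU) is merely rejoined onto one line, while the second (PMU to vCPU) becomes pmu_to_vcpu(). That helper's definition is not part of this diff; a minimal sketch of what it presumably expands to, based on the removed container_of() call, is:

	/* Hypothetical sketch, not shown in this diff: recover the enclosing
	 * vCPU from its embedded PMU, exactly as the removed call did. */
	#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))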
@@ -104,7 +102,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
 		struct pt_regs *regs)
 {
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
-	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
@@ -115,7 +113,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
 		struct perf_sample_data *data, struct pt_regs *regs)
 {
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
-	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
@@ -128,7 +126,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
 		 * NMI context. Do it from irq work instead.
 		 */
 		if (!kvm_is_in_guest())
-			irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
+			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
 		else
 			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
 	}
@@ -190,7 +188,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 	}
 
 	pmc->perf_event = event;
-	clear_bit(pmc->idx, (unsigned long *)&pmc->vcpu->arch.pmu.reprogram_pmi);
+	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
 }
 
 static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
@@ -233,7 +231,7 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 			  ARCH_PERFMON_EVENTSEL_CMASK |
 			  HSW_IN_TX |
 			  HSW_IN_TX_CHECKPOINTED))) {
-		config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
+		config = find_arch_event(pmc_to_pmu(pmc), event_select,
 				unit_mask);
 		if (config != PERF_COUNT_HW_MAX)
 			type = PERF_TYPE_HARDWARE;
@@ -318,7 +316,7 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	int ret;
 
 	switch (msr) {
@@ -339,7 +337,7 @@ bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
 
 	switch (index) {
@@ -370,7 +368,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
 	u32 index = msr_info->index;
 	u64 data = msr_info->data;
@@ -427,7 +425,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned pmc)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	bool fixed = pmc & (1u << 30);
 	pmc &= ~(3u << 30);
 	return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
@@ -436,7 +434,7 @@ int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned pmc)
 
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	bool fast_mode = pmc & (1u << 31);
 	bool fixed = pmc & (1u << 30);
 	struct kvm_pmc *counters;
@@ -458,7 +456,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
 
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_cpuid_entry2 *entry;
 	union cpuid10_eax eax;
 	union cpuid10_edx edx;
@@ -510,7 +508,7 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
 {
 	int i;
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
 	memset(pmu, 0, sizeof(*pmu));
 	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
@@ -529,7 +527,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
 
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	int i;
 
 	irq_work_sync(&pmu->irq_work);
@@ -553,7 +551,7 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
 
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	u64 bitmask;
 	int bit;
 
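Every other hunk in this diff replaces a spelled-out pointer chain, &vcpu->arch.pmu or &pmc->vcpu->arch.pmu, with vcpu_to_pmu() or pmc_to_pmu(). Those accessor definitions are not shown here either; going only by the expressions being removed, minimal hypothetical sketches (presumably living in a shared KVM PMU header) would be:

	/* Hypothetical sketches, inferred from the removed expressions above;
	 * the real definitions are outside this diff. */
	#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)          /* was &vcpu->arch.pmu */
	#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)      /* was &pmc->vcpu->arch.pmu */

Centralizing the container relationships this way means a later change to where the PMU is embedded in the vCPU only touches the accessor definitions, not every call site converted above.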