
Commit e84cfe4

Wei Huang authored and Paolo Bonzini committed
KVM: x86/vPMU: whitespace and stylistic adjustments in PMU code
Signed-off-by: Wei Huang <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 212dba1 commit e84cfe4

File tree

1 file changed (+64, -48 lines)


arch/x86/kvm/pmu.c

Lines changed: 64 additions & 48 deletions
@@ -103,25 +103,31 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
 {
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
-	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+
+	if (!test_and_set_bit(pmc->idx,
+			      (unsigned long *)&pmu->reprogram_pmi)) {
 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
 	}
 }
 
 static void kvm_perf_overflow_intr(struct perf_event *perf_event,
-		struct perf_sample_data *data, struct pt_regs *regs)
+				   struct perf_sample_data *data,
+				   struct pt_regs *regs)
 {
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
-	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+
+	if (!test_and_set_bit(pmc->idx,
+			      (unsigned long *)&pmu->reprogram_pmi)) {
 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+
 		/*
 		 * Inject PMI. If vcpu was in a guest mode during NMI PMI
 		 * can be ejected on a guest mode re-entry. Otherwise we can't
 		 * be sure that vcpu wasn't executing hlt instruction at the
-		 * time of vmexit and is not going to re-enter guest mode until,
+		 * time of vmexit and is not going to re-enter guest mode until
 		 * woken up. So we should wake it, but this is impossible from
 		 * NMI context. Do it from irq work instead.
 		 */
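
Note (annotation, not part of the diff): both overflow handlers run in NMI context, so they only set the counter's bit in pmu->reprogram_pmi and raise KVM_REQ_PMU; the actual reprogramming happens later in vCPU context. A minimal sketch of that consumer, modeled on kvm_pmu_handle_event() elsewhere in this file; reconstructed from memory, so details may differ:

/* Sketch: consumer of reprogram_pmi, run in vCPU context once
 * KVM_REQ_PMU is serviced. Illustrative only. */
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 bitmask = pmu->reprogram_pmi;
	int bit;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);

		/* counter was removed or never backed by a perf event */
		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}
}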
@@ -157,8 +163,9 @@ static void pmc_stop_counter(struct kvm_pmc *pmc)
 }
 
 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
-		unsigned config, bool exclude_user, bool exclude_kernel,
-		bool intr, bool in_tx, bool in_tx_cp)
+				  unsigned config, bool exclude_user,
+				  bool exclude_kernel, bool intr,
+				  bool in_tx, bool in_tx_cp)
 {
 	struct perf_event *event;
 	struct perf_event_attr attr = {
@@ -171,6 +178,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 		.exclude_kernel = exclude_kernel,
 		.config = config,
 	};
+
 	if (in_tx)
 		attr.config |= HSW_IN_TX;
 	if (in_tx_cp)
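
Note (annotation, not part of the diff): the `};` above closes the perf_event_attr initializer whose tail appears as context. For orientation, the full initializer in this function reads roughly as follows; this is a reconstruction, and the exact field list may differ:

	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};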
@@ -182,8 +190,8 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 						 intr ? kvm_perf_overflow_intr :
 						 kvm_perf_overflow, pmc);
 	if (IS_ERR(event)) {
-		printk_once("kvm: pmu event creation failed %ld\n",
-				PTR_ERR(event));
+		printk_once("kvm_pmu: event creation failed %ld\n",
+			    PTR_ERR(event));
 		return;
 	}
 
@@ -227,10 +235,10 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 
 	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
-				ARCH_PERFMON_EVENTSEL_INV |
-				ARCH_PERFMON_EVENTSEL_CMASK |
-				HSW_IN_TX |
-				HSW_IN_TX_CHECKPOINTED))) {
+			  ARCH_PERFMON_EVENTSEL_INV |
+			  ARCH_PERFMON_EVENTSEL_CMASK |
+			  HSW_IN_TX |
+			  HSW_IN_TX_CHECKPOINTED))) {
 		config = find_arch_event(pmc_to_pmu(pmc), event_select,
 					 unit_mask);
 		if (config != PERF_COUNT_HW_MAX)
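
Note (annotation, not part of the diff): event_select and unit_mask are the low two bytes of the guest's PERFEVTSELn value (bits 7:0 and 15:8, per the Intel SDM). A small worked example with a hypothetical eventsel value:

	/* Hypothetical guest write: event 0x2e, umask 0x4f (LLC references),
	 * with USR, OS, INT and EN set. */
	u64 eventsel = 0x534f2e;
	u8 event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;	/* 0x2e */
	u8 unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;	/* 0x4f */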
@@ -241,28 +249,28 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 		config = eventsel & X86_RAW_EVENT_MASK;
 
 	pmc_reprogram_counter(pmc, type, config,
-			!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
-			!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-			eventsel & ARCH_PERFMON_EVENTSEL_INT,
-			(eventsel & HSW_IN_TX),
-			(eventsel & HSW_IN_TX_CHECKPOINTED));
+			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
+			      (eventsel & HSW_IN_TX),
+			      (eventsel & HSW_IN_TX_CHECKPOINTED));
 }
 
-static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
+static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
 {
-	unsigned en = en_pmi & 0x3;
-	bool pmi = en_pmi & 0x8;
+	unsigned en_field = ctrl & 0x3;
+	bool pmi = ctrl & 0x8;
 
 	pmc_stop_counter(pmc);
 
-	if (!en || !pmc_is_enabled(pmc))
+	if (!en_field || !pmc_is_enabled(pmc))
 		return;
 
 	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
-			arch_events[fixed_pmc_events[idx]].event_type,
-			!(en & 0x2), /* exclude user */
-			!(en & 0x1), /* exclude kernel */
-			pmi, false, false);
+			      arch_events[fixed_pmc_events[idx]].event_type,
+			      !(en_field & 0x2), /* exclude user */
+			      !(en_field & 0x1), /* exclude kernel */
+			      pmi, false, false);
 }
 
 static inline u8 fixed_ctrl_field(u64 ctrl, int idx)
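
Note (annotation, not part of the diff): ctrl is one 4-bit per-counter field of the guest's IA32_FIXED_CTR_CTRL MSR: bit 0 enables ring-0 counting, bit 1 ring-3 counting, bit 3 the overflow PMI. Using the fixed_ctrl_field() helper shown as context above, a worked example with a hypothetical MSR value:

	u64 fixed_ctr_ctrl = 0x0b0;			/* hypothetical guest value */
	u8 ctrl = fixed_ctrl_field(fixed_ctr_ctrl, 1);	/* (0x0b0 >> 4) & 0xf = 0xb */
	unsigned en_field = ctrl & 0x3;			/* 0x3: count in ring 0 and ring 3 */
	bool pmi = ctrl & 0x8;				/* true: raise PMI on overflow */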
@@ -275,31 +283,33 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 	int i;
 
 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-		u8 en_pmi = fixed_ctrl_field(data, i);
+		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
+		u8 new_ctrl = fixed_ctrl_field(data, i);
 		struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
 
-		if (fixed_ctrl_field(pmu->fixed_ctr_ctrl, i) == en_pmi)
+		if (old_ctrl == new_ctrl)
 			continue;
 
-		reprogram_fixed_counter(pmc, en_pmi, i);
+		reprogram_fixed_counter(pmc, new_ctrl, i);
 	}
 
 	pmu->fixed_ctr_ctrl = data;
 }
 
-static void reprogram_counter(struct kvm_pmu *pmu, int idx)
+static void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
 {
-	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
+	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, pmc_idx);
 
 	if (!pmc)
 		return;
 
 	if (pmc_is_gp(pmc))
 		reprogram_gp_counter(pmc, pmc->eventsel);
 	else {
-		int fidx = idx - INTEL_PMC_IDX_FIXED;
-		reprogram_fixed_counter(pmc,
-			fixed_ctrl_field(pmu->fixed_ctr_ctrl, fidx), fidx);
+		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
+		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);
+
+		reprogram_fixed_counter(pmc, ctrl, idx);
 	}
 }
 
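Note (annotation, not part of the diff): pmc_idx lives in the global index space KVM shares with perf: GP counters occupy indices 0..n-1 and fixed counters start at INTEL_PMC_IDX_FIXED (32), which is why the fixed branch subtracts that base. A tiny illustration:

	int pmc_idx = INTEL_PMC_IDX_FIXED + 1;		/* fixed counter 1 arrives as 33 */
	int idx = pmc_idx - INTEL_PMC_IDX_FIXED;	/* back to array index 1 */
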
@@ -423,37 +433,43 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	return 1;
 }
 
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned pmc)
+/* check if idx is a valid index to access PMU */
+int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	bool fixed = pmc & (1u << 30);
-	pmc &= ~(3u << 30);
-	return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
-		(fixed && pmc >= pmu->nr_arch_fixed_counters);
+	bool fixed = idx & (1u << 30);
+	idx &= ~(3u << 30);
+	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
+		(fixed && idx >= pmu->nr_arch_fixed_counters);
 }
 
-int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	bool fast_mode = pmc & (1u << 31);
-	bool fixed = pmc & (1u << 30);
+	bool fast_mode = idx & (1u << 31);
+	bool fixed = idx & (1u << 30);
 	struct kvm_pmc *counters;
-	u64 ctr;
+	u64 ctr_val;
 
-	pmc &= ~(3u << 30);
-	if (!fixed && pmc >= pmu->nr_arch_gp_counters)
+	idx &= ~(3u << 30);
+	if (!fixed && idx >= pmu->nr_arch_gp_counters)
 		return 1;
-	if (fixed && pmc >= pmu->nr_arch_fixed_counters)
+	if (fixed && idx >= pmu->nr_arch_fixed_counters)
 		return 1;
 	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
-	ctr = pmc_read_counter(&counters[pmc]);
+
+	ctr_val = pmc_read_counter(&counters[idx]);
 	if (fast_mode)
-		ctr = (u32)ctr;
-	*data = ctr;
+		ctr_val = (u32)ctr_val;
 
+	*data = ctr_val;
 	return 0;
 }
 
+/* refresh PMU settings. This function generally is called when underlying
+ * settings are changed (such as changes of PMU CPUID by guest VMs), which
+ * should rarely happen.
+ */
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
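
Note (annotation, not part of the diff): idx here is the guest's RDPMC ecx operand: bit 30 selects the fixed-counter space, bit 31 requests "fast" mode (low 32 bits only), and the remaining bits index into the selected counter array. Decoding a sample value:

	unsigned idx = 0x40000001;		/* RDPMC ecx: fixed counter 1 */
	bool fast_mode = idx & (1u << 31);	/* false: return full 64-bit value */
	bool fixed = idx & (1u << 30);		/* true: fixed-counter space */

	idx &= ~(3u << 30);			/* idx = 1 -> fixed_counters[1] */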
