Commit ce360c2
Merge tag 'kvmarm-fixes-6.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 6.16, take #2

- Rework of the accessors for system registers that are directly written to
  memory, so that sanitisation of the in-memory value happens at the correct
  time (after the read, or before the write). For convenience, RMW-style
  accessors are also provided.

- Multiple fixes for the so-called "arch_timer_edge_cases" selftest, which
  was always broken.
Parents: 19272b3 and fad4cf9

16 files changed: 151 additions, 116 deletions

arch/arm64/include/asm/kvm_host.h (27 additions, 5 deletions)

@@ -1107,14 +1107,36 @@ static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 #define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
 
 u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
+
+#define __vcpu_assign_sys_reg(v, r, val) \
+	do { \
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
+		u64 __v = (val); \
+		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
+			__v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
+		\
+		ctxt_sys_reg(ctxt, (r)) = __v; \
+	} while (0)
+
+#define __vcpu_rmw_sys_reg(v, r, op, val) \
+	do { \
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
+		u64 __v = ctxt_sys_reg(ctxt, (r)); \
+		__v op (val); \
+		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
+			__v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
+		\
+		ctxt_sys_reg(ctxt, (r)) = __v; \
+	} while (0)
+
 #define __vcpu_sys_reg(v,r) \
-	(*({ \
+	({ \
 		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
-		u64 *__r = __ctxt_sys_reg(ctxt, (r)); \
+		u64 __v = ctxt_sys_reg(ctxt, (r)); \
 		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
-			*__r = kvm_vcpu_apply_reg_masks((v), (r), *__r);\
-		__r; \
-	}))
+			__v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
+		__v; \
+	})
 
 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
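Taken together, the header now provides three forms: __vcpu_sys_reg() is a pure, sanitised read (an rvalue, no longer an lvalue), __vcpu_assign_sys_reg() sanitises before storing, and __vcpu_rmw_sys_reg() folds an operator into the read/sanitise/write sequence. A minimal sketch of how the three fit together; illustrative only, with HCR_EL2/HCR_VM picked as convenient register and bit names rather than taken from this diff:

	/* Illustrative only: the three accessor forms introduced above. */
	static void example_sysreg_usage(struct kvm_vcpu *vcpu)
	{
		/* Pure read: returns a sanitised value, never writes back. */
		u64 hcr = __vcpu_sys_reg(vcpu, HCR_EL2);

		/* Plain write: the value is sanitised before being stored. */
		__vcpu_assign_sys_reg(vcpu, HCR_EL2, hcr | HCR_VM);

		/* Read-modify-write: read, apply the operator, sanitise, store. */
		__vcpu_rmw_sys_reg(vcpu, HCR_EL2, &=, ~HCR_VM);
	}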

arch/arm64/kvm/arch_timer.c (9 additions, 9 deletions)

@@ -108,16 +108,16 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
 
 	switch(arch_timer_ctx_index(ctxt)) {
 	case TIMER_VTIMER:
-		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTV_CTL_EL0, ctl);
 		break;
 	case TIMER_PTIMER:
-		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTP_CTL_EL0, ctl);
 		break;
 	case TIMER_HVTIMER:
-		__vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTHV_CTL_EL2, ctl);
 		break;
 	case TIMER_HPTIMER:
-		__vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
+		__vcpu_assign_sys_reg(vcpu, CNTHP_CTL_EL2, ctl);
 		break;
 	default:
 		WARN_ON(1);

@@ -130,16 +130,16 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
 
 	switch(arch_timer_ctx_index(ctxt)) {
 	case TIMER_VTIMER:
-		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTV_CVAL_EL0, cval);
 		break;
 	case TIMER_PTIMER:
-		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, cval);
 		break;
 	case TIMER_HVTIMER:
-		__vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTHV_CVAL_EL2, cval);
 		break;
 	case TIMER_HPTIMER:
-		__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
+		__vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, cval);
 		break;
 	default:
 		WARN_ON(1);

@@ -1036,7 +1036,7 @@ void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 	if (vcpu_has_nv(vcpu)) {
 		struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;
 
-		offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+		offs->vcpu_offset = __ctxt_sys_reg(&vcpu->arch.ctxt, CNTVOFF_EL2);
 		offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
 	}
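The last hunk above is not a mechanical conversion: the timer code keeps a long-lived pointer into the in-memory register file, and since __vcpu_sys_reg() now yields a value rather than an lvalue, the pointer must be taken with __ctxt_sys_reg() directly, which bypasses sanitisation. The same pointer idiom appears in fpsimd.c below. A minimal sketch of the idiom (illustrative only; the helper name is hypothetical):

	/* Illustrative: code that needs a raw pointer into the register file
	 * now uses __ctxt_sys_reg(); masking values read or written through
	 * that pointer becomes the caller's responsibility. */
	static u64 *vtimer_offset_ptr(struct kvm_vcpu *vcpu)
	{
		return __ctxt_sys_reg(&vcpu->arch.ctxt, CNTVOFF_EL2);
	}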

arch/arm64/kvm/debug.c (2 additions, 2 deletions)

@@ -216,9 +216,9 @@ void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu)
 void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
 {
 	if (val & OSLAR_EL1_OSLK)
-		__vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK;
+		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
 	else
-		__vcpu_sys_reg(vcpu, OSLSR_EL1) &= ~OSLSR_EL1_OSLK;
+		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);
 
 	preempt_disable();
 	kvm_arch_vcpu_put(vcpu);
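This is the canonical RMW conversion: the compound-assignment operator becomes the op argument of the macro. Following the macro definition in kvm_host.h above, the first call expands to roughly the following (an illustrative expansion, not verbatim preprocessor output):

	do {
		const struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
		u64 __v = ctxt_sys_reg(ctxt, OSLSR_EL1);

		__v |= OSLSR_EL1_OSLK;		/* apply the operator */
		if (vcpu_has_nv(vcpu) && OSLSR_EL1 >= __SANITISED_REG_START__)
			__v = kvm_vcpu_apply_reg_masks(vcpu, OSLSR_EL1, __v);

		ctxt_sys_reg(ctxt, OSLSR_EL1) = __v;	/* sanitised write-back */
	} while (0);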

arch/arm64/kvm/fpsimd.c (2 additions, 2 deletions)

@@ -103,8 +103,8 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
 	fp_state.sve_state = vcpu->arch.sve_state;
 	fp_state.sve_vl = vcpu->arch.sve_max_vl;
 	fp_state.sme_state = NULL;
-	fp_state.svcr = &__vcpu_sys_reg(vcpu, SVCR);
-	fp_state.fpmr = &__vcpu_sys_reg(vcpu, FPMR);
+	fp_state.svcr = __ctxt_sys_reg(&vcpu->arch.ctxt, SVCR);
+	fp_state.fpmr = __ctxt_sys_reg(&vcpu->arch.ctxt, FPMR);
 	fp_state.fp_type = &vcpu->arch.fp_type;
 
 	if (vcpu_has_sve(vcpu))

arch/arm64/kvm/hyp/exception.c (2 additions, 2 deletions)

@@ -37,7 +37,7 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 	if (unlikely(vcpu_has_nv(vcpu)))
 		vcpu_write_sys_reg(vcpu, val, reg);
 	else if (!__vcpu_write_sys_reg_to_cpu(val, reg))
-		__vcpu_sys_reg(vcpu, reg) = val;
+		__vcpu_assign_sys_reg(vcpu, reg, val);
 }
 
 static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,

@@ -51,7 +51,7 @@ static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
 	} else if (has_vhe()) {
 		write_sysreg_el1(val, SYS_SPSR);
 	} else {
-		__vcpu_sys_reg(vcpu, SPSR_EL1) = val;
+		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, val);
 	}
 }

arch/arm64/kvm/hyp/include/hyp/switch.h (2 additions, 2 deletions)

@@ -45,7 +45,7 @@ static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
 	if (!vcpu_el1_is_32bit(vcpu))
 		return;
 
-	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
+	__vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
 }
 
 static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)

@@ -456,7 +456,7 @@ static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
 	 */
 	if (vcpu_has_sve(vcpu)) {
 		zcr_el1 = read_sysreg_el1(SYS_ZCR);
-		__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
+		__vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);
 
 		/*
 		 * The guest's state is always saved using the guest's max VL.

arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h (3 additions, 3 deletions)

@@ -307,11 +307,11 @@ static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
 	vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
 	vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);
 
-	__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
-	__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);
+	__vcpu_assign_sys_reg(vcpu, DACR32_EL2, read_sysreg(dacr32_el2));
+	__vcpu_assign_sys_reg(vcpu, IFSR32_EL2, read_sysreg(ifsr32_el2));
 
 	if (has_vhe() || kvm_debug_regs_in_use(vcpu))
-		__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
+		__vcpu_assign_sys_reg(vcpu, DBGVCR32_EL2, read_sysreg(dbgvcr32_el2));
 }
 
 static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)

arch/arm64/kvm/hyp/nvhe/hyp-main.c (2 additions, 2 deletions)

@@ -26,7 +26,7 @@ void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
 
 static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
 {
-	__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+	__vcpu_assign_sys_reg(vcpu, ZCR_EL1, read_sysreg_el1(SYS_ZCR));
 	/*
 	 * On saving/restoring guest sve state, always use the maximum VL for
 	 * the guest. The layout of the data when saving the sve state depends

@@ -79,7 +79,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
 
 	has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
 	if (has_fpmr)
-		__vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
+		__vcpu_assign_sys_reg(vcpu, FPMR, read_sysreg_s(SYS_FPMR));
 
 	if (system_supports_sve())
 		__hyp_sve_restore_host();

arch/arm64/kvm/hyp/vhe/switch.c (2 additions, 2 deletions)

@@ -223,9 +223,9 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 	 */
 	val = read_sysreg_el0(SYS_CNTP_CVAL);
 	if (map.direct_ptimer == vcpu_ptimer(vcpu))
-		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
+		__vcpu_assign_sys_reg(vcpu, CNTP_CVAL_EL0, val);
 	if (map.direct_ptimer == vcpu_hptimer(vcpu))
-		__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
+		__vcpu_assign_sys_reg(vcpu, CNTHP_CVAL_EL2, val);
 
 	offset = read_sysreg_s(SYS_CNTPOFF_EL2);

arch/arm64/kvm/hyp/vhe/sysreg-sr.c (24 additions, 24 deletions)

@@ -18,17 +18,17 @@
 static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
 {
 	/* These registers are common with EL1 */
-	__vcpu_sys_reg(vcpu, PAR_EL1) = read_sysreg(par_el1);
-	__vcpu_sys_reg(vcpu, TPIDR_EL1) = read_sysreg(tpidr_el1);
-
-	__vcpu_sys_reg(vcpu, ESR_EL2) = read_sysreg_el1(SYS_ESR);
-	__vcpu_sys_reg(vcpu, AFSR0_EL2) = read_sysreg_el1(SYS_AFSR0);
-	__vcpu_sys_reg(vcpu, AFSR1_EL2) = read_sysreg_el1(SYS_AFSR1);
-	__vcpu_sys_reg(vcpu, FAR_EL2) = read_sysreg_el1(SYS_FAR);
-	__vcpu_sys_reg(vcpu, MAIR_EL2) = read_sysreg_el1(SYS_MAIR);
-	__vcpu_sys_reg(vcpu, VBAR_EL2) = read_sysreg_el1(SYS_VBAR);
-	__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2) = read_sysreg_el1(SYS_CONTEXTIDR);
-	__vcpu_sys_reg(vcpu, AMAIR_EL2) = read_sysreg_el1(SYS_AMAIR);
+	__vcpu_assign_sys_reg(vcpu, PAR_EL1, read_sysreg(par_el1));
+	__vcpu_assign_sys_reg(vcpu, TPIDR_EL1, read_sysreg(tpidr_el1));
+
+	__vcpu_assign_sys_reg(vcpu, ESR_EL2, read_sysreg_el1(SYS_ESR));
+	__vcpu_assign_sys_reg(vcpu, AFSR0_EL2, read_sysreg_el1(SYS_AFSR0));
+	__vcpu_assign_sys_reg(vcpu, AFSR1_EL2, read_sysreg_el1(SYS_AFSR1));
+	__vcpu_assign_sys_reg(vcpu, FAR_EL2, read_sysreg_el1(SYS_FAR));
+	__vcpu_assign_sys_reg(vcpu, MAIR_EL2, read_sysreg_el1(SYS_MAIR));
+	__vcpu_assign_sys_reg(vcpu, VBAR_EL2, read_sysreg_el1(SYS_VBAR));
+	__vcpu_assign_sys_reg(vcpu, CONTEXTIDR_EL2, read_sysreg_el1(SYS_CONTEXTIDR));
+	__vcpu_assign_sys_reg(vcpu, AMAIR_EL2, read_sysreg_el1(SYS_AMAIR));
 
 	/*
 	 * In VHE mode those registers are compatible between EL1 and EL2,

@@ -46,21 +46,21 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
 		 * are always trapped, ensuring that the in-memory
 		 * copy is always up-to-date. A small blessing...
 		 */
-		__vcpu_sys_reg(vcpu, SCTLR_EL2) = read_sysreg_el1(SYS_SCTLR);
-		__vcpu_sys_reg(vcpu, TTBR0_EL2) = read_sysreg_el1(SYS_TTBR0);
-		__vcpu_sys_reg(vcpu, TTBR1_EL2) = read_sysreg_el1(SYS_TTBR1);
-		__vcpu_sys_reg(vcpu, TCR_EL2) = read_sysreg_el1(SYS_TCR);
+		__vcpu_assign_sys_reg(vcpu, SCTLR_EL2, read_sysreg_el1(SYS_SCTLR));
+		__vcpu_assign_sys_reg(vcpu, TTBR0_EL2, read_sysreg_el1(SYS_TTBR0));
+		__vcpu_assign_sys_reg(vcpu, TTBR1_EL2, read_sysreg_el1(SYS_TTBR1));
+		__vcpu_assign_sys_reg(vcpu, TCR_EL2, read_sysreg_el1(SYS_TCR));
 
 		if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
-			__vcpu_sys_reg(vcpu, TCR2_EL2) = read_sysreg_el1(SYS_TCR2);
+			__vcpu_assign_sys_reg(vcpu, TCR2_EL2, read_sysreg_el1(SYS_TCR2));
 
 			if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
-				__vcpu_sys_reg(vcpu, PIRE0_EL2) = read_sysreg_el1(SYS_PIRE0);
-				__vcpu_sys_reg(vcpu, PIR_EL2) = read_sysreg_el1(SYS_PIR);
+				__vcpu_assign_sys_reg(vcpu, PIRE0_EL2, read_sysreg_el1(SYS_PIRE0));
+				__vcpu_assign_sys_reg(vcpu, PIR_EL2, read_sysreg_el1(SYS_PIR));
			}
 
 			if (ctxt_has_s1poe(&vcpu->arch.ctxt))
-				__vcpu_sys_reg(vcpu, POR_EL2) = read_sysreg_el1(SYS_POR);
+				__vcpu_assign_sys_reg(vcpu, POR_EL2, read_sysreg_el1(SYS_POR));
 		}
 
 		/*

@@ -70,13 +70,13 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
 		 */
 		val = read_sysreg_el1(SYS_CNTKCTL);
 		val &= CNTKCTL_VALID_BITS;
-		__vcpu_sys_reg(vcpu, CNTHCTL_EL2) &= ~CNTKCTL_VALID_BITS;
-		__vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val;
+		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, &=, ~CNTKCTL_VALID_BITS);
+		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, |=, val);
 	}
 
-	__vcpu_sys_reg(vcpu, SP_EL2) = read_sysreg(sp_el1);
-	__vcpu_sys_reg(vcpu, ELR_EL2) = read_sysreg_el1(SYS_ELR);
-	__vcpu_sys_reg(vcpu, SPSR_EL2) = read_sysreg_el1(SYS_SPSR);
+	__vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1));
+	__vcpu_assign_sys_reg(vcpu, ELR_EL2, read_sysreg_el1(SYS_ELR));
+	__vcpu_assign_sys_reg(vcpu, SPSR_EL2, read_sysreg_el1(SYS_SPSR));
 }
 
 static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)

arch/arm64/kvm/nested.c (1 addition, 1 deletion)

@@ -1757,7 +1757,7 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
 
 out:
 	for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
-		(void)__vcpu_sys_reg(vcpu, sr);
+		__vcpu_rmw_sys_reg(vcpu, sr, |=, 0);
 
 	return 0;
 }
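This one-liner is where the accessor rework shows its purpose. The old code relied on a side effect: reading a sanitised register through __vcpu_sys_reg() used to write the masked value back, so a discarded read was enough to sanitise the whole range. Now that reads are pure, the loop forces the sanitising write-back with a no-op OR:

	/* Before: the read's write-back side effect did the sanitising. */
	(void)__vcpu_sys_reg(vcpu, sr);

	/* After: reads are side-effect-free; a no-op RMW leaves the value
	 * unchanged but runs kvm_vcpu_apply_reg_masks() and stores the
	 * result back to memory. */
	__vcpu_rmw_sys_reg(vcpu, sr, |=, 0);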

arch/arm64/kvm/pmu-emul.c (12 additions, 12 deletions)

@@ -178,7 +178,7 @@ static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
 		val |= lower_32_bits(val);
 	}
 
-	__vcpu_sys_reg(vcpu, reg) = val;
+	__vcpu_assign_sys_reg(vcpu, reg, val);
 
 	/* Recreate the perf event to reflect the updated sample_period */
 	kvm_pmu_create_perf_event(pmc);

@@ -204,7 +204,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
 	kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
-	__vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val;
+	__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(select_idx), val);
 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 }

@@ -239,7 +239,7 @@ static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
 
 	reg = counter_index_to_reg(pmc->idx);
 
-	__vcpu_sys_reg(vcpu, reg) = val;
+	__vcpu_assign_sys_reg(vcpu, reg, val);
 
 	kvm_pmu_release_perf_event(pmc);
 }

@@ -503,14 +503,14 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
 		if (!kvm_pmc_is_64bit(pmc))
 			reg = lower_32_bits(reg);
-		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
+		__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(i), reg);
 
 		/* No overflow? move on */
 		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
 			continue;
 
 		/* Mark overflow */
-		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+		__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(i));
 
 		if (kvm_pmu_counter_can_chain(pmc))
 			kvm_pmu_counter_increment(vcpu, BIT(i + 1),

@@ -556,7 +556,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 	perf_event->attr.sample_period = period;
 	perf_event->hw.sample_period = period;
 
-	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
+	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx));
 
 	if (kvm_pmu_counter_can_chain(pmc))
 		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),

@@ -602,7 +602,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 
 	/* The reset bits don't indicate any state, and shouldn't be saved. */
-	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
+	__vcpu_assign_sys_reg(vcpu, PMCR_EL0, (val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P)));
 
 	if (val & ARMV8_PMU_PMCR_C)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

@@ -779,7 +779,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	u64 reg;
 
 	reg = counter_index_to_evtreg(pmc->idx);
-	__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
+	__vcpu_assign_sys_reg(vcpu, reg, (data & kvm_pmu_evtyper_mask(vcpu->kvm)));
 
 	kvm_pmu_create_perf_event(pmc);
 }

@@ -914,9 +914,9 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
-	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
-	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, mask);
+	__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, mask);
+	__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, mask);
 
 	kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }

@@ -1038,7 +1038,7 @@ static void kvm_arm_set_nr_counters(struct kvm *kvm, unsigned int nr)
 			u64 val = __vcpu_sys_reg(vcpu, MDCR_EL2);
 			val &= ~MDCR_EL2_HPMN;
 			val |= FIELD_PREP(MDCR_EL2_HPMN, kvm->arch.nr_pmu_counters);
-			__vcpu_sys_reg(vcpu, MDCR_EL2) = val;
+			__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
 		}
 	}
 }
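Note the pattern in kvm_pmu_counter_increment() above: when the update needs intermediate processing (here, deciding whether to truncate to 32 bits), the conversion is an explicit __vcpu_sys_reg() read followed by a __vcpu_assign_sys_reg() write rather than a single RMW. Condensed from the hunk above, for illustration:

	/* Read, compute with the intermediate value, then write back
	 * through the sanitising assign helper. */
	u64 reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;

	if (!kvm_pmc_is_64bit(pmc))
		reg = lower_32_bits(reg);
	__vcpu_assign_sys_reg(vcpu, counter_index_to_reg(i), reg);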
