Skip to content

Commit 8d404c4

Browse files
chazy (Christoffer Dall) authored and Marc Zyngier committed
KVM: arm64: Rewrite system register accessors to read/write functions
Currently we access the system registers array via the vcpu_sys_reg() macro. However, we are about to change the behavior to some times modify the register file directly, so let's change this to two primitives: * Accessor macros vcpu_write_sys_reg() and vcpu_read_sys_reg() * Direct array access macro __vcpu_sys_reg() The accessor macros should be used in places where the code needs to access the currently loaded VCPU's state as observed by the guest. For example, when trapping on cache related registers, a write to a system register should go directly to the VCPU version of the register. The direct array access macro can be used in places where the VCPU is known to never be running (for example userspace access) or for registers which are never context switched (for example all the PMU system registers). This rewrites all users of vcpu_sys_regs to one of the macros described above. No functional change. Acked-by: Marc Zyngier <[email protected]> Reviewed-by: Andrew Jones <[email protected]> Signed-off-by: Christoffer Dall <[email protected]> Signed-off-by: Marc Zyngier <[email protected]>
1 parent 52f6c4f commit 8d404c4

File tree

9 files changed

+101
-76
lines changed

9 files changed

+101
-76
lines changed

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -290,23 +290,26 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
290290

291291
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
292292
{
293-
return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
293+
return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
294294
}
295295

296296
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
297297
{
298-
if (vcpu_mode_is_32bit(vcpu))
298+
if (vcpu_mode_is_32bit(vcpu)) {
299299
*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
300-
else
301-
vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
300+
} else {
301+
u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
302+
sctlr |= (1 << 25);
303+
vcpu_write_sys_reg(vcpu, SCTLR_EL1, sctlr);
304+
}
302305
}
303306

304307
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
305308
{
306309
if (vcpu_mode_is_32bit(vcpu))
307310
return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
308311

309-
return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
312+
return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
310313
}
311314

312315
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,

arch/arm64/include/asm/kvm_host.h

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -287,7 +287,18 @@ struct kvm_vcpu_arch {
287287
};
288288

289289
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
290-
#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
290+
291+
/*
292+
* Only use __vcpu_sys_reg if you know you want the memory backed version of a
293+
* register, and not the one most recently accessed by a running VCPU. For
294+
* example, for userspace access or for system registers that are never context
295+
* switched, but only emulated.
296+
*/
297+
#define __vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
298+
299+
#define vcpu_read_sys_reg(v,r) __vcpu_sys_reg(v,r)
300+
#define vcpu_write_sys_reg(v,n,r) do { __vcpu_sys_reg(v,r) = n; } while (0)
301+
291302
/*
292303
* CP14 and CP15 live in the same array, as they are backed by the
293304
* same system registers.

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -249,7 +249,7 @@ struct kvm;
249249

250250
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
251251
{
252-
return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
252+
return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
253253
}
254254

255255
static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)

arch/arm64/kvm/debug.c

Lines changed: 19 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -46,18 +46,22 @@ static DEFINE_PER_CPU(u32, mdcr_el2);
4646
*/
4747
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
4848
{
49-
vcpu->arch.guest_debug_preserved.mdscr_el1 = vcpu_sys_reg(vcpu, MDSCR_EL1);
49+
u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
50+
51+
vcpu->arch.guest_debug_preserved.mdscr_el1 = val;
5052

5153
trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
5254
vcpu->arch.guest_debug_preserved.mdscr_el1);
5355
}
5456

5557
static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
5658
{
57-
vcpu_sys_reg(vcpu, MDSCR_EL1) = vcpu->arch.guest_debug_preserved.mdscr_el1;
59+
u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;
60+
61+
vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);
5862

5963
trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
60-
vcpu_sys_reg(vcpu, MDSCR_EL1));
64+
vcpu_read_sys_reg(vcpu, MDSCR_EL1));
6165
}
6266

6367
/**
@@ -108,6 +112,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
108112
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
109113
{
110114
bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY);
115+
unsigned long mdscr;
111116

112117
trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
113118

@@ -152,9 +157,13 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
152157
*/
153158
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
154159
*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
155-
vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_SS;
160+
mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
161+
mdscr |= DBG_MDSCR_SS;
162+
vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
156163
} else {
157-
vcpu_sys_reg(vcpu, MDSCR_EL1) &= ~DBG_MDSCR_SS;
164+
mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
165+
mdscr &= ~DBG_MDSCR_SS;
166+
vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
158167
}
159168

160169
trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));
@@ -170,7 +179,9 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
170179
*/
171180
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
172181
/* Enable breakpoints/watchpoints */
173-
vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_MDE;
182+
mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
183+
mdscr |= DBG_MDSCR_MDE;
184+
vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
174185

175186
vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
176187
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
@@ -194,12 +205,11 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
194205
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
195206

196207
/* If KDE or MDE are set, perform a full save/restore cycle. */
197-
if ((vcpu_sys_reg(vcpu, MDSCR_EL1) & DBG_MDSCR_KDE) ||
198-
(vcpu_sys_reg(vcpu, MDSCR_EL1) & DBG_MDSCR_MDE))
208+
if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
199209
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
200210

201211
trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
202-
trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_sys_reg(vcpu, MDSCR_EL1));
212+
trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
203213
}
204214

205215
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)

arch/arm64/kvm/inject_fault.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
5858
exc_offset = LOWER_EL_AArch32_VECTOR;
5959
}
6060

61-
return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
61+
return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
6262
}
6363

6464
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
@@ -73,7 +73,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
7373
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
7474
*vcpu_spsr(vcpu) = cpsr;
7575

76-
vcpu_sys_reg(vcpu, FAR_EL1) = addr;
76+
vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
7777

7878
/*
7979
* Build an {i,d}abort, depending on the level and the
@@ -94,7 +94,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
9494
if (!is_iabt)
9595
esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
9696

97-
vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
97+
vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
9898
}
9999

100100
static void inject_undef64(struct kvm_vcpu *vcpu)
@@ -115,7 +115,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
115115
if (kvm_vcpu_trap_il_is32bit(vcpu))
116116
esr |= ESR_ELx_IL;
117117

118-
vcpu_sys_reg(vcpu, ESR_EL1) = esr;
118+
vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
119119
}
120120

121121
/**

arch/arm64/kvm/sys_regs.c

Lines changed: 35 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -133,14 +133,14 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
133133
if (!p->is_aarch32 || !p->is_32bit) {
134134
val = p->regval;
135135
} else {
136-
val = vcpu_sys_reg(vcpu, reg);
136+
val = vcpu_read_sys_reg(vcpu, reg);
137137
if (r->reg % 2)
138138
val = (p->regval << 32) | (u64)lower_32_bits(val);
139139
else
140140
val = ((u64)upper_32_bits(val) << 32) |
141141
lower_32_bits(p->regval);
142142
}
143-
vcpu_sys_reg(vcpu, reg) = val;
143+
vcpu_write_sys_reg(vcpu, val, reg);
144144

145145
kvm_toggle_cache(vcpu, was_enabled);
146146
return true;
@@ -249,10 +249,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
249249
const struct sys_reg_desc *r)
250250
{
251251
if (p->is_write) {
252-
vcpu_sys_reg(vcpu, r->reg) = p->regval;
252+
vcpu_write_sys_reg(vcpu, p->regval, r->reg);
253253
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
254254
} else {
255-
p->regval = vcpu_sys_reg(vcpu, r->reg);
255+
p->regval = vcpu_read_sys_reg(vcpu, r->reg);
256256
}
257257

258258
trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
@@ -465,7 +465,8 @@ static void reset_wcr(struct kvm_vcpu *vcpu,
465465

466466
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
467467
{
468-
vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
468+
u64 amair = read_sysreg(amair_el1);
469+
vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
469470
}
470471

471472
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
@@ -482,7 +483,7 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
482483
mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
483484
mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
484485
mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
485-
vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
486+
vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
486487
}
487488

488489
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
@@ -496,12 +497,12 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
496497
*/
497498
val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
498499
| (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
499-
vcpu_sys_reg(vcpu, PMCR_EL0) = val;
500+
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
500501
}
501502

502503
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
503504
{
504-
u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
505+
u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
505506
bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
506507

507508
if (!enabled)
@@ -543,14 +544,14 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
543544

544545
if (p->is_write) {
545546
/* Only update writeable bits of PMCR */
546-
val = vcpu_sys_reg(vcpu, PMCR_EL0);
547+
val = __vcpu_sys_reg(vcpu, PMCR_EL0);
547548
val &= ~ARMV8_PMU_PMCR_MASK;
548549
val |= p->regval & ARMV8_PMU_PMCR_MASK;
549-
vcpu_sys_reg(vcpu, PMCR_EL0) = val;
550+
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
550551
kvm_pmu_handle_pmcr(vcpu, val);
551552
} else {
552553
/* PMCR.P & PMCR.C are RAZ */
553-
val = vcpu_sys_reg(vcpu, PMCR_EL0)
554+
val = __vcpu_sys_reg(vcpu, PMCR_EL0)
554555
& ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
555556
p->regval = val;
556557
}
@@ -568,10 +569,10 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
568569
return false;
569570

570571
if (p->is_write)
571-
vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
572+
__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
572573
else
573574
/* return PMSELR.SEL field */
574-
p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
575+
p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
575576
& ARMV8_PMU_COUNTER_MASK;
576577

577578
return true;
@@ -604,7 +605,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
604605
{
605606
u64 pmcr, val;
606607

607-
pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
608+
pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
608609
val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
609610
if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
610611
kvm_inject_undefined(vcpu);
@@ -629,7 +630,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
629630
if (pmu_access_event_counter_el0_disabled(vcpu))
630631
return false;
631632

632-
idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
633+
idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
633634
& ARMV8_PMU_COUNTER_MASK;
634635
} else if (r->Op2 == 0) {
635636
/* PMCCNTR_EL0 */
@@ -684,7 +685,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
684685

685686
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
686687
/* PMXEVTYPER_EL0 */
687-
idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
688+
idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
688689
reg = PMEVTYPER0_EL0 + idx;
689690
} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
690691
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
@@ -702,9 +703,9 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
702703

703704
if (p->is_write) {
704705
kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
705-
vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
706+
__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
706707
} else {
707-
p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
708+
p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
708709
}
709710

710711
return true;
@@ -726,15 +727,15 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
726727
val = p->regval & mask;
727728
if (r->Op2 & 0x1) {
728729
/* accessing PMCNTENSET_EL0 */
729-
vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
730+
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
730731
kvm_pmu_enable_counter(vcpu, val);
731732
} else {
732733
/* accessing PMCNTENCLR_EL0 */
733-
vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
734+
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
734735
kvm_pmu_disable_counter(vcpu, val);
735736
}
736737
} else {
737-
p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
738+
p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
738739
}
739740

740741
return true;
@@ -758,12 +759,12 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
758759

759760
if (r->Op2 & 0x1)
760761
/* accessing PMINTENSET_EL1 */
761-
vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
762+
__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
762763
else
763764
/* accessing PMINTENCLR_EL1 */
764-
vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
765+
__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
765766
} else {
766-
p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
767+
p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
767768
}
768769

769770
return true;
@@ -783,12 +784,12 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
783784
if (p->is_write) {
784785
if (r->CRm & 0x2)
785786
/* accessing PMOVSSET_EL0 */
786-
vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
787+
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
787788
else
788789
/* accessing PMOVSCLR_EL0 */
789-
vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
790+
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
790791
} else {
791-
p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
792+
p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
792793
}
793794

794795
return true;
@@ -825,10 +826,10 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
825826
return false;
826827
}
827828

828-
vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
829-
& ARMV8_PMU_USERENR_MASK;
829+
__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
830+
p->regval & ARMV8_PMU_USERENR_MASK;
830831
} else {
831-
p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
832+
p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
832833
& ARMV8_PMU_USERENR_MASK;
833834
}
834835

@@ -2230,7 +2231,7 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
22302231
if (r->get_user)
22312232
return (r->get_user)(vcpu, r, reg, uaddr);
22322233

2233-
return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
2234+
return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
22342235
}
22352236

22362237
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -2251,7 +2252,7 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
22512252
if (r->set_user)
22522253
return (r->set_user)(vcpu, r, reg, uaddr);
22532254

2254-
return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
2255+
return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
22552256
}
22562257

22572258
static unsigned int num_demux_regs(void)
@@ -2457,6 +2458,6 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
24572458
reset_sys_reg_descs(vcpu, table, num);
24582459

24592460
for (num = 1; num < NR_SYS_REGS; num++)
2460-
if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
2461-
panic("Didn't reset vcpu_sys_reg(%zi)", num);
2461+
if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
2462+
panic("Didn't reset __vcpu_sys_reg(%zi)", num);
24622463
}

0 commit comments

Comments (0)