Skip to content

Commit 256c096

Browse files
mrutland-arm authored and wildea01 committed
kvm/arm: use PSR_AA32 definitions
Some code cares about the SPSR_ELx format for exceptions taken from AArch32 to inspect or manipulate the SPSR_ELx value, which is already in the SPSR_ELx format, and not in the AArch32 PSR format.

To separate these from cases where we care about the AArch32 PSR format, migrate these cases to use the PSR_AA32_* definitions rather than COMPAT_PSR_*.

There should be no functional change as a result of this patch.

Note that arm64 KVM does not support a compat KVM API, and always uses the SPSR_ELx format, even for AArch32 guests.

Signed-off-by: Mark Rutland <[email protected]>
Acked-by: Christoffer Dall <[email protected]>
Acked-by: Marc Zyngier <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
1 parent d64567f commit 256c096

File tree

7 files changed

+43
-43
lines changed

7 files changed

+43
-43
lines changed

arch/arm/include/asm/kvm_emulate.h

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -26,13 +26,13 @@
2626
#include <asm/cputype.h>
2727

2828
/* arm64 compatibility macros */
29-
#define COMPAT_PSR_MODE_ABT ABT_MODE
30-
#define COMPAT_PSR_MODE_UND UND_MODE
31-
#define COMPAT_PSR_T_BIT PSR_T_BIT
32-
#define COMPAT_PSR_I_BIT PSR_I_BIT
33-
#define COMPAT_PSR_A_BIT PSR_A_BIT
34-
#define COMPAT_PSR_E_BIT PSR_E_BIT
35-
#define COMPAT_PSR_IT_MASK PSR_IT_MASK
29+
#define PSR_AA32_MODE_ABT ABT_MODE
30+
#define PSR_AA32_MODE_UND UND_MODE
31+
#define PSR_AA32_T_BIT PSR_T_BIT
32+
#define PSR_AA32_I_BIT PSR_I_BIT
33+
#define PSR_AA32_A_BIT PSR_A_BIT
34+
#define PSR_AA32_E_BIT PSR_E_BIT
35+
#define PSR_AA32_IT_MASK PSR_IT_MASK
3636

3737
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
3838

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
140140

141141
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
142142
{
143-
*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
143+
*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
144144
}
145145

146146
/*
@@ -190,8 +190,8 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
190190
u32 mode;
191191

192192
if (vcpu_mode_is_32bit(vcpu)) {
193-
mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
194-
return mode > COMPAT_PSR_MODE_USR;
193+
mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
194+
return mode > PSR_AA32_MODE_USR;
195195
}
196196

197197
mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
@@ -329,7 +329,7 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
329329
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
330330
{
331331
if (vcpu_mode_is_32bit(vcpu)) {
332-
*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
332+
*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
333333
} else {
334334
u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
335335
sctlr |= (1 << 25);
@@ -340,7 +340,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
340340
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
341341
{
342342
if (vcpu_mode_is_32bit(vcpu))
343-
return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
343+
return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
344344

345345
return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
346346
}

arch/arm64/kvm/guest.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -107,14 +107,14 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
107107
}
108108

109109
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
110-
u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
110+
u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
111111
switch (mode) {
112-
case COMPAT_PSR_MODE_USR:
113-
case COMPAT_PSR_MODE_FIQ:
114-
case COMPAT_PSR_MODE_IRQ:
115-
case COMPAT_PSR_MODE_SVC:
116-
case COMPAT_PSR_MODE_ABT:
117-
case COMPAT_PSR_MODE_UND:
112+
case PSR_AA32_MODE_USR:
113+
case PSR_AA32_MODE_FIQ:
114+
case PSR_AA32_MODE_IRQ:
115+
case PSR_AA32_MODE_SVC:
116+
case PSR_AA32_MODE_ABT:
117+
case PSR_AA32_MODE_UND:
118118
case PSR_MODE_EL0t:
119119
case PSR_MODE_EL1t:
120120
case PSR_MODE_EL1h:

arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
2828
{
2929
if (vcpu_mode_is_32bit(vcpu))
30-
return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT);
30+
return !!(read_sysreg_el2(spsr) & PSR_AA32_E_BIT);
3131

3232
return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
3333
}

arch/arm64/kvm/regmap.c

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -112,22 +112,22 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
112112
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
113113
{
114114
unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
115-
unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
115+
unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
116116

117117
switch (mode) {
118-
case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
118+
case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC:
119119
mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
120120
break;
121121

122-
case COMPAT_PSR_MODE_ABT:
122+
case PSR_AA32_MODE_ABT:
123123
mode = 4;
124124
break;
125125

126-
case COMPAT_PSR_MODE_UND:
126+
case PSR_AA32_MODE_UND:
127127
mode = 5;
128128
break;
129129

130-
case COMPAT_PSR_MODE_SYS:
130+
case PSR_AA32_MODE_SYS:
131131
mode = 0; /* SYS maps to USR */
132132
break;
133133

@@ -143,13 +143,13 @@ unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
143143
*/
144144
static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu)
145145
{
146-
unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
146+
unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
147147
switch (mode) {
148-
case COMPAT_PSR_MODE_SVC: return KVM_SPSR_SVC;
149-
case COMPAT_PSR_MODE_ABT: return KVM_SPSR_ABT;
150-
case COMPAT_PSR_MODE_UND: return KVM_SPSR_UND;
151-
case COMPAT_PSR_MODE_IRQ: return KVM_SPSR_IRQ;
152-
case COMPAT_PSR_MODE_FIQ: return KVM_SPSR_FIQ;
148+
case PSR_AA32_MODE_SVC: return KVM_SPSR_SVC;
149+
case PSR_AA32_MODE_ABT: return KVM_SPSR_ABT;
150+
case PSR_AA32_MODE_UND: return KVM_SPSR_UND;
151+
case PSR_AA32_MODE_IRQ: return KVM_SPSR_IRQ;
152+
case PSR_AA32_MODE_FIQ: return KVM_SPSR_FIQ;
153153
default: BUG();
154154
}
155155
}

arch/arm64/kvm/reset.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -42,8 +42,8 @@ static const struct kvm_regs default_regs_reset = {
4242
};
4343

4444
static const struct kvm_regs default_regs_reset32 = {
45-
.regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT |
46-
COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
45+
.regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
46+
PSR_AA32_I_BIT | PSR_AA32_F_BIT),
4747
};
4848

4949
static bool cpu_has_32bit_el1(void)

virt/kvm/arm/aarch32.c

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -108,9 +108,9 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
108108
{
109109
unsigned long itbits, cond;
110110
unsigned long cpsr = *vcpu_cpsr(vcpu);
111-
bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
111+
bool is_arm = !(cpsr & PSR_AA32_T_BIT);
112112

113-
if (is_arm || !(cpsr & COMPAT_PSR_IT_MASK))
113+
if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
114114
return;
115115

116116
cond = (cpsr & 0xe000) >> 13;
@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
123123
else
124124
itbits = (itbits << 1) & 0x1f;
125125

126-
cpsr &= ~COMPAT_PSR_IT_MASK;
126+
cpsr &= ~PSR_AA32_IT_MASK;
127127
cpsr |= cond << 13;
128128
cpsr |= (itbits & 0x1c) << (10 - 2);
129129
cpsr |= (itbits & 0x3) << 25;
@@ -138,7 +138,7 @@ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
138138
{
139139
bool is_thumb;
140140

141-
is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
141+
is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
142142
if (is_thumb && !is_wide_instr)
143143
*vcpu_pc(vcpu) += 2;
144144
else
@@ -164,16 +164,16 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
164164
{
165165
unsigned long cpsr;
166166
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
167-
bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
167+
bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
168168
u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
169169
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
170170

171-
cpsr = mode | COMPAT_PSR_I_BIT;
171+
cpsr = mode | PSR_AA32_I_BIT;
172172

173173
if (sctlr & (1 << 30))
174-
cpsr |= COMPAT_PSR_T_BIT;
174+
cpsr |= PSR_AA32_T_BIT;
175175
if (sctlr & (1 << 25))
176-
cpsr |= COMPAT_PSR_E_BIT;
176+
cpsr |= PSR_AA32_E_BIT;
177177

178178
*vcpu_cpsr(vcpu) = cpsr;
179179

@@ -192,7 +192,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
192192

193193
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
194194
{
195-
prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
195+
prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
196196
}
197197

198198
/*
@@ -216,7 +216,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
216216
fsr = &vcpu_cp15(vcpu, c5_DFSR);
217217
}
218218

219-
prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
219+
prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);
220220

221221
*far = addr;
222222

0 commit comments

Comments (0)