Skip to content

Commit 74a64a9

Browse files
Marc Zyngier authored and chazy committed
KVM: arm/arm64: Unify 32bit fault injection
Both arm and arm64 implementations are capable of injecting faults, and yet have completely divergent implementations, leading to different bugs and reduced maintainability. Let's elect the arm64 version as the canonical one and move it into aarch32.c, which is common to both architectures.

Reviewed-by: Christoffer Dall <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Christoffer Dall <[email protected]>
1 parent 3eb4271 commit 74a64a9

File tree

5 files changed

+131
-218
lines changed

5 files changed

+131
-218
lines changed

arch/arm/include/asm/kvm_emulate.h

Lines changed: 33 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,22 @@
2525
#include <asm/kvm_arm.h>
2626
#include <asm/cputype.h>
2727

28+
/* arm64 compatibility macros */
29+
#define COMPAT_PSR_MODE_ABT ABT_MODE
30+
#define COMPAT_PSR_MODE_UND UND_MODE
31+
#define COMPAT_PSR_T_BIT PSR_T_BIT
32+
#define COMPAT_PSR_I_BIT PSR_I_BIT
33+
#define COMPAT_PSR_A_BIT PSR_A_BIT
34+
#define COMPAT_PSR_E_BIT PSR_E_BIT
35+
#define COMPAT_PSR_IT_MASK PSR_IT_MASK
36+
2837
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
38+
39+
static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
40+
{
41+
return vcpu_reg(vcpu, reg_num);
42+
}
43+
2944
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
3045

3146
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
@@ -42,10 +57,25 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
4257

4358
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
4459
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
45-
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
60+
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
61+
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
62+
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
4663
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
47-
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
48-
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
64+
65+
static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
66+
{
67+
kvm_inject_undef32(vcpu);
68+
}
69+
70+
static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
71+
{
72+
kvm_inject_dabt32(vcpu, addr);
73+
}
74+
75+
static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
76+
{
77+
kvm_inject_pabt32(vcpu, addr);
78+
}
4979

5080
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
5181
{

arch/arm/kvm/emulate.c

Lines changed: 0 additions & 139 deletions
Original file line numberDiff line numberDiff line change
@@ -165,145 +165,6 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
165165
* Inject exceptions into the guest
166166
*/
167167

168-
static u32 exc_vector_base(struct kvm_vcpu *vcpu)
169-
{
170-
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
171-
u32 vbar = vcpu_cp15(vcpu, c12_VBAR);
172-
173-
if (sctlr & SCTLR_V)
174-
return 0xffff0000;
175-
else /* always have security exceptions */
176-
return vbar;
177-
}
178-
179-
/*
180-
* Switch to an exception mode, updating both CPSR and SPSR. Follow
181-
* the logic described in AArch32.EnterMode() from the ARMv8 ARM.
182-
*/
183-
static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
184-
{
185-
unsigned long cpsr = *vcpu_cpsr(vcpu);
186-
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
187-
188-
*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
189-
190-
switch (mode) {
191-
case FIQ_MODE:
192-
*vcpu_cpsr(vcpu) |= PSR_F_BIT;
193-
/* Fall through */
194-
case ABT_MODE:
195-
case IRQ_MODE:
196-
*vcpu_cpsr(vcpu) |= PSR_A_BIT;
197-
/* Fall through */
198-
default:
199-
*vcpu_cpsr(vcpu) |= PSR_I_BIT;
200-
}
201-
202-
*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
203-
204-
if (sctlr & SCTLR_TE)
205-
*vcpu_cpsr(vcpu) |= PSR_T_BIT;
206-
if (sctlr & SCTLR_EE)
207-
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
208-
209-
/* Note: These now point to the mode banked copies */
210-
*vcpu_spsr(vcpu) = cpsr;
211-
}
212-
213-
/**
214-
* kvm_inject_undefined - inject an undefined exception into the guest
215-
* @vcpu: The VCPU to receive the undefined exception
216-
*
217-
* It is assumed that this code is called from the VCPU thread and that the
218-
* VCPU therefore is not currently executing guest code.
219-
*
220-
* Modelled after TakeUndefInstrException() pseudocode.
221-
*/
222-
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
223-
{
224-
unsigned long cpsr = *vcpu_cpsr(vcpu);
225-
bool is_thumb = (cpsr & PSR_T_BIT);
226-
u32 vect_offset = 4;
227-
u32 return_offset = (is_thumb) ? 2 : 4;
228-
229-
kvm_update_psr(vcpu, UND_MODE);
230-
*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
231-
232-
/* Branch to exception vector */
233-
*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
234-
}
235-
236-
/*
237-
* Modelled after TakeDataAbortException() and TakePrefetchAbortException
238-
* pseudocode.
239-
*/
240-
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
241-
{
242-
unsigned long cpsr = *vcpu_cpsr(vcpu);
243-
bool is_thumb = (cpsr & PSR_T_BIT);
244-
u32 vect_offset;
245-
u32 return_offset = (is_thumb) ? 4 : 0;
246-
bool is_lpae;
247-
248-
kvm_update_psr(vcpu, ABT_MODE);
249-
*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
250-
251-
if (is_pabt)
252-
vect_offset = 12;
253-
else
254-
vect_offset = 16;
255-
256-
/* Branch to exception vector */
257-
*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
258-
259-
if (is_pabt) {
260-
/* Set IFAR and IFSR */
261-
vcpu_cp15(vcpu, c6_IFAR) = addr;
262-
is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
263-
/* Always give debug fault for now - should give guest a clue */
264-
if (is_lpae)
265-
vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22;
266-
else
267-
vcpu_cp15(vcpu, c5_IFSR) = 2;
268-
} else { /* !iabt */
269-
/* Set DFAR and DFSR */
270-
vcpu_cp15(vcpu, c6_DFAR) = addr;
271-
is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
272-
/* Always give debug fault for now - should give guest a clue */
273-
if (is_lpae)
274-
vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22;
275-
else
276-
vcpu_cp15(vcpu, c5_DFSR) = 2;
277-
}
278-
279-
}
280-
281-
/**
282-
* kvm_inject_dabt - inject a data abort into the guest
283-
* @vcpu: The VCPU to receive the undefined exception
284-
* @addr: The address to report in the DFAR
285-
*
286-
* It is assumed that this code is called from the VCPU thread and that the
287-
* VCPU therefore is not currently executing guest code.
288-
*/
289-
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
290-
{
291-
inject_abt(vcpu, false, addr);
292-
}
293-
294-
/**
295-
* kvm_inject_pabt - inject a prefetch abort into the guest
296-
* @vcpu: The VCPU to receive the undefined exception
297-
* @addr: The address to report in the DFAR
298-
*
299-
* It is assumed that this code is called from the VCPU thread and that the
300-
* VCPU therefore is not currently executing guest code.
301-
*/
302-
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
303-
{
304-
inject_abt(vcpu, true, addr);
305-
}
306-
307168
/**
308169
* kvm_inject_vabt - inject an async abort / SError into the guest
309170
* @vcpu: The VCPU to receive the exception

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
4141
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
4242
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
4343
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
44+
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
45+
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
46+
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
4447

4548
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
4649
{

arch/arm64/kvm/inject_fault.c

Lines changed: 3 additions & 71 deletions
Original file line numberDiff line numberDiff line change
@@ -33,74 +33,6 @@
3333
#define LOWER_EL_AArch64_VECTOR 0x400
3434
#define LOWER_EL_AArch32_VECTOR 0x600
3535

36-
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
37-
{
38-
unsigned long cpsr;
39-
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
40-
bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
41-
u32 return_offset = (is_thumb) ? 4 : 0;
42-
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
43-
44-
cpsr = mode | COMPAT_PSR_I_BIT;
45-
46-
if (sctlr & (1 << 30))
47-
cpsr |= COMPAT_PSR_T_BIT;
48-
if (sctlr & (1 << 25))
49-
cpsr |= COMPAT_PSR_E_BIT;
50-
51-
*vcpu_cpsr(vcpu) = cpsr;
52-
53-
/* Note: These now point to the banked copies */
54-
*vcpu_spsr(vcpu) = new_spsr_value;
55-
*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
56-
57-
/* Branch to exception vector */
58-
if (sctlr & (1 << 13))
59-
vect_offset += 0xffff0000;
60-
else /* always have security exceptions */
61-
vect_offset += vcpu_cp15(vcpu, c12_VBAR);
62-
63-
*vcpu_pc(vcpu) = vect_offset;
64-
}
65-
66-
static void inject_undef32(struct kvm_vcpu *vcpu)
67-
{
68-
prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
69-
}
70-
71-
/*
72-
* Modelled after TakeDataAbortException() and TakePrefetchAbortException
73-
* pseudocode.
74-
*/
75-
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
76-
unsigned long addr)
77-
{
78-
u32 vect_offset;
79-
u32 *far, *fsr;
80-
bool is_lpae;
81-
82-
if (is_pabt) {
83-
vect_offset = 12;
84-
far = &vcpu_cp15(vcpu, c6_IFAR);
85-
fsr = &vcpu_cp15(vcpu, c5_IFSR);
86-
} else { /* !iabt */
87-
vect_offset = 16;
88-
far = &vcpu_cp15(vcpu, c6_DFAR);
89-
fsr = &vcpu_cp15(vcpu, c5_DFSR);
90-
}
91-
92-
prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
93-
94-
*far = addr;
95-
96-
/* Give the guest an IMPLEMENTATION DEFINED exception */
97-
is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
98-
if (is_lpae)
99-
*fsr = 1 << 9 | 0x34;
100-
else
101-
*fsr = 0x14;
102-
}
103-
10436
enum exception_type {
10537
except_type_sync = 0,
10638
except_type_irq = 0x80,
@@ -197,7 +129,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
197129
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
198130
{
199131
if (!(vcpu->arch.hcr_el2 & HCR_RW))
200-
inject_abt32(vcpu, false, addr);
132+
kvm_inject_dabt32(vcpu, addr);
201133
else
202134
inject_abt64(vcpu, false, addr);
203135
}
@@ -213,7 +145,7 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
213145
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
214146
{
215147
if (!(vcpu->arch.hcr_el2 & HCR_RW))
216-
inject_abt32(vcpu, true, addr);
148+
kvm_inject_pabt32(vcpu, addr);
217149
else
218150
inject_abt64(vcpu, true, addr);
219151
}
@@ -227,7 +159,7 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
227159
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
228160
{
229161
if (!(vcpu->arch.hcr_el2 & HCR_RW))
230-
inject_undef32(vcpu);
162+
kvm_inject_undef32(vcpu);
231163
else
232164
inject_undef64(vcpu);
233165
}

0 commit comments

Comments (0)