Skip to content

Commit 3aedd5c

Browse files
Marc Zyngier authored
and chazy (Christoffer Dall) committed
arm: KVM: Use common AArch32 conditional execution code
Add the bit of glue and const-ification that is required to use the code inherited from the arm64 port, and move over to it. Signed-off-by: Marc Zyngier <[email protected]> Signed-off-by: Christoffer Dall <[email protected]>
1 parent 427d7ca commit 3aedd5c

File tree

4 files changed

+33
-104
lines changed

4 files changed

+33
-104
lines changed

arch/arm/include/asm/kvm_emulate.h

Lines changed: 27 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -40,18 +40,28 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
4040
*vcpu_reg(vcpu, reg_num) = val;
4141
}
4242

43-
bool kvm_condition_valid(struct kvm_vcpu *vcpu);
44-
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
43+
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
44+
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
4545
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
4646
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
4747
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
4848

49+
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
50+
{
51+
return kvm_condition_valid32(vcpu);
52+
}
53+
54+
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
55+
{
56+
kvm_skip_instr32(vcpu, is_wide_instr);
57+
}
58+
4959
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
5060
{
5161
vcpu->arch.hcr = HCR_GUEST_MASK;
5262
}
5363

54-
static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
64+
static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
5565
{
5666
return vcpu->arch.hcr;
5767
}
@@ -61,7 +71,7 @@ static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
6171
vcpu->arch.hcr = hcr;
6272
}
6373

64-
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
74+
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
6575
{
6676
return 1;
6777
}
@@ -71,9 +81,9 @@ static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
7181
return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
7282
}
7383

74-
static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
84+
static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
7585
{
76-
return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
86+
return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
7787
}
7888

7989
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -93,11 +103,21 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
93103
return cpsr_mode > USR_MODE;;
94104
}
95105

96-
static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
106+
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
97107
{
98108
return vcpu->arch.fault.hsr;
99109
}
100110

111+
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
112+
{
113+
u32 hsr = kvm_vcpu_get_hsr(vcpu);
114+
115+
if (hsr & HSR_CV)
116+
return (hsr & HSR_COND) >> HSR_COND_SHIFT;
117+
118+
return -1;
119+
}
120+
101121
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
102122
{
103123
return vcpu->arch.fault.hxfar;

arch/arm/kvm/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/
2121
obj-y += kvm-arm.o init.o interrupts.o
2222
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
2323
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
24+
obj-y += $(KVM)/arm/aarch32.o
2425

2526
obj-y += $(KVM)/arm/vgic/vgic.o
2627
obj-y += $(KVM)/arm/vgic/vgic-init.o

arch/arm/kvm/emulate.c

Lines changed: 0 additions & 97 deletions
Original file line numberDiff line numberDiff line change
@@ -161,103 +161,6 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
161161
}
162162
}
163163

164-
/*
165-
* A conditional instruction is allowed to trap, even though it
166-
* wouldn't be executed. So let's re-implement the hardware, in
167-
* software!
168-
*/
169-
bool kvm_condition_valid(struct kvm_vcpu *vcpu)
170-
{
171-
unsigned long cpsr, cond, insn;
172-
173-
/*
174-
* Exception Code 0 can only happen if we set HCR.TGE to 1, to
175-
* catch undefined instructions, and then we won't get past
176-
* the arm_exit_handlers test anyway.
177-
*/
178-
BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
179-
180-
/* Top two bits non-zero? Unconditional. */
181-
if (kvm_vcpu_get_hsr(vcpu) >> 30)
182-
return true;
183-
184-
cpsr = *vcpu_cpsr(vcpu);
185-
186-
/* Is condition field valid? */
187-
if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
188-
cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
189-
else {
190-
/* This can happen in Thumb mode: examine IT state. */
191-
unsigned long it;
192-
193-
it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
194-
195-
/* it == 0 => unconditional. */
196-
if (it == 0)
197-
return true;
198-
199-
/* The cond for this insn works out as the top 4 bits. */
200-
cond = (it >> 4);
201-
}
202-
203-
/* Shift makes it look like an ARM-mode instruction */
204-
insn = cond << 28;
205-
return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
206-
}
207-
208-
/**
209-
* adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
210-
* @vcpu: The VCPU pointer
211-
*
212-
* When exceptions occur while instructions are executed in Thumb IF-THEN
213-
* blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
214-
* to do this little bit of work manually. The fields map like this:
215-
*
216-
* IT[7:0] -> CPSR[26:25],CPSR[15:10]
217-
*/
218-
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
219-
{
220-
unsigned long itbits, cond;
221-
unsigned long cpsr = *vcpu_cpsr(vcpu);
222-
bool is_arm = !(cpsr & PSR_T_BIT);
223-
224-
if (is_arm || !(cpsr & PSR_IT_MASK))
225-
return;
226-
227-
cond = (cpsr & 0xe000) >> 13;
228-
itbits = (cpsr & 0x1c00) >> (10 - 2);
229-
itbits |= (cpsr & (0x3 << 25)) >> 25;
230-
231-
/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
232-
if ((itbits & 0x7) == 0)
233-
itbits = cond = 0;
234-
else
235-
itbits = (itbits << 1) & 0x1f;
236-
237-
cpsr &= ~PSR_IT_MASK;
238-
cpsr |= cond << 13;
239-
cpsr |= (itbits & 0x1c) << (10 - 2);
240-
cpsr |= (itbits & 0x3) << 25;
241-
*vcpu_cpsr(vcpu) = cpsr;
242-
}
243-
244-
/**
245-
* kvm_skip_instr - skip a trapped instruction and proceed to the next
246-
* @vcpu: The vcpu pointer
247-
*/
248-
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
249-
{
250-
bool is_thumb;
251-
252-
is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
253-
if (is_thumb && !is_wide_instr)
254-
*vcpu_pc(vcpu) += 2;
255-
else
256-
*vcpu_pc(vcpu) += 4;
257-
kvm_adjust_itstate(vcpu);
258-
}
259-
260-
261164
/******************************************************************************
262165
* Inject exceptions into the guest
263166
*/

virt/kvm/arm/aarch32.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,11 @@
2424
#include <linux/kvm_host.h>
2525
#include <asm/kvm_emulate.h>
2626

27+
#ifndef CONFIG_ARM64
28+
#define COMPAT_PSR_T_BIT PSR_T_BIT
29+
#define COMPAT_PSR_IT_MASK PSR_IT_MASK
30+
#endif
31+
2732
/*
2833
* stolen from arch/arm/kernel/opcodes.c
2934
*

0 commit comments

Comments
 (0)