Commit 07ae538

agraf authored and paulusmack committed
KVM: PPC: Book3S PR: Fix svcpu copying with preemption enabled
When copying between the vcpu and svcpu, we may get scheduled away onto a different host CPU, which in turn means our svcpu pointer may change. That means we need to atomically copy to and from the svcpu with preemption disabled, so that all code around it always sees a coherent state.

Reported-by: Simon Guo <[email protected]>
Fixes: 3d3319b ("KVM: PPC: Book3S: PR: Enable interrupts earlier")
Signed-off-by: Alexander Graf <[email protected]>
Signed-off-by: Paul Mackerras <[email protected]>
1 parent 36ee41d commit 07ae538
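
For context on the fix: the reworked copy helpers below bracket the copy with svcpu_get()/svcpu_put(), which keep preemption disabled while the shadow vcpu is in use. A minimal sketch of roughly what those helpers do on 64-bit Book3S, assuming the shadow vcpu lives in the per-CPU PACA (simplified, not the verbatim kernel definitions):

/*
 * Simplified sketch (not verbatim kernel source) of the 64-bit helpers used
 * by the copy functions. The shadow vcpu lives in per-CPU data, so the
 * pointer is only stable while preemption is disabled.
 */
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();			/* pin the task to this host CPU */
	return &get_paca()->shadow_vcpu;	/* per-CPU shadow vcpu */
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();			/* copy done; task may migrate again */
}

Because the get/put pair now sits inside kvmppc_copy_to_svcpu() and kvmppc_copy_from_svcpu(), the entire copy runs with preemption off, so the task cannot be migrated to another host CPU, and hence to a different shadow vcpu, halfway through.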

File tree: 3 files changed, 12 additions & 18 deletions

arch/powerpc/include/asm/kvm_book3s.h

Lines changed: 2 additions & 4 deletions
@@ -249,10 +249,8 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
 extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
 extern int kvmppc_hcall_impl_pr(unsigned long cmd);
 extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
-extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
-				 struct kvm_vcpu *vcpu);
-extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
-				   struct kvmppc_book3s_shadow_vcpu *svcpu);
+extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
 extern int kvm_irq_bypass;
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)

arch/powerpc/kvm/book3s_interrupts.S

Lines changed: 1 addition & 3 deletions
@@ -96,7 +96,7 @@ kvm_start_entry:
 
 kvm_start_lightweight:
 	/* Copy registers into shadow vcpu so we can access them in real mode */
-	GET_SHADOW_VCPU(r3)
+	mr	r3, r4
 	bl	FUNC(kvmppc_copy_to_svcpu)
 	nop
 	REST_GPR(4, r1)
@@ -165,9 +165,7 @@ after_sprg3_load:
 	stw	r12, VCPU_TRAP(r3)
 
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
-	/* On 64-bit, interrupts are still off at this point */
 
-	GET_SHADOW_VCPU(r4)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	nop
 
arch/powerpc/kvm/book3s_pr.c

Lines changed: 9 additions & 11 deletions
@@ -120,7 +120,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	if (svcpu->in_use) {
-		kvmppc_copy_from_svcpu(vcpu, svcpu);
+		kvmppc_copy_from_svcpu(vcpu);
 	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
@@ -142,9 +142,10 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 }
 
 /* Copy data needed by real-mode code from vcpu to shadow vcpu */
-void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
-			  struct kvm_vcpu *vcpu)
+void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
 {
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+
 	svcpu->gpr[0] = vcpu->arch.gpr[0];
 	svcpu->gpr[1] = vcpu->arch.gpr[1];
 	svcpu->gpr[2] = vcpu->arch.gpr[2];
@@ -176,17 +177,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
 		vcpu->arch.entry_ic = mfspr(SPRN_IC);
 	svcpu->in_use = true;
+
+	svcpu_put(svcpu);
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
-void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
-			    struct kvmppc_book3s_shadow_vcpu *svcpu)
+void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * vcpu_put would just call us again because in_use hasn't
-	 * been updated yet.
-	 */
-	preempt_disable();
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 
 	/*
 	 * Maybe we were already preempted and synced the svcpu from
@@ -232,7 +230,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	svcpu->in_use = false;
 
 out:
-	preempt_enable();
+	svcpu_put(svcpu);
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
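
A usage note (illustration only, not part of the commit): a caller that already holds its own shadow-vcpu reference, such as kvmppc_core_vcpu_put_pr in the first hunk above, still works, because preempt_disable()/preempt_enable() nest via the preempt count, so the extra svcpu_get()/svcpu_put() inside the copy helpers only bumps and drops that count. A trimmed, hypothetical caller modelled on that hunk, assuming the helper sketch given earlier:

/* Hypothetical caller modelled on the kvmppc_core_vcpu_put_pr hunk above. */
static void sync_shadow_state_on_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);	/* preempt_count++ */

	if (svcpu->in_use)
		kvmppc_copy_from_svcpu(vcpu);	/* takes and drops its own reference */

	svcpu_put(svcpu);			/* preempt_count-- */
}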
