
Commit 40fdd8c

KVM: PPC: Book3S: PR: Make svcpu -> vcpu store preempt savvy
As soon as we get back to our "highmem" handler in virtual address space we may get preempted. Today the reason we can get preempted is that we replay interrupts and all the lazy logic thinks we have interrupts enabled.

However, it's not hard to make the code interruptible, and that way we can enable and handle interrupts even earlier.

This fixes random guest crashes that happened with CONFIG_PREEMPT=y for me.

Signed-off-by: Alexander Graf <[email protected]>
Parent: c9dad7f
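For readers unfamiliar with the exit path: once the guest exits back to the host's virtual-mode handler, the vcpu task can be preempted before it has copied the shadow-vcpu state back. KVM's generic preempt notifiers then invoke the architecture's vcpu_put hook, which (with this patch) performs that copy early, so the normal exit path must not copy a second time. Below is a minimal user-space sketch of that guard; all names, the struct layouts, and the no-op preempt macros are illustrative assumptions, not the real kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the kernel structures. */
struct shadow_vcpu {
	bool in_use;
	long gpr[14];
};

struct vcpu {
	long gpr[14];
};

/* Stand-ins for the kernel's preempt_disable()/preempt_enable(). */
#define preempt_disable() do { } while (0)
#define preempt_enable()  do { } while (0)

static void copy_from_svcpu(struct vcpu *vcpu, struct shadow_vcpu *svcpu)
{
	/* vcpu_put would just call us again: in_use is still set. */
	preempt_disable();

	/* Already synced by the preempt notifier? Leave svcpu alone. */
	if (!svcpu->in_use)
		goto out;

	memcpy(vcpu->gpr, svcpu->gpr, sizeof(vcpu->gpr));
	svcpu->in_use = false;
out:
	preempt_enable();
}

int main(void)
{
	struct shadow_vcpu svcpu = { .in_use = true, .gpr = { 42 } };
	struct vcpu vcpu = { { 0 } };

	/* First sync: models the preempt notifier firing on schedule(). */
	copy_from_svcpu(&vcpu, &svcpu);

	/* The svcpu may now be reused and hold someone else's state. */
	svcpu.gpr[0] = -1;

	/* Second sync, from the normal exit path: a no-op now. */
	copy_from_svcpu(&vcpu, &svcpu);

	printf("gpr[0] = %ld\n", vcpu.gpr[0]); /* prints 42, not -1 */
	return 0;
}

The key property is idempotence: whichever path syncs first wins, and the later caller sees !in_use and backs off without touching potentially stale shadow state.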

File tree: 2 files changed (+23, -0 lines)


arch/powerpc/include/asm/kvm_book3s_asm.h

Lines changed: 1 addition & 0 deletions
@@ -106,6 +106,7 @@ struct kvmppc_host_state {
 };
 
 struct kvmppc_book3s_shadow_vcpu {
+	bool in_use;
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;

arch/powerpc/kvm/book3s_pr.c

Lines changed: 22 additions & 0 deletions
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->in_use = 0;
 	svcpu_put(svcpu);
 #endif
 	vcpu->cpu = smp_processor_id();

@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	if (svcpu->in_use) {
+		kvmppc_copy_from_svcpu(vcpu, svcpu);
+	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);

@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	svcpu->ctr = vcpu->arch.ctr;
 	svcpu->lr = vcpu->arch.lr;
 	svcpu->pc = vcpu->arch.pc;
+	svcpu->in_use = true;
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 			    struct kvmppc_book3s_shadow_vcpu *svcpu)
 {
+	/*
+	 * vcpu_put would just call us again because in_use hasn't
+	 * been updated yet.
+	 */
+	preempt_disable();
+
+	/*
+	 * Maybe we were already preempted and synced the svcpu from
+	 * our preempt notifiers. Don't bother touching this svcpu then.
+	 */
+	if (!svcpu->in_use)
+		goto out;
+
 	vcpu->arch.gpr[0] = svcpu->gpr[0];
 	vcpu->arch.gpr[1] = svcpu->gpr[1];
 	vcpu->arch.gpr[2] = svcpu->gpr[2];

@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	vcpu->arch.fault_dar = svcpu->fault_dar;
 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
 	vcpu->arch.last_inst = svcpu->last_inst;
+	svcpu->in_use = false;
+
+out:
+	preempt_enable();
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
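Taken together, the two copy helpers now form an idempotent pair around a real-mode excursion. A sketch of the intended usage follows; the wrapper function name is hypothetical, since the real entry/exit sequence is split across book3s_pr.c and the assembly entry code:

/* Hypothetical wrapper for illustration only. */
static void enter_guest_once(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	kvmppc_copy_to_svcpu(svcpu, vcpu);	/* sets svcpu->in_use */
	svcpu_put(svcpu);

	/* ... switch to real mode, run the guest, take an exit ... */

	/*
	 * Back in virtual mode we may be preempted at any point. If
	 * that happens, the preempt notifier runs
	 * kvmppc_core_vcpu_put_pr(), which copies the svcpu back and
	 * clears in_use before we get here.
	 */

	svcpu = svcpu_get(vcpu);
	kvmppc_copy_from_svcpu(vcpu, svcpu);	/* no-op if already synced */
	svcpu_put(svcpu);
}

Either caller can run first; in_use ensures the second copy is a no-op, so the vcpu never absorbs stale shadow state after the svcpu has been handed back.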
