
Commit 53655dd

paulusmack authored and mpe committed
KVM: PPC: Book3S HV: Call kvmppc_handle_exit_hv() with vcore unlocked
Currently kvmppc_handle_exit_hv() is called with the vcore lock held because it is called within a for_each_runnable_thread loop. However, we already unlock the vcore within kvmppc_handle_exit_hv() under certain circumstances, and this is safe because (a) any vcpus that become runnable and are added to the runnable set by kvmppc_run_vcpu() have their vcpu->arch.trap == 0 and can't actually run in the guest (because the vcore state is VCORE_EXITING), and (b) for_each_runnable_thread is safe against addition or removal of vcpus from the runnable set.

Therefore, in order to simplify things for following patches, let's drop the vcore lock in the for_each_runnable_thread loop, so kvmppc_handle_exit_hv() gets called without the vcore lock held.

Reviewed-by: David Gibson <[email protected]>
Signed-off-by: Paul Mackerras <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
1 parent 7854f75 commit 53655dd
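
The locking pattern this commit adopts in post_guest_process() can be stated in isolation: hold the per-core lock while walking the runnable set, drop it around the per-vcpu exit handling (which may sleep or take other locks), and retake it for the bookkeeping that needs it. This is only sound because the iteration tolerates concurrent removal and no newly-runnable vcpu can make forward progress while the core is exiting. The following is a minimal userspace sketch of that pattern, with pthread mutexes standing in for kernel spinlocks; every name in it is illustrative, not taken from book3s_hv.c.

    /*
     * Minimal sketch of the drop-lock-inside-the-loop pattern.
     * All names are hypothetical; this is not the kernel code.
     */
    #include <pthread.h>
    #include <stdio.h>

    #define MAX_THREADS 8

    struct core {
            pthread_mutex_t lock;
            /*
             * Runnable set: a NULL slot means "not runnable".
             * Iteration re-checks slots under the lock and skips
             * NULL ones, so concurrent removal is harmless.
             */
            int *runnable[MAX_THREADS];
    };

    static void handle_exit(int *vcpu)
    {
            /*
             * Stands in for kvmppc_handle_exit_hv(); it may block or
             * take other locks, which is why the core lock must not
             * be held across this call.
             */
            printf("handled exit for vcpu %d\n", *vcpu);
    }

    static void post_guest_process(struct core *c)
    {
            pthread_mutex_lock(&c->lock);
            for (int i = 0; i < MAX_THREADS; i++) {
                    int *vcpu = c->runnable[i];    /* read under lock */
                    if (!vcpu)
                            continue;
                    /*
                     * Safe to drop the lock here: removal is tolerated,
                     * and (in the real code) the vcore state is
                     * VCORE_EXITING, so newly-runnable vcpus cannot
                     * enter the guest in the meantime.
                     */
                    pthread_mutex_unlock(&c->lock);
                    handle_exit(vcpu);
                    pthread_mutex_lock(&c->lock);
                    /* ... locked per-vcpu bookkeeping goes here ... */
            }
            pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
            struct core c = { .lock = PTHREAD_MUTEX_INITIALIZER };
            int ids[3] = { 0, 1, 2 };

            for (int i = 0; i < 3; i++)
                    c.runnable[i] = &ids[i];
            post_guest_process(&c);
            return 0;
    }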

File tree

1 file changed (+10, -9 lines)


arch/powerpc/kvm/book3s_hv.c

Lines changed: 10 additions & 9 deletions
@@ -1084,7 +1084,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
-/* Called with vcpu->arch.vcore->lock held */
 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				 struct task_struct *tsk)
 {
@@ -1205,10 +1204,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			swab32(vcpu->arch.emul_inst) :
 			vcpu->arch.emul_inst;
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
-			/* Need vcore unlocked to call kvmppc_get_last_inst */
-			spin_unlock(&vcpu->arch.vcore->lock);
 			r = kvmppc_emulate_debug_inst(run, vcpu);
-			spin_lock(&vcpu->arch.vcore->lock);
 		} else {
 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
 			r = RESUME_GUEST;
@@ -1224,12 +1220,8 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
 		r = EMULATE_FAIL;
 		if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
-		    cpu_has_feature(CPU_FTR_ARCH_300)) {
-			/* Need vcore unlocked to call kvmppc_get_last_inst */
-			spin_unlock(&vcpu->arch.vcore->lock);
+		    cpu_has_feature(CPU_FTR_ARCH_300))
 			r = kvmppc_emulate_doorbell_instr(vcpu);
-			spin_lock(&vcpu->arch.vcore->lock);
-		}
 		if (r == EMULATE_FAIL) {
 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
 			r = RESUME_GUEST;
@@ -2599,6 +2591,14 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 	spin_lock(&vc->lock);
 	now = get_tb();
 	for_each_runnable_thread(i, vcpu, vc) {
+		/*
+		 * It's safe to unlock the vcore in the loop here, because
+		 * for_each_runnable_thread() is safe against removal of
+		 * the vcpu, and the vcore state is VCORE_EXITING here,
+		 * so any vcpus becoming runnable will have their arch.trap
+		 * set to zero and can't actually run in the guest.
+		 */
+		spin_unlock(&vc->lock);
 		/* cancel pending dec exception if dec is positive */
 		if (now < vcpu->arch.dec_expires &&
 		    kvmppc_core_pending_dec(vcpu))
@@ -2614,6 +2614,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 		vcpu->arch.ret = ret;
 		vcpu->arch.trap = 0;
 
+		spin_lock(&vc->lock);
 		if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
 			if (vcpu->arch.pending_exceptions)
 				kvmppc_core_prepare_to_enter(vcpu);
