Skip to content

Commit 935a733

Browse files
sean-jc
authored and bonzini committed
KVM: SVM: Drop AVIC's intermediate avic_set_running() helper
Drop avic_set_running() in favor of calling avic_vcpu_{load,put}() directly, and modify the block+put path to use preempt_disable/enable() instead of get/put_cpu(), as it doesn't actually care about the current pCPU associated with the vCPU. Opportunistically add lockdep assertions as being preempted in avic_vcpu_put() would lead to consuming stale data, even though doing so _in the current code base_ would not be fatal. Add a much needed comment explaining why svm_vcpu_blocking() needs to unload the AVIC and update the IRTE _before_ the vCPU starts blocking. Signed-off-by: Sean Christopherson <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 635e635 commit 935a733

File tree

1 file changed

+36
-20
lines changed

1 file changed

+36
-20
lines changed

arch/x86/kvm/svm/avic.c

Lines changed: 36 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -978,6 +978,8 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
978978
int h_physical_id = kvm_cpu_get_apicid(cpu);
979979
struct vcpu_svm *svm = to_svm(vcpu);
980980

981+
lockdep_assert_preemption_disabled();
982+
981983
/*
982984
* Since the host physical APIC id is 8 bits,
983985
* we can support host APIC ID upto 255.
@@ -1011,6 +1013,8 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
10111013
u64 entry;
10121014
struct vcpu_svm *svm = to_svm(vcpu);
10131015

1016+
lockdep_assert_preemption_disabled();
1017+
10141018
entry = READ_ONCE(*(svm->avic_physical_id_cache));
10151019

10161020
/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
@@ -1023,30 +1027,42 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
10231027
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
10241028
}
10251029

1026-
/*
1027-
* This function is called during VCPU halt/unhalt.
1028-
*/
1029-
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
1030-
{
1031-
int cpu = get_cpu();
1032-
1033-
WARN_ON(cpu != vcpu->cpu);
1034-
1035-
if (kvm_vcpu_apicv_active(vcpu)) {
1036-
if (is_run)
1037-
avic_vcpu_load(vcpu, cpu);
1038-
else
1039-
avic_vcpu_put(vcpu);
1040-
}
1041-
put_cpu();
1042-
}
1043-
10441030
void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
10451031
{
1046-
avic_set_running(vcpu, false);
1032+
if (!kvm_vcpu_apicv_active(vcpu))
1033+
return;
1034+
1035+
preempt_disable();
1036+
1037+
/*
1038+
* Unload the AVIC when the vCPU is about to block, _before_
1039+
* the vCPU actually blocks.
1040+
*
1041+
* Any IRQs that arrive before IsRunning=0 will not cause an
1042+
* incomplete IPI vmexit on the source, therefore vIRR will also
1043+
* be checked by kvm_vcpu_check_block() before blocking. The
1044+
* memory barrier implicit in set_current_state orders writing
1045+
* IsRunning=0 before reading the vIRR. The processor needs a
1046+
* matching memory barrier on interrupt delivery between writing
1047+
* IRR and reading IsRunning; the lack of this barrier might be
1048+
* the cause of errata #1235).
1049+
*/
1050+
avic_vcpu_put(vcpu);
1051+
1052+
preempt_enable();
10471053
}
10481054

10491055
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
10501056
{
1051-
avic_set_running(vcpu, true);
1057+
int cpu;
1058+
1059+
if (!kvm_vcpu_apicv_active(vcpu))
1060+
return;
1061+
1062+
cpu = get_cpu();
1063+
WARN_ON(cpu != vcpu->cpu);
1064+
1065+
avic_vcpu_load(vcpu, cpu);
1066+
1067+
put_cpu();
10521068
}

0 commit comments

Comments
 (0)