
Commit b6b8a14

jan-kiszka authored and bonzini committed
KVM: nVMX: Rework interception of IRQs and NMIs
Move the check for leaving L2 on pending and intercepted IRQs or NMIs from the *_allowed handlers into a dedicated callback. Invoke this callback at the relevant points before KVM checks if IRQs/NMIs can be injected. The callback is responsible for switching from L2 to L1 if needed and for injecting the proper vmexit events.

The rework fixes L2 wakeups from HLT and provides the foundation for preemption timer emulation.

Signed-off-by: Jan Kiszka <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent: b010926

File tree: 3 files changed, +59 −36 lines
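
In outline, the reworked injection path looks as follows. This is a condensed, non-compilable sketch stitched together from the hunks below, with the exception/NMI/IRQ reinjection details elided; the full diffs are authoritative.

	/* Sketch: event injection after this rework (condensed from the hunks below). */
	static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
	{
		/* 1. Reinject a previously pending exception, NMI or IRQ, if any. */

		/*
		 * 2. New step: while running L2, let the nested hypervisor
		 *    intercept the event first. The callback performs the
		 *    L2->L1 vmexit itself and returns -EBUSY when that exit
		 *    must wait for a pending nested vmentry to complete.
		 */
		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
			int r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
			if (r != 0)
				return r;
		}

		/* 3. Inject the event at whatever level (L1 or L2) we now run at. */
		return 0;
	}

A non-zero return propagates to vcpu_enter_guest(), which requests an immediate exit instead of opening an NMI/IRQ window, so the deferred vmexit is retried on the next iteration.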

arch/x86/include/asm/kvm_host.h

Lines changed: 2 additions & 0 deletions
@@ -767,6 +767,8 @@ struct kvm_x86_ops {
 			       enum x86_intercept_stage stage);
 	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
 	bool (*mpx_supported)(void);
+
+	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
 };
 
 struct kvm_arch_async_pf {
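
The hook is optional: every caller added in x86.c checks the function pointer before invoking it, so only backends that implement nested event interception (here, VMX) need to provide it. A return of 0 means injection can proceed; a negative value (VMX uses -EBUSY) means the required L2-to-L1 exit must be deferred.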

arch/x86/kvm/vmx.c

Lines changed: 38 additions & 29 deletions
@@ -4631,22 +4631,8 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu)) {
-		if (to_vmx(vcpu)->nested.nested_run_pending)
-			return 0;
-		if (nested_exit_on_nmi(vcpu)) {
-			nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
-					  NMI_VECTOR | INTR_TYPE_NMI_INTR |
-					  INTR_INFO_VALID_MASK, 0);
-			/*
-			 * The NMI-triggered VM exit counts as injection:
-			 * clear this one and block further NMIs.
-			 */
-			vcpu->arch.nmi_pending = 0;
-			vmx_set_nmi_mask(vcpu, true);
-			return 0;
-		}
-	}
+	if (to_vmx(vcpu)->nested.nested_run_pending)
+		return 0;
 
 	if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
 		return 0;
@@ -4658,19 +4644,8 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu)) {
-		if (to_vmx(vcpu)->nested.nested_run_pending)
-			return 0;
-		if (nested_exit_on_intr(vcpu)) {
-			nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
-					  0, 0);
-			/*
-			 * fall through to normal code, but now in L1, not L2
-			 */
-		}
-	}
-
-	return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+	return (!to_vmx(vcpu)->nested.nested_run_pending &&
+		vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
 		!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
 			(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
 }
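
With the interception logic moved out, both *_allowed predicates shrink to their core job: refuse injection while a nested vmentry (nested_run_pending) is still in flight, then apply the usual NMI/IRQ blocking checks.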
@@ -8172,6 +8147,35 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
 	}
 }
 
+static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
+		if (vmx->nested.nested_run_pending)
+			return -EBUSY;
+		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+				  NMI_VECTOR | INTR_TYPE_NMI_INTR |
+				  INTR_INFO_VALID_MASK, 0);
+		/*
+		 * The NMI-triggered VM exit counts as injection:
+		 * clear this one and block further NMIs.
+		 */
+		vcpu->arch.nmi_pending = 0;
+		vmx_set_nmi_mask(vcpu, true);
+		return 0;
+	}
+
+	if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
+	    nested_exit_on_intr(vcpu)) {
+		if (vmx->nested.nested_run_pending)
+			return -EBUSY;
+		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+	}
+
+	return 0;
+}
+
 /*
  * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
  * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
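
The new callback checks NMIs ahead of external interrupts, matching their injection priority. It calls nested_vmx_vmexit() directly: when it returns 0 after such an exit, the generic injection path continues with the vCPU already back in L1, which is the "switch from L2 to L1 if needed" behavior described in the commit message.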
@@ -8512,6 +8516,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	nested_vmx_succeed(vcpu);
 	if (enable_shadow_vmcs)
 		vmx->nested.sync_shadow_vmcs = true;
+
+	/* in case we halted in L2 */
+	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 }
 
 /*
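
This is the L2 HLT wakeup fix called out in the commit message: if the vCPU halted inside L2, the emulated vmexit must mark it runnable again so the intercepted event can be delivered in L1.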
@@ -8652,6 +8659,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.check_intercept = vmx_check_intercept,
 	.handle_external_intr = vmx_handle_external_intr,
 	.mpx_supported = vmx_mpx_supported,
+
+	.check_nested_events = vmx_check_nested_events,
 };
 
 static int __init vmx_init(void)

arch/x86/kvm/x86.c

Lines changed: 19 additions & 7 deletions
@@ -5821,8 +5821,10 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }
 
-static void inject_pending_event(struct kvm_vcpu *vcpu)
+static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 {
+	int r;
+
 	/* try to reinject previous events if any */
 	if (vcpu->arch.exception.pending) {
 		trace_kvm_inj_exception(vcpu->arch.exception.nr,
@@ -5832,17 +5834,23 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 					vcpu->arch.exception.has_error_code,
 					vcpu->arch.exception.error_code,
 					vcpu->arch.exception.reinject);
-		return;
+		return 0;
 	}
 
 	if (vcpu->arch.nmi_injected) {
 		kvm_x86_ops->set_nmi(vcpu);
-		return;
+		return 0;
 	}
 
 	if (vcpu->arch.interrupt.pending) {
 		kvm_x86_ops->set_irq(vcpu);
-		return;
+		return 0;
+	}
+
+	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+		r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+		if (r != 0)
+			return r;
 	}
 
 	/* try to inject new event if pending */
@@ -5859,6 +5867,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 			kvm_x86_ops->set_irq(vcpu);
 		}
 	}
+	return 0;
 }
 
 static void process_nmi(struct kvm_vcpu *vcpu)
@@ -5963,10 +5972,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	inject_pending_event(vcpu);
-
+	if (inject_pending_event(vcpu, req_int_win) != 0)
+		req_immediate_exit = true;
 	/* enable NMI/IRQ window open exits if needed */
-	if (vcpu->arch.nmi_pending)
+	else if (vcpu->arch.nmi_pending)
 		req_immediate_exit =
 			kvm_x86_ops->enable_nmi_window(vcpu) != 0;
 	else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
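
inject_pending_event() now returns non-zero when check_nested_events() reported -EBUSY; forcing req_immediate_exit lets the pending vmentry complete (clearing nested_run_pending) and re-runs event evaluation right afterwards. Only when injection went through are the NMI/IRQ window exits considered, hence the new else-if chain.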
@@ -7296,6 +7305,9 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
+	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
+		kvm_x86_ops->check_nested_events(vcpu, false);
+
 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
 		!vcpu->arch.apf.halted)
 		|| !list_empty_careful(&vcpu->async_pf.done)
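
Running the callback here is what wakes a vCPU halted in L2: the emulated vmexit sets mp_state to KVM_MP_STATE_RUNNABLE (see the nested_vmx_vmexit hunk above), so the runnable check that follows succeeds.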
