@@ -1577,7 +1577,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	 * i.e. we end up advancing IP with some random value.
 	 */
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
-	    to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
+	    to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
 		orig_rip = kvm_rip_read(vcpu);
 		rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
 #ifdef CONFIG_X86_64
@@ -5667,7 +5667,7 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
 	struct vcpu_vmx *vmx = to_vmx(vcpu);

 	*info1 = vmx_get_exit_qual(vcpu);
-	if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
+	if (!(vmx->exit_reason.failed_vmentry)) {
 		*info2 = vmx->idt_vectoring_info;
 		*intr_info = vmx_get_intr_info(vcpu);
 		if (is_exception_with_error_code(*intr_info))
@@ -5911,8 +5911,9 @@ void dump_vmcs(void)
 static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u32 exit_reason = vmx->exit_reason;
+	union vmx_exit_reason exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
+	u16 exit_handler_index;

 	/*
 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
@@ -5954,11 +5955,11 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 		return 1;
 	}

-	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
+	if (exit_reason.failed_vmentry) {
 		dump_vmcs();
 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		vcpu->run->fail_entry.hardware_entry_failure_reason
-			= exit_reason;
+			= exit_reason.full;
 		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
 		return 0;
 	}
@@ -5980,18 +5981,18 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	 * will cause infinite loop.
 	 */
 	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
-	    (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
-	    exit_reason != EXIT_REASON_EPT_VIOLATION &&
-	    exit_reason != EXIT_REASON_PML_FULL &&
-	    exit_reason != EXIT_REASON_APIC_ACCESS &&
-	    exit_reason != EXIT_REASON_TASK_SWITCH)) {
+	    (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
+	    exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
+	    exit_reason.basic != EXIT_REASON_PML_FULL &&
+	    exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
+	    exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
 		vcpu->run->internal.ndata = 3;
 		vcpu->run->internal.data[0] = vectoring_info;
-		vcpu->run->internal.data[1] = exit_reason;
+		vcpu->run->internal.data[1] = exit_reason.full;
 		vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
-		if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
+		if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
 			vcpu->run->internal.ndata++;
 			vcpu->run->internal.data[3] =
 				vmcs_read64(GUEST_PHYSICAL_ADDRESS);
@@ -6023,38 +6024,39 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	if (exit_fastpath != EXIT_FASTPATH_NONE)
 		return 1;

-	if (exit_reason >= kvm_vmx_max_exit_handlers)
+	if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
 		goto unexpected_vmexit;
 #ifdef CONFIG_RETPOLINE
-	if (exit_reason == EXIT_REASON_MSR_WRITE)
+	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
 		return kvm_emulate_wrmsr(vcpu);
-	else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
+	else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
 		return handle_preemption_timer(vcpu);
-	else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW)
+	else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
 		return handle_interrupt_window(vcpu);
-	else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+	else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
 		return handle_external_interrupt(vcpu);
-	else if (exit_reason == EXIT_REASON_HLT)
+	else if (exit_reason.basic == EXIT_REASON_HLT)
 		return kvm_emulate_halt(vcpu);
-	else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
+	else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
 		return handle_ept_misconfig(vcpu);
 #endif

-	exit_reason = array_index_nospec(exit_reason,
-					 kvm_vmx_max_exit_handlers);
-	if (!kvm_vmx_exit_handlers[exit_reason])
+	exit_handler_index = array_index_nospec((u16)exit_reason.basic,
+						kvm_vmx_max_exit_handlers);
+	if (!kvm_vmx_exit_handlers[exit_handler_index])
 		goto unexpected_vmexit;

-	return kvm_vmx_exit_handlers[exit_reason](vcpu);
+	return kvm_vmx_exit_handlers[exit_handler_index](vcpu);

 unexpected_vmexit:
-	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+		    exit_reason.full);
 	dump_vmcs();
 	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 	vcpu->run->internal.suberror =
 			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
 	vcpu->run->internal.ndata = 2;
-	vcpu->run->internal.data[0] = exit_reason;
+	vcpu->run->internal.data[0] = exit_reason.full;
 	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
 	return 0;
 }
@@ -6373,9 +6375,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);

-	if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+	if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
 		handle_external_interrupt_irqoff(vcpu);
-	else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)
+	else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
 		handle_exception_nmi_irqoff(vmx);
 }

@@ -6567,7 +6569,7 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)

 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 {
-	switch (to_vmx(vcpu)->exit_reason) {
+	switch (to_vmx(vcpu)->exit_reason.basic) {
 	case EXIT_REASON_MSR_WRITE:
 		return handle_fastpath_set_msr_irqoff(vcpu);
 	case EXIT_REASON_PREEMPTION_TIMER:
@@ -6768,17 +6770,17 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx->idt_vectoring_info = 0;

 	if (unlikely(vmx->fail)) {
-		vmx->exit_reason = 0xdead;
+		vmx->exit_reason.full = 0xdead;
 		return EXIT_FASTPATH_NONE;
 	}

-	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
-	if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY))
+	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
+	if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
 		kvm_machine_check();

-	trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
+	trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);

-	if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+	if (unlikely(vmx->exit_reason.failed_vmentry))
 		return EXIT_FASTPATH_NONE;

 	vmx->loaded_vmcs->launched = 1;
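
For reference, the hunks above use three views of vmx->exit_reason: the raw 32-bit value (.full), the 16-bit basic exit reason (.basic), and the VM-entry-failure flag in bit 31 (.failed_vmentry). Below is a minimal sketch of such a union; the field names come from this diff, while the breakdown of the remaining modifier bits is simplified here (the real definition in arch/x86/kvm/vmx/vmx.h names the individual flag bits rather than lumping them into one reserved field).

/* Sketch only; layout per the Intel SDM exit-reason encoding used here. */
union vmx_exit_reason {
	struct {
		u32	basic		: 16;	/* basic exit reason, indexes kvm_vmx_exit_handlers */
		u32	reserved	: 15;	/* modifier bits, collapsed here for brevity */
		u32	failed_vmentry	: 1;	/* bit 31: set when VM-entry itself failed */
	};
	u32 full;				/* raw VM_EXIT_REASON value read from the VMCS */
};

Overlaying full on the bitfields lets call sites that report or trace the raw value (vcpu_unimpl, trace_kvm_exit, the KVM_EXIT_FAIL_ENTRY path) keep passing a plain u32, while comparisons against EXIT_REASON_* constants move to the 16-bit basic field instead of the previously implicit full-word compare.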