@@ -809,7 +809,6 @@ static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
 static bool vmx_mpx_supported(void);
 static bool vmx_xsaves_supported(void);
-static int vmx_vm_has_apicv(struct kvm *kvm);
 static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
@@ -947,9 +946,9 @@ static inline bool cpu_has_vmx_tpr_shadow(void)
         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
 }
 
-static inline bool vm_need_tpr_shadow(struct kvm *kvm)
+static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
 {
-        return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
+        return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
 }
 
 static inline bool cpu_has_secondary_exec_ctrls(void)
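For context (not part of this hunk): the renamed per-vCPU helpers lean on lapic_in_kernel(), which asks whether this particular vCPU has an in-kernel local APIC, rather than whether the VM as a whole has an in-kernel irqchip. A minimal sketch of that helper, assuming the arch/x86/kvm/lapic.h definition introduced by the same series:

/* Sketch, assuming the lapic.h definition from this series. */
static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)
{
        /*
         * Cheaper than irqchip_in_kernel(vcpu->kvm): a single
         * pointer test on the vCPU itself, with no chasing
         * through struct kvm.
         */
        return vcpu->arch.apic != NULL;
}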
@@ -1063,9 +1062,9 @@ static inline bool cpu_has_vmx_ple(void)
                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
 }
 
-static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
+static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
 {
-        return flexpriority_enabled && irqchip_in_kernel(kvm);
+        return flexpriority_enabled && lapic_in_kernel(vcpu);
 }
 
 static inline bool cpu_has_vmx_vpid(void)
@@ -2378,7 +2377,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
         vmx->nested.nested_vmx_pinbased_ctls_high |=
                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
                 PIN_BASED_VMX_PREEMPTION_TIMER;
-        if (vmx_vm_has_apicv(vmx->vcpu.kvm))
+        if (vmx_cpu_uses_apicv(&vmx->vcpu))
                 vmx->nested.nested_vmx_pinbased_ctls_high |=
                         PIN_BASED_POSTED_INTR;
 
@@ -4333,14 +4332,9 @@ static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
                         msr, MSR_TYPE_W);
 }
 
-static int vmx_vm_has_apicv(struct kvm *kvm)
-{
-        return enable_apicv && irqchip_in_kernel(kvm);
-}
-
 static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
 {
-        return vmx_vm_has_apicv(vcpu->kvm);
+        return enable_apicv && lapic_in_kernel(vcpu);
 }
 
 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
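With vmx_vm_has_apicv() folded into vmx_cpu_uses_apicv(), call sites that used to reach through vmx->vcpu.kvm now pass the vCPU directly, as the remaining hunks show. A hedged usage sketch of the resulting pattern (example_enable_posted_intr is a hypothetical name for illustration, not a function in this patch):

/*
 * Hypothetical caller, sketched to show the per-vCPU form.
 * vmx_cpu_uses_apicv() is the helper consolidated above.
 */
static void example_enable_posted_intr(struct kvm_vcpu *vcpu)
{
        if (!vmx_cpu_uses_apicv(vcpu))  /* per-vCPU, not per-VM */
                return;
        /* ... program PIN_BASED_POSTED_INTR as the real code does ... */
}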
@@ -4520,7 +4514,7 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
 {
         u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
 
-        if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+        if (!vmx_cpu_uses_apicv(&vmx->vcpu))
                 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
         return pin_based_exec_ctrl;
 }
@@ -4532,7 +4526,7 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
         if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
                 exec_control &= ~CPU_BASED_MOV_DR_EXITING;
 
-        if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
+        if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
                 exec_control &= ~CPU_BASED_TPR_SHADOW;
 #ifdef CONFIG_X86_64
                 exec_control |= CPU_BASED_CR8_STORE_EXITING |
@@ -4549,7 +4543,7 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 {
         u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
-        if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+        if (!cpu_need_virtualize_apic_accesses(&vmx->vcpu))
                 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
         if (vmx->vpid == 0)
                 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
@@ -4563,7 +4557,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
                 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
         if (!ple_gap)
                 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
-        if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+        if (!vmx_cpu_uses_apicv(&vmx->vcpu))
                 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
                                   SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
         exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
@@ -4624,7 +4618,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                              vmx_secondary_exec_control(vmx));
         }
 
-        if (vmx_vm_has_apicv(vmx->vcpu.kvm)) {
+        if (vmx_cpu_uses_apicv(&vmx->vcpu)) {
                 vmcs_write64(EOI_EXIT_BITMAP0, 0);
                 vmcs_write64(EOI_EXIT_BITMAP1, 0);
                 vmcs_write64(EOI_EXIT_BITMAP2, 0);
@@ -4768,15 +4762,15 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
         if (cpu_has_vmx_tpr_shadow() && !init_event) {
                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
-                if (vm_need_tpr_shadow(vcpu->kvm))
+                if (cpu_need_tpr_shadow(vcpu))
                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
                                      __pa(vcpu->arch.apic->regs));
                 vmcs_write32(TPR_THRESHOLD, 0);
         }
 
         kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
-        if (vmx_vm_has_apicv(vcpu->kvm))
+        if (vmx_cpu_uses_apicv(vcpu))
                 memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
 
         if (vmx->vpid != 0)
@@ -5316,7 +5310,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                                 u8 cr8 = (u8)val;
                                 err = kvm_set_cr8(vcpu, cr8);
                                 kvm_complete_insn_gp(vcpu, err);
-                                if (irqchip_in_kernel(vcpu->kvm))
+                                if (lapic_in_kernel(vcpu))
                                         return 1;
                                 if (cr8_prev <= cr8)
                                         return 1;
@@ -5535,7 +5529,7 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
          * If the user space waits to inject interrupts, exit as soon as
          * possible
          */
-        if (!irqchip_in_kernel(vcpu->kvm) &&
+        if (!lapic_in_kernel(vcpu) &&
             vcpu->run->request_interrupt_window &&
             !kvm_cpu_has_interrupt(vcpu)) {
                 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
@@ -7944,10 +7938,10 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
          * apicv
          */
         if (!cpu_has_vmx_virtualize_x2apic_mode() ||
-            !vmx_vm_has_apicv(vcpu->kvm))
+            !vmx_cpu_uses_apicv(vcpu))
                 return;
 
-        if (!vm_need_tpr_shadow(vcpu->kvm))
+        if (!cpu_need_tpr_shadow(vcpu))
                 return;
 
         sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
@@ -8052,7 +8046,7 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 {
         u64 *eoi_exit_bitmap = vcpu->arch.eoi_exit_bitmap;
-        if (!vmx_vm_has_apicv(vcpu->kvm))
+        if (!vmx_cpu_uses_apicv(vcpu))
                 return;
 
         vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
@@ -8551,7 +8545,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
         put_cpu();
         if (err)
                 goto free_vmcs;
-        if (vm_need_virtualize_apic_accesses(kvm)) {
+        if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
                 err = alloc_apic_access_page(kvm);
                 if (err)
                         goto free_vmcs;
@@ -9344,7 +9338,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                 vmcs_write64(APIC_ACCESS_ADDR,
                              page_to_phys(vmx->nested.apic_access_page));
         } else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) &&
-                   (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))) {
+                   cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
                 exec_control |=
                         SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
                 kvm_vcpu_reload_apic_access_page(vcpu);
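Every converted call site above asks a question about the vCPU's local APIC (TPR shadow, APIC-access virtualization, APICv and posted interrupts, CR8 intercepts, the interrupt window), not about the VM-wide irqchip. A hedged summary of that distinction, as comments rather than code from this patch:

/*
 * Illustration only, not code from this patch.
 *
 * Per-VM question:   is there an in-kernel interrupt controller?
 *     irqchip_in_kernel(kvm)
 *
 * Per-vCPU question: does this vCPU have an in-kernel LAPIC?
 *     lapic_in_kernel(vcpu)
 *
 * Features tied to the LAPIC should use the per-vCPU form, which
 * stays accurate if a later series lets the LAPIC live in the
 * kernel while other irqchip pieces move to userspace.
 */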