@@ -4630,33 +4630,18 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
 						msr, MSR_TYPE_R | MSR_TYPE_W);
 }
 
-static void vmx_disable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
+static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_active)
 {
 	if (apicv_active) {
 		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv,
-				msr, MSR_TYPE_R);
+				msr, type);
 		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv,
-				msr, MSR_TYPE_R);
+				msr, type);
 	} else {
 		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
-				msr, MSR_TYPE_R);
+				msr, type);
 		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
-				msr, MSR_TYPE_R);
-	}
-}
-
-static void vmx_disable_intercept_msr_write_x2apic(u32 msr, bool apicv_active)
-{
-	if (apicv_active) {
-		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv,
-				msr, MSR_TYPE_W);
-		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv,
-				msr, MSR_TYPE_W);
-	} else {
-		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
-				msr, MSR_TYPE_W);
-		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
-				msr, MSR_TYPE_W);
+				msr, type);
 	}
 }
 
@@ -6437,29 +6422,23 @@ static __init int hardware_setup(void)
 
 	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
 
-	/*
-	 * enable_apicv && kvm_vcpu_apicv_active()
-	 */
 	for (msr = 0x800; msr <= 0x8ff; msr++) {
 		if (msr == 0x839 /* TMCCT */)
 			continue;
-		vmx_disable_intercept_msr_read_x2apic(msr, true);
+		vmx_disable_intercept_msr_x2apic(msr, MSR_TYPE_R, true);
 	}
 
-	/* TPR */
-	vmx_disable_intercept_msr_write_x2apic(0x808, true);
-	/* EOI */
-	vmx_disable_intercept_msr_write_x2apic(0x80b, true);
-	/* SELF-IPI */
-	vmx_disable_intercept_msr_write_x2apic(0x83f, true);
-
 	/*
-	 * (enable_apicv && !kvm_vcpu_apicv_active()) ||
-	 * !enable_apicv
+	 * TPR reads and writes can be virtualized even if virtual interrupt
+	 * delivery is not in use.
 	 */
-	/* TPR */
-	vmx_disable_intercept_msr_read_x2apic(0x808, false);
-	vmx_disable_intercept_msr_write_x2apic(0x808, false);
+	vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_W, true);
+	vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_R | MSR_TYPE_W, false);
+
+	/* EOI */
+	vmx_disable_intercept_msr_x2apic(0x80b, MSR_TYPE_W, true);
+	/* SELF-IPI */
+	vmx_disable_intercept_msr_x2apic(0x83f, MSR_TYPE_W, true);
 
 	if (enable_ept) {
 		kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,