@@ -365,18 +365,11 @@ static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
 
 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
-static void vmx_set_segment(struct kvm_vcpu *vcpu,
-			    struct kvm_segment *var, int seg);
-static void vmx_get_segment(struct kvm_vcpu *vcpu,
-			    struct kvm_segment *var, int seg);
 static bool guest_state_valid(struct kvm_vcpu *vcpu);
 static u32 vmx_segment_access_rights(struct kvm_segment *var);
 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
-static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
-static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
 					    u16 error_code);
-static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
 static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
 							  u32 msr, int type);
 
@@ -438,8 +431,6 @@ static const struct kvm_vmx_segment_field {
 
 u64 host_efer;
 
-static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
-
 /*
  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
  * away by decrementing the array size.
@@ -687,7 +678,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 			      u32 exit_intr_info,
 			      unsigned long exit_qualification);
 
-static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
+static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
 	int i;
 
@@ -697,7 +688,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 	return -1;
 }
 
-static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 {
 	int i;
 
@@ -707,15 +698,6 @@ static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 	return NULL;
 }
 
-void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
-{
-	vmcs_clear(loaded_vmcs->vmcs);
-	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
-		vmcs_clear(loaded_vmcs->shadow_vmcs);
-	loaded_vmcs->cpu = -1;
-	loaded_vmcs->launched = 0;
-}
-
 #ifdef CONFIG_KEXEC_CORE
 /*
  * This bitmap is used to indicate whether the vmclear
@@ -840,7 +822,7 @@ static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
 	return *p;
 }
 
-static void update_exception_bitmap(struct kvm_vcpu *vcpu)
+void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
 	u32 eb;
 
@@ -1140,7 +1122,7 @@ static unsigned long segment_base(u16 selector)
 }
 #endif
 
-static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs_host_state *host_state;
@@ -1338,7 +1320,7 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
  */
-static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
@@ -1419,7 +1401,7 @@ static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
 		pi_set_sn(pi_desc);
 }
 
-static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
+void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	vmx_vcpu_pi_put(vcpu);
 
@@ -1449,7 +1431,7 @@ static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
 		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
 }
 
-static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
+unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
 	unsigned long rflags, save_rflags;
 
@@ -1466,7 +1448,7 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 	return to_vmx(vcpu)->rflags;
 }
 
-static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
 	unsigned long old_rflags = vmx_get_rflags(vcpu);
 
@@ -1482,7 +1464,7 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 		to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
 }
 
-static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
+u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
 	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
 	int ret = 0;
@@ -1495,7 +1477,7 @@ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 {
 	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
 	u32 interruptibility = interruptibility_old;
@@ -3291,7 +3273,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 }
 
-static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
@@ -3391,7 +3373,7 @@ static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
+void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 
@@ -3442,8 +3424,6 @@ static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
 #define nested_guest_cr4_valid	nested_cr4_valid
 #define nested_host_cr4_valid	nested_cr4_valid
 
-static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-
 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 					unsigned long cr0,
 					struct kvm_vcpu *vcpu)
@@ -3472,7 +3452,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 		*hw_cr0 &= ~X86_CR0_WP;
 }
 
-static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long hw_cr0;
@@ -3531,7 +3511,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
 	return eptp;
 }
 
-static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 	struct kvm *kvm = vcpu->kvm;
 	unsigned long guest_cr3;
@@ -3561,7 +3541,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	vmcs_writel(GUEST_CR3, guest_cr3);
 }
 
-static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	/*
 	 * Pass through host's Machine Check Enable value to hw_cr4, which
@@ -3636,8 +3616,7 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	return 0;
 }
 
-static void vmx_get_segment(struct kvm_vcpu *vcpu,
-			    struct kvm_segment *var, int seg)
+void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 ar;
@@ -3684,7 +3663,7 @@ static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
 	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
 }
 
-static int vmx_get_cpl(struct kvm_vcpu *vcpu)
+int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -3716,8 +3695,7 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
 	return ar;
 }
 
-static void vmx_set_segment(struct kvm_vcpu *vcpu,
-			    struct kvm_segment *var, int seg)
+void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -4111,7 +4089,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 	return r;
 }
 
-static int allocate_vpid(void)
+int allocate_vpid(void)
 {
 	int vpid;
 
@@ -4127,7 +4105,7 @@ static int allocate_vpid(void)
 	return vpid;
 }
 
-static void free_vpid(int vpid)
+void free_vpid(int vpid)
 {
 	if (!enable_vpid || vpid == 0)
 		return;
@@ -4302,7 +4280,7 @@ static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
 	}
 }
 
-static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
+void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
@@ -4490,7 +4468,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
  * Note that host-state that does change is set elsewhere. E.g., host-state
  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
  */
-static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 {
 	u32 low32, high32;
 	unsigned long tmpl;
@@ -4550,7 +4528,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 	vmcs_write64(HOST_IA32_EFER, host_efer);
 }
 
-static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
+void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 {
 	vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
 	if (enable_ept)
@@ -5080,7 +5058,7 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 	vmx_clear_hlt(vcpu);
 }
 
-static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
+bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	bool masked;
@@ -5094,7 +5072,7 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
 	return masked;
 }
 
-static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -8688,7 +8666,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 	vmcs_write32(TPR_THRESHOLD, irr);
 }
 
-static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
 {
 	u32 sec_exec_control;
 
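The hunks above drop `static` from a number of vmx.c helpers (and remove the forward declarations they made unnecessary) so the functions can be called from other translation units. The companion declarations are not part of this diff; a shared header would presumably carry prototypes matching the signatures made non-static here. The sketch below is only an illustration of what such a header could look like: the header name, guard, and includes are assumptions, and only a representative subset of the exported functions is listed.

/*
 * Illustrative sketch, not part of this commit: a shared header exposing
 * the helpers that were made non-static above. Header name/guard are
 * assumptions; prototypes mirror the signatures visible in the diff.
 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

struct vcpu_vmx;
struct shared_msr_entry;

struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_put(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_H */

Collecting the prototypes in one place (rather than scattering extern declarations through the callers) would keep the compiler checking every signature against the definitions in vmx.c.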