Skip to content

Commit 97b7ead

Browse files
Sean Christopherson authored and bonzini (Paolo Bonzini) committed
KVM: VMX: Expose various getters and setters to nested VMX
...as they're used directly by the nested code. This will allow moving the bulk of the nested code out of vmx.c without concurrent changes to vmx.h. Signed-off-by: Sean Christopherson <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent cf3646e commit 97b7ead

File tree

2 files changed

+53
-48
lines changed

2 files changed

+53
-48
lines changed

arch/x86/kvm/vmx/vmx.c

Lines changed: 26 additions & 48 deletions
Original file line number | Diff line number | Diff line change
@@ -365,18 +365,11 @@ static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
365365

366366
static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
367367
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
368-
static void vmx_set_segment(struct kvm_vcpu *vcpu,
369-
struct kvm_segment *var, int seg);
370-
static void vmx_get_segment(struct kvm_vcpu *vcpu,
371-
struct kvm_segment *var, int seg);
372368
static bool guest_state_valid(struct kvm_vcpu *vcpu);
373369
static u32 vmx_segment_access_rights(struct kvm_segment *var);
374370
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
375-
static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
376-
static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
377371
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
378372
u16 error_code);
379-
static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
380373
static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
381374
u32 msr, int type);
382375

@@ -438,8 +431,6 @@ static const struct kvm_vmx_segment_field {
438431

439432
u64 host_efer;
440433

441-
static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
442-
443434
/*
444435
* Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
445436
* away by decrementing the array size.
@@ -687,7 +678,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
687678
u32 exit_intr_info,
688679
unsigned long exit_qualification);
689680

690-
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
681+
static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
691682
{
692683
int i;
693684

@@ -697,7 +688,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
697688
return -1;
698689
}
699690

700-
static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
691+
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
701692
{
702693
int i;
703694

@@ -707,15 +698,6 @@ static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
707698
return NULL;
708699
}
709700

710-
void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
711-
{
712-
vmcs_clear(loaded_vmcs->vmcs);
713-
if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
714-
vmcs_clear(loaded_vmcs->shadow_vmcs);
715-
loaded_vmcs->cpu = -1;
716-
loaded_vmcs->launched = 0;
717-
}
718-
719701
#ifdef CONFIG_KEXEC_CORE
720702
/*
721703
* This bitmap is used to indicate whether the vmclear
@@ -840,7 +822,7 @@ static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
840822
return *p;
841823
}
842824

843-
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
825+
void update_exception_bitmap(struct kvm_vcpu *vcpu)
844826
{
845827
u32 eb;
846828

@@ -1140,7 +1122,7 @@ static unsigned long segment_base(u16 selector)
11401122
}
11411123
#endif
11421124

1143-
static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1125+
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
11441126
{
11451127
struct vcpu_vmx *vmx = to_vmx(vcpu);
11461128
struct vmcs_host_state *host_state;
@@ -1338,7 +1320,7 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
13381320
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
13391321
* vcpu mutex is already taken.
13401322
*/
1341-
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1323+
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
13421324
{
13431325
struct vcpu_vmx *vmx = to_vmx(vcpu);
13441326
bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
@@ -1419,7 +1401,7 @@ static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
14191401
pi_set_sn(pi_desc);
14201402
}
14211403

1422-
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1404+
void vmx_vcpu_put(struct kvm_vcpu *vcpu)
14231405
{
14241406
vmx_vcpu_pi_put(vcpu);
14251407

@@ -1449,7 +1431,7 @@ static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
14491431
(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
14501432
}
14511433

1452-
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1434+
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
14531435
{
14541436
unsigned long rflags, save_rflags;
14551437

@@ -1466,7 +1448,7 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
14661448
return to_vmx(vcpu)->rflags;
14671449
}
14681450

1469-
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1451+
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
14701452
{
14711453
unsigned long old_rflags = vmx_get_rflags(vcpu);
14721454

@@ -1482,7 +1464,7 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
14821464
to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
14831465
}
14841466

1485-
static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
1467+
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
14861468
{
14871469
u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
14881470
int ret = 0;
@@ -1495,7 +1477,7 @@ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
14951477
return ret;
14961478
}
14971479

1498-
static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1480+
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
14991481
{
15001482
u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
15011483
u32 interruptibility = interruptibility_old;
@@ -3291,7 +3273,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
32913273
kvm_mmu_reset_context(vcpu);
32923274
}
32933275

3294-
static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
3276+
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
32953277
{
32963278
struct vcpu_vmx *vmx = to_vmx(vcpu);
32973279
struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
@@ -3391,7 +3373,7 @@ static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
33913373
}
33923374
}
33933375

3394-
static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3376+
void ept_save_pdptrs(struct kvm_vcpu *vcpu)
33953377
{
33963378
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
33973379

@@ -3442,8 +3424,6 @@ static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
34423424
#define nested_guest_cr4_valid nested_cr4_valid
34433425
#define nested_host_cr4_valid nested_cr4_valid
34443426

3445-
static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
3446-
34473427
static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
34483428
unsigned long cr0,
34493429
struct kvm_vcpu *vcpu)
@@ -3472,7 +3452,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
34723452
*hw_cr0 &= ~X86_CR0_WP;
34733453
}
34743454

3475-
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3455+
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
34763456
{
34773457
struct vcpu_vmx *vmx = to_vmx(vcpu);
34783458
unsigned long hw_cr0;
@@ -3531,7 +3511,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
35313511
return eptp;
35323512
}
35333513

3534-
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
3514+
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
35353515
{
35363516
struct kvm *kvm = vcpu->kvm;
35373517
unsigned long guest_cr3;
@@ -3561,7 +3541,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
35613541
vmcs_writel(GUEST_CR3, guest_cr3);
35623542
}
35633543

3564-
static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3544+
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
35653545
{
35663546
/*
35673547
* Pass through host's Machine Check Enable value to hw_cr4, which
@@ -3636,8 +3616,7 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
36363616
return 0;
36373617
}
36383618

3639-
static void vmx_get_segment(struct kvm_vcpu *vcpu,
3640-
struct kvm_segment *var, int seg)
3619+
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
36413620
{
36423621
struct vcpu_vmx *vmx = to_vmx(vcpu);
36433622
u32 ar;
@@ -3684,7 +3663,7 @@ static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
36843663
return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
36853664
}
36863665

3687-
static int vmx_get_cpl(struct kvm_vcpu *vcpu)
3666+
int vmx_get_cpl(struct kvm_vcpu *vcpu)
36883667
{
36893668
struct vcpu_vmx *vmx = to_vmx(vcpu);
36903669

@@ -3716,8 +3695,7 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
37163695
return ar;
37173696
}
37183697

3719-
static void vmx_set_segment(struct kvm_vcpu *vcpu,
3720-
struct kvm_segment *var, int seg)
3698+
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
37213699
{
37223700
struct vcpu_vmx *vmx = to_vmx(vcpu);
37233701
const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -4111,7 +4089,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
41114089
return r;
41124090
}
41134091

4114-
static int allocate_vpid(void)
4092+
int allocate_vpid(void)
41154093
{
41164094
int vpid;
41174095

@@ -4127,7 +4105,7 @@ static int allocate_vpid(void)
41274105
return vpid;
41284106
}
41294107

4130-
static void free_vpid(int vpid)
4108+
void free_vpid(int vpid)
41314109
{
41324110
if (!enable_vpid || vpid == 0)
41334111
return;
@@ -4302,7 +4280,7 @@ static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
43024280
}
43034281
}
43044282

4305-
static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
4283+
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
43064284
{
43074285
struct vcpu_vmx *vmx = to_vmx(vcpu);
43084286
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
@@ -4490,7 +4468,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
44904468
* Note that host-state that does change is set elsewhere. E.g., host-state
44914469
* that is set differently for each CPU is set in vmx_vcpu_load(), not here.
44924470
*/
4493-
static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4471+
void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
44944472
{
44954473
u32 low32, high32;
44964474
unsigned long tmpl;
@@ -4550,7 +4528,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
45504528
vmcs_write64(HOST_IA32_EFER, host_efer);
45514529
}
45524530

4553-
static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4531+
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
45544532
{
45554533
vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
45564534
if (enable_ept)
@@ -5080,7 +5058,7 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
50805058
vmx_clear_hlt(vcpu);
50815059
}
50825060

5083-
static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
5061+
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
50845062
{
50855063
struct vcpu_vmx *vmx = to_vmx(vcpu);
50865064
bool masked;
@@ -5094,7 +5072,7 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
50945072
return masked;
50955073
}
50965074

5097-
static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5075+
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
50985076
{
50995077
struct vcpu_vmx *vmx = to_vmx(vcpu);
51005078

@@ -8688,7 +8666,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
86888666
vmcs_write32(TPR_THRESHOLD, irr);
86898667
}
86908668

8691-
static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
8669+
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
86928670
{
86938671
u32 sec_exec_control;
86948672

arch/x86/kvm/vmx/vmx.h

Lines changed: 27 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -267,6 +267,33 @@ struct kvm_vmx {
267267
spinlock_t ept_pointer_lock;
268268
};
269269

270+
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
271+
void vmx_vcpu_put(struct kvm_vcpu *vcpu);
272+
int allocate_vpid(void);
273+
void free_vpid(int vpid);
274+
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
275+
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
276+
int vmx_get_cpl(struct kvm_vcpu *vcpu);
277+
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
278+
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
279+
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
280+
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
281+
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
282+
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
283+
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
284+
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
285+
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
286+
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
287+
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
288+
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
289+
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
290+
void update_exception_bitmap(struct kvm_vcpu *vcpu);
291+
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
292+
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
293+
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
294+
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
295+
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
296+
270297
#define POSTED_INTR_ON 0
271298
#define POSTED_INTR_SN 1
272299

0 commit comments

Comments (0)