
Commit 50b265a

Sean Christopherson authored and bonzini committed
KVM: nVMX: Add helper to handle TLB flushes on nested VM-Enter/VM-Exit
Add a helper to determine whether or not a full TLB flush needs to be
performed on nested VM-Enter/VM-Exit, as the logic is identical for both
flows and needs a fairly beefy comment to boot. This also provides a
common point to make future adjustments to the logic.

Handle vpid12 changes in the new helper as well, even though it is
specific to VM-Enter. The vpid12 logic is an extension of the flushing
logic, and it's worth the extra bool parameter to provide a single
location for the flushing logic.

Cc: Liran Alon <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 7780938 commit 50b265a

File tree: 1 file changed

arch/x86/kvm/vmx/nested.c

Lines changed: 44 additions & 44 deletions
@@ -1132,6 +1132,48 @@ static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
 	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
 }
 
+static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
+					    struct vmcs12 *vmcs12,
+					    bool is_vmenter)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	/*
+	 * If VPID is disabled, linear and combined mappings are flushed on
+	 * VM-Enter/VM-Exit, and guest-physical mappings are valid only for
+	 * their associated EPTP.
+	 */
+	if (!enable_vpid)
+		return;
+
+	/*
+	 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
+	 * for *all* contexts to be flushed on VM-Enter/VM-Exit.
+	 *
+	 * If VPID is enabled and used by vmc12, but L2 does not have a unique
+	 * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
+	 * a VPID for L2, flush the TLB as the effective ASID is common to both
+	 * L1 and L2.
+	 *
+	 * Defer the flush so that it runs after vmcs02.EPTP has been set by
+	 * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
+	 * redundant flushes further down the nested pipeline.
+	 *
+	 * If a TLB flush isn't required due to any of the above, and vpid12 is
+	 * changing then the new "virtual" VPID (vpid12) will reuse the same
+	 * "real" VPID (vpid02), and so needs to be sync'd.  There is no direct
+	 * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
+	 * all nested vCPUs.
+	 */
+	if (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu)) {
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+	} else if (is_vmenter &&
+		   vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+		vmx->nested.last_vpid = vmcs12->virtual_processor_id;
+		vpid_sync_context(nested_get_vpid02(vcpu));
+	}
+}
+
 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
 {
 	superset &= mask;
@@ -2440,32 +2482,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	if (kvm_has_tsc_control)
 		decache_tsc_multiplier(vmx);
 
-	if (enable_vpid) {
-		/*
-		 * There is no direct mapping between vpid02 and vpid12, the
-		 * vpid02 is per-vCPU for L0 and reused while the value of
-		 * vpid12 is changed w/ one invvpid during nested vmentry.
-		 * The vpid12 is allocated by L1 for L2, so it will not
-		 * influence global bitmap(for vpid01 and vpid02 allocation)
-		 * even if spawn a lot of nested vCPUs.
-		 */
-		if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
-			if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
-				vmx->nested.last_vpid = vmcs12->virtual_processor_id;
-				vpid_sync_context(nested_get_vpid02(vcpu));
-			}
-		} else {
-			/*
-			 * If L1 use EPT, then L0 needs to execute INVEPT on
-			 * EPTP02 instead of EPTP01. Therefore, delay TLB
-			 * flush until vmcs02->eptp is fully updated by
-			 * KVM_REQ_LOAD_MMU_PGD. Note that this assumes
-			 * KVM_REQ_TLB_FLUSH is evaluated after
-			 * KVM_REQ_LOAD_MMU_PGD in vcpu_enter_guest().
-			 */
-			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-		}
-	}
+	nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
 
 	if (nested_cpu_has_ept(vmcs12))
 		nested_ept_init_mmu_context(vcpu);
@@ -4033,24 +4050,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	if (!enable_ept)
 		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
 
-	/*
-	 * If vmcs01 doesn't use VPID, CPU flushes TLB on every
-	 * VMEntry/VMExit. Thus, no need to flush TLB.
-	 *
-	 * If vmcs12 doesn't use VPID, L1 expects TLB to be
-	 * flushed on every VMEntry/VMExit.
-	 *
-	 * Otherwise, we can preserve TLB entries as long as we are
-	 * able to tag L1 TLB entries differently than L2 TLB entries.
-	 *
-	 * If vmcs12 uses EPT, we need to execute this flush on EPTP01
-	 * and therefore we request the TLB flush to happen only after VMCS EPTP
-	 * has been set by KVM_REQ_LOAD_MMU_PGD.
-	 */
-	if (enable_vpid &&
-	    (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-	}
+	nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
 
 	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
 	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
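Both the new helper's comment and the removed comment in prepare_vmcs02() lean on one ordering guarantee: KVM_REQ_TLB_FLUSH is evaluated after KVM_REQ_LOAD_MMU_PGD in vcpu_enter_guest(), so the deferred flush runs only once vmcs02.EPTP is in place. Below is a minimal, hypothetical sketch of that ordering assumption; request_order_sketch() and the comment bodies are placeholders, not the real vcpu_enter_guest() implementation in arch/x86/kvm/x86.c.

/*
 * Illustrative sketch only: shows the assumed request-processing order
 * that nested_vmx_transition_tlb_flush() relies on when it defers the
 * flush via KVM_REQ_TLB_FLUSH.
 */
static void request_order_sketch(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) {
		/* the new MMU root (vmcs02.EPTP with nested EPT) is loaded here */
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* the deferred flush is serviced here, seeing the final EPTP */
	}
}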
