Skip to content

Commit c5f983f

Browse files
whitebrandy authored and bonzini committed
nVMX: Implement emulated Page Modification Logging
With EPT A/D enabled, processor access to L2 guest paging structures will result in a write violation. When this happens, write the GUEST_PHYSICAL_ADDRESS to the pml buffer provided by L1 if the access is write and the dirty bit is being set. This patch also adds necessary checks during VMEntry if L1 has enabled PML. If the PML index overflows, we change the exit reason and run L1 to simulate a PML full event. Signed-off-by: Bandan Das <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent bab4165 commit c5f983f

File tree

1 file changed

+79
-2
lines changed

1 file changed

+79
-2
lines changed

arch/x86/kvm/vmx.c

Lines changed: 79 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -248,6 +248,7 @@ struct __packed vmcs12 {
248248
u64 xss_exit_bitmap;
249249
u64 guest_physical_address;
250250
u64 vmcs_link_pointer;
251+
u64 pml_address;
251252
u64 guest_ia32_debugctl;
252253
u64 guest_ia32_pat;
253254
u64 guest_ia32_efer;
@@ -369,6 +370,7 @@ struct __packed vmcs12 {
369370
u16 guest_ldtr_selector;
370371
u16 guest_tr_selector;
371372
u16 guest_intr_status;
373+
u16 guest_pml_index;
372374
u16 host_es_selector;
373375
u16 host_cs_selector;
374376
u16 host_ss_selector;
@@ -407,6 +409,7 @@ struct nested_vmx {
407409
/* Has the level1 guest done vmxon? */
408410
bool vmxon;
409411
gpa_t vmxon_ptr;
412+
bool pml_full;
410413

411414
/* The guest-physical address of the current VMCS L1 keeps for L2 */
412415
gpa_t current_vmptr;
@@ -742,6 +745,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
742745
FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
743746
FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
744747
FIELD(GUEST_INTR_STATUS, guest_intr_status),
748+
FIELD(GUEST_PML_INDEX, guest_pml_index),
745749
FIELD(HOST_ES_SELECTOR, host_es_selector),
746750
FIELD(HOST_CS_SELECTOR, host_cs_selector),
747751
FIELD(HOST_SS_SELECTOR, host_ss_selector),
@@ -767,6 +771,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
767771
FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
768772
FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
769773
FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
774+
FIELD64(PML_ADDRESS, pml_address),
770775
FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
771776
FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
772777
FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
@@ -1353,6 +1358,11 @@ static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
13531358
vmx_xsaves_supported();
13541359
}
13551360

1361+
static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
1362+
{
1363+
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
1364+
}
1365+
13561366
static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
13571367
{
13581368
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
@@ -9369,13 +9379,20 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
93699379
struct x86_exception *fault)
93709380
{
93719381
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
9382+
struct vcpu_vmx *vmx = to_vmx(vcpu);
93729383
u32 exit_reason;
9384+
unsigned long exit_qualification = vcpu->arch.exit_qualification;
93739385

9374-
if (fault->error_code & PFERR_RSVD_MASK)
9386+
if (vmx->nested.pml_full) {
9387+
exit_reason = EXIT_REASON_PML_FULL;
9388+
vmx->nested.pml_full = false;
9389+
exit_qualification &= INTR_INFO_UNBLOCK_NMI;
9390+
} else if (fault->error_code & PFERR_RSVD_MASK)
93759391
exit_reason = EXIT_REASON_EPT_MISCONFIG;
93769392
else
93779393
exit_reason = EXIT_REASON_EPT_VIOLATION;
9378-
nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification);
9394+
9395+
nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
93799396
vmcs12->guest_physical_address = fault->address;
93809397
}
93819398

@@ -9718,6 +9735,22 @@ static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
97189735
return 0;
97199736
}
97209737

9738+
static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
9739+
struct vmcs12 *vmcs12)
9740+
{
9741+
u64 address = vmcs12->pml_address;
9742+
int maxphyaddr = cpuid_maxphyaddr(vcpu);
9743+
9744+
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML)) {
9745+
if (!nested_cpu_has_ept(vmcs12) ||
9746+
!IS_ALIGNED(address, 4096) ||
9747+
address >> maxphyaddr)
9748+
return -EINVAL;
9749+
}
9750+
9751+
return 0;
9752+
}
9753+
97219754
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
97229755
struct vmx_msr_entry *e)
97239756
{
@@ -10253,6 +10286,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
1025310286
if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
1025410287
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
1025510288

10289+
if (nested_vmx_check_pml_controls(vcpu, vmcs12))
10290+
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
10291+
1025610292
if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
1025710293
vmx->nested.nested_vmx_procbased_ctls_low,
1025810294
vmx->nested.nested_vmx_procbased_ctls_high) ||
@@ -11151,6 +11187,46 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
1115111187
kvm_flush_pml_buffers(kvm);
1115211188
}
1115311189

11190+
static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
11191+
{
11192+
struct vmcs12 *vmcs12;
11193+
struct vcpu_vmx *vmx = to_vmx(vcpu);
11194+
gpa_t gpa;
11195+
struct page *page = NULL;
11196+
u64 *pml_address;
11197+
11198+
if (is_guest_mode(vcpu)) {
11199+
WARN_ON_ONCE(vmx->nested.pml_full);
11200+
11201+
/*
11202+
* Check if PML is enabled for the nested guest.
11203+
* Whether eptp bit 6 is set is already checked
11204+
* as part of A/D emulation.
11205+
*/
11206+
vmcs12 = get_vmcs12(vcpu);
11207+
if (!nested_cpu_has_pml(vmcs12))
11208+
return 0;
11209+
11210+
if (vmcs12->guest_pml_index > PML_ENTITY_NUM) {
11211+
vmx->nested.pml_full = true;
11212+
return 1;
11213+
}
11214+
11215+
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
11216+
11217+
page = nested_get_page(vcpu, vmcs12->pml_address);
11218+
if (!page)
11219+
return 0;
11220+
11221+
pml_address = kmap(page);
11222+
pml_address[vmcs12->guest_pml_index--] = gpa;
11223+
kunmap(page);
11224+
nested_release_page_clean(page);
11225+
}
11226+
11227+
return 0;
11228+
}
11229+
1115411230
static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
1115511231
struct kvm_memory_slot *memslot,
1115611232
gfn_t offset, unsigned long mask)
@@ -11510,6 +11586,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
1151011586
.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
1151111587
.flush_log_dirty = vmx_flush_log_dirty,
1151211588
.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
11589+
.write_log_dirty = vmx_write_pml_buffer,
1151311590

1151411591
.pre_block = vmx_pre_block,
1151511592
.post_block = vmx_post_block,

0 commit comments

Comments
 (0)