@@ -248,6 +248,7 @@ struct __packed vmcs12 {
 	u64 xss_exit_bitmap;
 	u64 guest_physical_address;
 	u64 vmcs_link_pointer;
+	u64 pml_address;
 	u64 guest_ia32_debugctl;
 	u64 guest_ia32_pat;
 	u64 guest_ia32_efer;
@@ -369,6 +370,7 @@ struct __packed vmcs12 {
 	u16 guest_ldtr_selector;
 	u16 guest_tr_selector;
 	u16 guest_intr_status;
+	u16 guest_pml_index;
 	u16 host_es_selector;
 	u16 host_cs_selector;
 	u16 host_ss_selector;
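Both new vmcs12 fields shadow real VMCS fields. For reference, the companion change to arch/x86/include/asm/vmx.h (not part of this diff) adds the field encodings; the values below follow the Intel SDM and are wrapped in a standalone enum here purely for illustration:

/* For context: encodings added to enum vmcs_field in
 * arch/x86/include/asm/vmx.h by the companion patch (values per the
 * Intel SDM).
 */
enum vmcs_field_pml {
	GUEST_PML_INDEX	= 0x00000812,	/* 16-bit guest-state field */
	PML_ADDRESS	= 0x0000200e,	/* 64-bit control field */
};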
@@ -407,6 +409,7 @@ struct nested_vmx {
 	/* Has the level1 guest done vmxon? */
 	bool vmxon;
 	gpa_t vmxon_ptr;
+	bool pml_full;

 	/* The guest-physical address of the current VMCS L1 keeps for L2 */
 	gpa_t current_vmptr;
@@ -742,6 +745,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
 	FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
 	FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
 	FIELD(GUEST_INTR_STATUS, guest_intr_status),
+	FIELD(GUEST_PML_INDEX, guest_pml_index),
 	FIELD(HOST_ES_SELECTOR, host_es_selector),
 	FIELD(HOST_CS_SELECTOR, host_cs_selector),
 	FIELD(HOST_SS_SELECTOR, host_ss_selector),
@@ -767,6 +771,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
 	FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
 	FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
 	FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
+	FIELD64(PML_ADDRESS, pml_address),
 	FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
 	FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
 	FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
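This offset table is what lets L0 emulate VMREAD/VMWRITE against L1's shadow VMCS: each FIELD entry maps a hardware field encoding to the byte offset of the corresponding vmcs12 member, and FIELD64 additionally maps the field's _HIGH alias to the upper 4 bytes. The macros are defined earlier in this same vmx.c, roughly as:

#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
#define FIELD(number, name)	[number] = VMCS12_OFFSET(name)
#define FIELD64(number, name)	[number] = VMCS12_OFFSET(name), \
				[number##_HIGH] = VMCS12_OFFSET(name)+4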
@@ -1353,6 +1358,11 @@ static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
 		vmx_xsaves_supported();
 }

+static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
+{
+	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
+}
+
 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
 {
 	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
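Like the other nested_cpu_has_*() predicates, the new helper only reports PML as enabled when L1 has both activated the secondary controls and set the PML bit. For context, nested_cpu_has2() is already defined elsewhere in vmx.c as:

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}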
@@ -9369,13 +9379,20 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 		struct x86_exception *fault)
 {
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exit_reason;
+	unsigned long exit_qualification = vcpu->arch.exit_qualification;

-	if (fault->error_code & PFERR_RSVD_MASK)
+	if (vmx->nested.pml_full) {
+		exit_reason = EXIT_REASON_PML_FULL;
+		vmx->nested.pml_full = false;
+		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
+	} else if (fault->error_code & PFERR_RSVD_MASK)
 		exit_reason = EXIT_REASON_EPT_MISCONFIG;
 	else
 		exit_reason = EXIT_REASON_EPT_VIOLATION;
-	nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification);
+
+	nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
 	vmcs12->guest_physical_address = fault->address;
 }

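On a PML-full exit, the SDM defines only exit-qualification bit 12 (NMI unblocking due to IRET), so the saved qualification is masked down to that single bit. The mask reuses INTR_INFO_UNBLOCK_NMI because that constant, defined near the top of vmx.c, happens to be the same bit position:

#define INTR_INFO_UNBLOCK_NMI	0x1000	/* 12 */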
@@ -9718,6 +9735,22 @@ static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
 	return 0;
 }

+static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
+					 struct vmcs12 *vmcs12)
+{
+	u64 address = vmcs12->pml_address;
+	int maxphyaddr = cpuid_maxphyaddr(vcpu);
+
+	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML)) {
+		if (!nested_cpu_has_ept(vmcs12) ||
+		    !IS_ALIGNED(address, 4096) ||
+		    address >> maxphyaddr)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
 				       struct vmx_msr_entry *e)
 {
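The new check implements the SDM's VM-entry rules for PML: EPT must be enabled, the log address must be 4 KiB aligned, and it must fit in the guest's physical-address width; address >> maxphyaddr is nonzero exactly when some bit at position maxphyaddr or above is set. A standalone sketch of the address test, assuming a MAXPHYADDR of 36 (userspace illustration, not kernel code):

#include <stdbool.h>
#include <stdint.h>

static bool pml_address_ok(uint64_t address, int maxphyaddr)
{
	return (address & 0xFFF) == 0 &&	/* 4 KiB aligned */
	       (address >> maxphyaddr) == 0;	/* within MAXPHYADDR */
}

/* pml_address_ok(0x0000000ff0000000ULL, 36) -> true  (highest bit is 35)
 * pml_address_ok(0x0000001000000000ULL, 36) -> false (bit 36 set)
 */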
@@ -10253,6 +10286,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;

+	if (nested_vmx_check_pml_controls(vcpu, vmcs12))
+		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
 	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
 				vmx->nested.nested_vmx_procbased_ctls_low,
 				vmx->nested.nested_vmx_procbased_ctls_high) ||
@@ -11151,6 +11187,46 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
 	kvm_flush_pml_buffers(kvm);
 }

+static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
+{
+	struct vmcs12 *vmcs12;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	gpa_t gpa;
+	struct page *page = NULL;
+	u64 *pml_address;
+
+	if (is_guest_mode(vcpu)) {
+		WARN_ON_ONCE(vmx->nested.pml_full);
+
+		/*
+		 * Check if PML is enabled for the nested guest.
+		 * Whether eptp bit 6 is set is already checked
+		 * as part of A/D emulation.
+		 */
+		vmcs12 = get_vmcs12(vcpu);
+		if (!nested_cpu_has_pml(vmcs12))
+			return 0;
+
+		if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
+			vmx->nested.pml_full = true;
+			return 1;
+		}
+
+		gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+
+		page = nested_get_page(vcpu, vmcs12->pml_address);
+		if (!page)
+			return 0;
+
+		pml_address = kmap(page);
+		pml_address[vmcs12->guest_pml_index--] = gpa;
+		kunmap(page);
+		nested_release_page_clean(page);
+	}
+
+	return 0;
+}
+
 static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
 					   struct kvm_memory_slot *memslot,
 					   gfn_t offset, unsigned long mask)
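The emulation mirrors hardware PML: guest_pml_index counts down from PML_ENTITY_NUM - 1 (511, since vmx.c defines PML_ENTITY_NUM as 512 entries per 4 KiB page), each logged write stores the page-aligned GPA at the current index, and an out-of-range index means the buffer is full, so a PML-full exit is synthesized for L1 instead. Note the bounds check must be >= PML_ENTITY_NUM, as index 512 would write past the mapped page. A hypothetical L1-side handler, sketched here only to illustrate the buffer layout (mark_gpa_dirty is a made-up helper):

#define PML_ENTITY_NUM	512	/* matches the definition in vmx.c */

static void l1_handle_pml_full(u64 *pml_page, u16 *pml_index)
{
	u16 i;

	/* Valid entries occupy indexes *pml_index + 1 .. PML_ENTITY_NUM - 1;
	 * on a full buffer the index has wrapped to 0xFFFF, so i starts at 0
	 * and the whole page is drained.
	 */
	for (i = *pml_index + 1; i < PML_ENTITY_NUM; i++)
		mark_gpa_dirty(pml_page[i]);	/* made-up helper */

	*pml_index = PML_ENTITY_NUM - 1;	/* restart logging */
}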
@@ -11510,6 +11586,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
 	.flush_log_dirty = vmx_flush_log_dirty,
 	.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
+	.write_log_dirty = vmx_write_pml_buffer,

 	.pre_block = vmx_pre_block,
 	.post_block = vmx_post_block,
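The new hook slots into kvm_x86_ops next to the other PML/dirty-logging callbacks. Its caller lives in the mmu.c half of this same series: when the shadow page-table walker sets a dirty bit on behalf of L2, it calls kvm_arch_write_log_dirty(), which forwards to this implementation, roughly:

/* From the mmu.c side of the series (quoted for context): */
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->write_log_dirty)
		return kvm_x86_ops->write_log_dirty(vcpu);

	return 0;
}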