Skip to content

Commit 761e416

Browse files
Krish Sadhukhan authored
and bonzini committed
KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests
According to section "Canonicalization and Consistency Checks" in APM vol. 2 the following guest state is illegal: "Any MBZ bit of CR3 is set." "Any MBZ bit of CR4 is set." Suggested-by: Paolo Bonzini <[email protected]> Signed-off-by: Krish Sadhukhan <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 53efe52 commit 761e416

File tree

4 files changed

+31
-4
lines changed

4 files changed

+31
-4
lines changed

arch/x86/kvm/svm/nested.c

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -222,8 +222,9 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
222222
return true;
223223
}
224224

225-
static bool nested_vmcb_checks(struct vmcb *vmcb)
225+
static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
226226
{
227+
bool nested_vmcb_lma;
227228
if ((vmcb->save.efer & EFER_SVME) == 0)
228229
return false;
229230

@@ -234,6 +235,27 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
234235
if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7))
235236
return false;
236237

238+
nested_vmcb_lma =
239+
(vmcb->save.efer & EFER_LME) &&
240+
(vmcb->save.cr0 & X86_CR0_PG);
241+
242+
if (!nested_vmcb_lma) {
243+
if (vmcb->save.cr4 & X86_CR4_PAE) {
244+
if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
245+
return false;
246+
} else {
247+
if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
248+
return false;
249+
}
250+
} else {
251+
if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
252+
!(vmcb->save.cr0 & X86_CR0_PE) ||
253+
(vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
254+
return false;
255+
}
256+
if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
257+
return false;
258+
237259
return nested_vmcb_check_controls(&vmcb->control);
238260
}
239261

@@ -419,7 +441,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
419441

420442
nested_vmcb = map.hva;
421443

422-
if (!nested_vmcb_checks(nested_vmcb)) {
444+
if (!nested_vmcb_checks(svm, nested_vmcb)) {
423445
nested_vmcb->control.exit_code = SVM_EXIT_ERR;
424446
nested_vmcb->control.exit_code_hi = 0;
425447
nested_vmcb->control.exit_info_1 = 0;

arch/x86/kvm/svm/svm.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -343,7 +343,10 @@ static inline bool gif_set(struct vcpu_svm *svm)
343343
}
344344

345345
/* svm.c */
346-
#define MSR_INVALID 0xffffffffU
346+
#define MSR_CR3_LEGACY_RESERVED_MASK 0xfe7U
347+
#define MSR_CR3_LEGACY_PAE_RESERVED_MASK 0x7U
348+
#define MSR_CR3_LONG_RESERVED_MASK 0xfff0000000000fe7U
349+
#define MSR_INVALID 0xffffffffU
347350

348351
u32 svm_msrpm_offset(u32 msr);
349352
void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);

arch/x86/kvm/x86.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -955,7 +955,7 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
955955
}
956956
EXPORT_SYMBOL_GPL(kvm_set_xcr);
957957

958-
static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
958+
int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
959959
{
960960
if (cr4 & cr4_reserved_bits)
961961
return -EINVAL;
@@ -965,6 +965,7 @@ static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
965965

966966
return 0;
967967
}
968+
EXPORT_SYMBOL_GPL(kvm_valid_cr4);
968969

969970
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
970971
{

arch/x86/kvm/x86.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -369,6 +369,7 @@ static inline bool kvm_dr6_valid(u64 data)
369369
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
370370
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
371371
u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
372+
int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
372373
bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
373374

374375
#define KVM_MSR_RET_INVALID 2

0 commit comments

Comments
 (0)