Commit 82a11e9

whitebrandy authored and bonzini committed
KVM: SVM: Add emulation support for #GP triggered by SVM instructions
While running SVM related instructions (VMRUN/VMSAVE/VMLOAD), some AMD CPUs check EAX against reserved memory regions (e.g. SMM memory on host) before checking VMCB's instruction intercept. If EAX falls into such memory areas, #GP is triggered before VMEXIT. This causes problems under nested virtualization. To solve this problem, KVM needs to trap #GP and check the instructions triggering #GP. For VM execution instructions, KVM emulates these instructions.

Co-developed-by: Wei Huang <[email protected]>
Signed-off-by: Wei Huang <[email protected]>
Signed-off-by: Bandan Das <[email protected]>
Message-Id: <[email protected]>
[Conditionally enable #GP intercept. - Paolo]
Signed-off-by: Paolo Bonzini <[email protected]>
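For background (not part of the commit itself): VMRUN, VMLOAD and VMSAVE all take the physical address of the VMCB in EAX/RAX, so a nested (L1) hypervisor executes them with RAX pointing at its VMCB page. A minimal, hypothetical guest-side sketch of such a call, with vmcb_pa standing in for wherever that address comes from:

/*
 * Illustrative only; vmcb_pa is a placeholder for the L1 hypervisor's
 * VMCB physical address, passed in RAX via the "a" constraint.  On the
 * affected AMD CPUs, if this address falls into a host-reserved region
 * (e.g. SMM memory), the CPU raises #GP before the VMEXIT that the
 * instruction intercept would normally deliver, which is the case this
 * patch handles by emulating the instruction.
 */
static inline void l1_vmsave(unsigned long vmcb_pa)
{
	asm volatile("vmsave %0" : : "a"(vmcb_pa) : "memory");
}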
1 parent 4aa2691 commit 82a11e9

File tree

1 file changed: +91 -18 lines changed

arch/x86/kvm/svm/svm.c

Lines changed: 91 additions & 18 deletions
@@ -200,6 +200,8 @@ module_param(sev_es, int, 0444);
 bool __read_mostly dump_invalid_vmcb;
 module_param(dump_invalid_vmcb, bool, 0644);
 
+bool svm_gp_erratum_intercept = true;
+
 static u8 rsm_ins_bytes[] = "\x0f\xaa";
 
 static void svm_complete_interrupts(struct vcpu_svm *svm);
@@ -288,6 +290,9 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	if (!(efer & EFER_SVME)) {
 		svm_leave_nested(svm);
 		svm_set_gif(svm, true);
+		/* #GP intercept is still needed for vmware backdoor */
+		if (!enable_vmware_backdoor)
+			clr_exception_intercept(svm, GP_VECTOR);
 
 		/*
 		 * Free the nested guest state, unless we are in SMM.
@@ -304,6 +309,9 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 			vcpu->arch.efer = old_efer;
 			return ret;
 		}
+
+		if (svm_gp_erratum_intercept)
+			set_exception_intercept(svm, GP_VECTOR);
 	}
 }
 
@@ -1962,24 +1970,6 @@ static int ac_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
-static int gp_interception(struct vcpu_svm *svm)
-{
-	struct kvm_vcpu *vcpu = &svm->vcpu;
-	u32 error_code = svm->vmcb->control.exit_info_1;
-
-	WARN_ON_ONCE(!enable_vmware_backdoor);
-
-	/*
-	 * VMware backdoor emulation on #GP interception only handles IN{S},
-	 * OUT{S}, and RDPMC, none of which generate a non-zero error code.
-	 */
-	if (error_code) {
-		kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
-		return 1;
-	}
-	return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
-}
-
 static bool is_erratum_383(void)
 {
 	int err, i;
@@ -2178,6 +2168,89 @@ static int vmrun_interception(struct vcpu_svm *svm)
 	return nested_svm_vmrun(svm);
 }
 
+enum {
+	NONE_SVM_INSTR,
+	SVM_INSTR_VMRUN,
+	SVM_INSTR_VMLOAD,
+	SVM_INSTR_VMSAVE,
+};
+
+/* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
+static int svm_instr_opcode(struct kvm_vcpu *vcpu)
+{
+	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+
+	if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
+		return NONE_SVM_INSTR;
+
+	switch (ctxt->modrm) {
+	case 0xd8: /* VMRUN */
+		return SVM_INSTR_VMRUN;
+	case 0xda: /* VMLOAD */
+		return SVM_INSTR_VMLOAD;
+	case 0xdb: /* VMSAVE */
+		return SVM_INSTR_VMSAVE;
+	default:
+		break;
+	}
+
+	return NONE_SVM_INSTR;
+}
+
+static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
+{
+	int (*const svm_instr_handlers[])(struct vcpu_svm *svm) = {
+		[SVM_INSTR_VMRUN] = vmrun_interception,
+		[SVM_INSTR_VMLOAD] = vmload_interception,
+		[SVM_INSTR_VMSAVE] = vmsave_interception,
+	};
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return svm_instr_handlers[opcode](svm);
+}
+
+/*
+ * #GP handling code. Note that #GP can be triggered under the following two
+ * cases:
+ *   1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
+ *      some AMD CPUs when EAX of these instructions are in the reserved memory
+ *      regions (e.g. SMM memory on host).
+ *   2) VMware backdoor
+ */
+static int gp_interception(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	u32 error_code = svm->vmcb->control.exit_info_1;
+	int opcode;
+
+	/* Both #GP cases have zero error_code */
+	if (error_code)
+		goto reinject;
+
+	/* Decode the instruction for usage later */
+	if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
+		goto reinject;
+
+	opcode = svm_instr_opcode(vcpu);
+
+	if (opcode == NONE_SVM_INSTR) {
+		if (!enable_vmware_backdoor)
+			goto reinject;
+
+		/*
+		 * VMware backdoor emulation on #GP interception only handles
		 * IN{S}, OUT{S}, and RDPMC.
+		 */
+		return kvm_emulate_instruction(vcpu,
+				EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
+	} else
+		return emulate_svm_instr(vcpu, opcode);
+
+reinject:
+	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+	return 1;
+}
+
 void svm_set_gif(struct vcpu_svm *svm, bool value)
 {
 	if (value) {
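A side note on the decode above: the three instructions svm_instr_opcode() recognizes share the two-byte opcode 0F 01 and differ only in their ModRM byte (VMRUN = 0F 01 D8, VMLOAD = 0F 01 DA, VMSAVE = 0F 01 DB), which is why the helper first checks ctxt->b and ctxt->opcode_len and then switches on ctxt->modrm. A small, self-contained userspace sketch (not kernel code, prefixes ignored for simplicity) of the same classification over raw instruction bytes:

#include <stdint.h>
#include <stddef.h>

enum { NOT_SVM, IS_VMRUN, IS_VMLOAD, IS_VMSAVE };

/*
 * Classify a raw instruction byte sequence the way the patch does:
 * VMRUN = 0F 01 D8, VMLOAD = 0F 01 DA, VMSAVE = 0F 01 DB.
 * Instruction prefixes are not handled here; illustration only.
 */
static int classify_svm_insn(const uint8_t *insn, size_t len)
{
	if (len < 3 || insn[0] != 0x0f || insn[1] != 0x01)
		return NOT_SVM;

	switch (insn[2]) {
	case 0xd8:
		return IS_VMRUN;
	case 0xda:
		return IS_VMLOAD;
	case 0xdb:
		return IS_VMSAVE;
	default:
		return NOT_SVM;
	}
}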
