@@ -200,6 +200,8 @@ module_param(sev_es, int, 0444);
 bool __read_mostly dump_invalid_vmcb;
 module_param(dump_invalid_vmcb, bool, 0644);
 
+bool svm_gp_erratum_intercept = true;
+
 static u8 rsm_ins_bytes[] = "\x0f\xaa";
 
 static void svm_complete_interrupts(struct vcpu_svm *svm);
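Note: svm_gp_erratum_intercept gates a workaround for an AMD erratum in which VMRUN/VMLOAD/VMSAVE raise #GP instead of the expected intercept when the address in RAX falls in reserved memory (see the comment added above gp_interception() further down). With the flag set, KVM intercepts #GP so it can emulate the instruction itself.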
@@ -288,6 +290,9 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		if (!(efer & EFER_SVME)) {
 			svm_leave_nested(svm);
 			svm_set_gif(svm, true);
+			/* #GP intercept is still needed for vmware backdoor */
+			if (!enable_vmware_backdoor)
+				clr_exception_intercept(svm, GP_VECTOR);
 
 			/*
 			 * Free the nested guest state, unless we are in SMM.
@@ -304,6 +309,9 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 				vcpu->arch.efer = old_efer;
 				return ret;
 			}
+
+			if (svm_gp_erratum_intercept)
+				set_exception_intercept(svm, GP_VECTOR);
 		}
 	}
 
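Note: taken together, the two svm_set_efer() hunks keep the #GP intercept in sync with the guest's EFER.SVME bit: clearing SVME drops the intercept unless VMware backdoor emulation still needs it, while setting SVME re-installs it whenever the erratum workaround is enabled.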
@@ -1962,24 +1970,6 @@ static int ac_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
-static int gp_interception(struct vcpu_svm *svm)
-{
-	struct kvm_vcpu *vcpu = &svm->vcpu;
-	u32 error_code = svm->vmcb->control.exit_info_1;
-
-	WARN_ON_ONCE(!enable_vmware_backdoor);
-
-	/*
-	 * VMware backdoor emulation on #GP interception only handles IN{S},
-	 * OUT{S}, and RDPMC, none of which generate a non-zero error code.
-	 */
-	if (error_code) {
-		kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
-		return 1;
-	}
-	return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
-}
-
 static bool is_erratum_383(void)
 {
 	int err, i;
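Note: gp_interception() is moved rather than deleted; it is reintroduced after vmrun_interception() below, presumably so it can reference the VMRUN/VMLOAD/VMSAVE handlers, and is extended to cover the SVM-instruction erratum case in addition to the VMware backdoor.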
@@ -2178,6 +2168,89 @@ static int vmrun_interception(struct vcpu_svm *svm)
 	return nested_svm_vmrun(svm);
 }
 
+enum {
+	NONE_SVM_INSTR,
+	SVM_INSTR_VMRUN,
+	SVM_INSTR_VMLOAD,
+	SVM_INSTR_VMSAVE,
+};
+
+/* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
+static int svm_instr_opcode(struct kvm_vcpu *vcpu)
+{
+	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+
+	if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
+		return NONE_SVM_INSTR;
+
+	switch (ctxt->modrm) {
+	case 0xd8: /* VMRUN */
+		return SVM_INSTR_VMRUN;
+	case 0xda: /* VMLOAD */
+		return SVM_INSTR_VMLOAD;
+	case 0xdb: /* VMSAVE */
+		return SVM_INSTR_VMSAVE;
+	default:
+		break;
+	}
+
+	return NONE_SVM_INSTR;
+}
+
+static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
+{
+	int (*const svm_instr_handlers[])(struct vcpu_svm *svm) = {
+		[SVM_INSTR_VMRUN] = vmrun_interception,
+		[SVM_INSTR_VMLOAD] = vmload_interception,
+		[SVM_INSTR_VMSAVE] = vmsave_interception,
+	};
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return svm_instr_handlers[opcode](svm);
+}
+
+/*
+ * #GP handling code. Note that #GP can be triggered under the following two
+ * cases:
+ *   1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
+ *      some AMD CPUs when EAX of these instructions are in the reserved memory
+ *      regions (e.g. SMM memory on host).
+ *   2) VMware backdoor
+ */
+static int gp_interception(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	u32 error_code = svm->vmcb->control.exit_info_1;
+	int opcode;
+
+	/* Both #GP cases have zero error_code */
+	if (error_code)
+		goto reinject;
+
+	/* Decode the instruction for usage later */
+	if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
+		goto reinject;
+
+	opcode = svm_instr_opcode(vcpu);
+
+	if (opcode == NONE_SVM_INSTR) {
+		if (!enable_vmware_backdoor)
+			goto reinject;
+
+		/*
+		 * VMware backdoor emulation on #GP interception only handles
+		 * IN{S}, OUT{S}, and RDPMC.
+		 */
+		return kvm_emulate_instruction(vcpu,
+				EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
+	} else
+		return emulate_svm_instr(vcpu, opcode);
+
+reinject:
+	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+	return 1;
+}
+
 void svm_set_gif(struct vcpu_svm *svm, bool value)
 {
 	if (value) {
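For illustration, here is a minimal standalone sketch (ordinary user-space C, not kernel code) of the same ModRM-based decode that svm_instr_opcode() performs: VMRUN, VMLOAD and VMSAVE share the two-byte opcode 0F 01 and differ only in their ModRM byte (0xd8, 0xda and 0xdb respectively). The helper name decode_svm_instr() and the test bytes are illustrative, not part of the patch.

#include <stdio.h>

enum svm_instr {
	NONE_SVM_INSTR,
	SVM_INSTR_VMRUN,
	SVM_INSTR_VMLOAD,
	SVM_INSTR_VMSAVE,
};

/* Map raw instruction bytes to the enum above; mirrors svm_instr_opcode(). */
static enum svm_instr decode_svm_instr(const unsigned char *insn, unsigned long len)
{
	/* All three instructions are "0F 01 /modrm". */
	if (len < 3 || insn[0] != 0x0f || insn[1] != 0x01)
		return NONE_SVM_INSTR;

	switch (insn[2]) {	/* ModRM byte */
	case 0xd8: return SVM_INSTR_VMRUN;
	case 0xda: return SVM_INSTR_VMLOAD;
	case 0xdb: return SVM_INSTR_VMSAVE;
	default:   return NONE_SVM_INSTR;
	}
}

int main(void)
{
	const unsigned char vmrun[]  = { 0x0f, 0x01, 0xd8 };
	const unsigned char vmsave[] = { 0x0f, 0x01, 0xdb };

	/* Prints "1 3" (SVM_INSTR_VMRUN, SVM_INSTR_VMSAVE). */
	printf("%d %d\n", decode_svm_instr(vmrun, 3), decode_svm_instr(vmsave, 3));
	return 0;
}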