@@ -249,24 +249,22 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	return 0;
 }
 
-/*
- * The function is based on mtrr_type_lookup() in
- * arch/x86/kernel/cpu/mtrr/generic.c
- */
-static int get_mtrr_type(struct kvm_mtrr *mtrr_state,
-			 u64 start, u64 end)
+u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
-	u64 base, mask;
-	u8 prev_match, curr_match;
-	int i, num_var_ranges = KVM_NR_VAR_MTRR;
+	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
+	u64 base, mask, start;
+	int i, num_var_ranges, type;
+	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
+			       | (1 << MTRR_TYPE_WRTHROUGH);
+
+	start = gfn_to_gpa(gfn);
+	num_var_ranges = KVM_NR_VAR_MTRR;
+	type = -1;
 
 	/* MTRR is completely disabled, use UC for all of physical memory. */
 	if (!mtrr_is_enabled(mtrr_state))
 		return MTRR_TYPE_UNCACHABLE;
 
-	/* Make end inclusive end, instead of exclusive */
-	end--;
-
 	/* Look in fixed ranges. Just return the type as per start */
 	if (fixed_mtrr_is_enabled(mtrr_state) && (start < 0x100000)) {
 		int idx;
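
The rewritten entry point converts the guest frame number to a guest physical address up front with gfn_to_gpa(), instead of having a wrapper shift it manually (the deleted wrapper further down passes gfn << PAGE_SHIFT). A minimal standalone sketch of that conversion, assuming 4 KiB x86 pages; the typedefs and the _sketch name are illustrative, not KVM's:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;		/* guest frame number (illustrative typedef) */
typedef uint64_t gpa_t;		/* guest physical address (illustrative typedef) */

#define PAGE_SHIFT 12		/* 4 KiB pages */

/* What gfn_to_gpa() boils down to: frame number -> byte address. */
static gpa_t gfn_to_gpa_sketch(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

int main(void)
{
	/*
	 * Frame 0x100 starts at 0x100000, the boundary below which the
	 * fixed-range MTRR check above applies.
	 */
	printf("0x%llx\n", (unsigned long long)gfn_to_gpa_sketch(0x100));
	return 0;
}
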
@@ -291,60 +289,66 @@ static int get_mtrr_type(struct kvm_mtrr *mtrr_state,
 	 * Look of multiple ranges matching this address and pick type
 	 * as per MTRR precedence
 	 */
-	prev_match = 0xFF;
 	for (i = 0; i < num_var_ranges; ++i) {
-		unsigned short start_state, end_state;
+		int curr_type;
 
 		if (!(mtrr_state->var_ranges[i].mask & (1 << 11)))
 			continue;
 
 		base = mtrr_state->var_ranges[i].base & PAGE_MASK;
 		mask = mtrr_state->var_ranges[i].mask & PAGE_MASK;
 
-		start_state = ((start & mask) == (base & mask));
-		end_state = ((end & mask) == (base & mask));
-		if (start_state != end_state)
-			return 0xFE;
-
 		if ((start & mask) != (base & mask))
 			continue;
 
-		curr_match = mtrr_state->var_ranges[i].base & 0xff;
-		if (prev_match == 0xFF) {
-			prev_match = curr_match;
+		/*
+		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
+		 * Precedences.
+		 */
+
+		curr_type = mtrr_state->var_ranges[i].base & 0xff;
+		if (type == -1) {
+			type = curr_type;
 			continue;
 		}
 
-		if (prev_match == MTRR_TYPE_UNCACHABLE ||
-		    curr_match == MTRR_TYPE_UNCACHABLE)
+		/*
+		 * If two or more variable memory ranges match and the
+		 * memory types are identical, then that memory type is
+		 * used.
+		 */
+		if (type == curr_type)
+			continue;
+
+		/*
+		 * If two or more variable memory ranges match and one of
+		 * the memory types is UC, the UC memory type used.
+		 */
+		if (curr_type == MTRR_TYPE_UNCACHABLE)
 			return MTRR_TYPE_UNCACHABLE;
 
-		if ((prev_match == MTRR_TYPE_WRBACK &&
-		     curr_match == MTRR_TYPE_WRTHROUGH) ||
-		    (prev_match == MTRR_TYPE_WRTHROUGH &&
-		     curr_match == MTRR_TYPE_WRBACK)) {
-			prev_match = MTRR_TYPE_WRTHROUGH;
-			curr_match = MTRR_TYPE_WRTHROUGH;
+		/*
+		 * If two or more variable memory ranges match and the
+		 * memory types are WT and WB, the WT memory type is used.
+		 */
+		if (((1 << type) & wt_wb_mask) &&
+		    ((1 << curr_type) & wt_wb_mask)) {
+			type = MTRR_TYPE_WRTHROUGH;
+			continue;
 		}
 
-		if (prev_match != curr_match)
-			return MTRR_TYPE_UNCACHABLE;
+		/*
+		 * For overlaps not defined by the above rules, processor
+		 * behavior is undefined.
+		 */
+
+		/* We use WB for this undefined behavior. :( */
+		return MTRR_TYPE_WRBACK;
 	}
 
-	if (prev_match != 0xFF)
-		return prev_match;
+	if (type != -1)
+		return type;
 
 	return mtrr_default_type(mtrr_state);
 }
-
-u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
-	u8 mtrr;
-
-	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
-			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
-	if (mtrr == 0xfe || mtrr == 0xff)
-		mtrr = MTRR_TYPE_WRBACK;
-	return mtrr;
-}
 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
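
Taken together, the loop body above reconciles the types of overlapping variable ranges following Intel SDM 11.11.4.1: the first match is recorded, identical types are kept, a UC match wins outright, WT combined with WB yields WT, and any other overlap is architecturally undefined, which the patch resolves to WB. A minimal standalone sketch of that pairwise rule as it applies when exactly two ranges match; combine_mtrr_type() is a hypothetical helper, not part of the patch:

#include <stdio.h>

/* MTRR memory type encodings (Intel SDM, Vol. 3). */
#define MTRR_TYPE_UNCACHABLE	0
#define MTRR_TYPE_WRTHROUGH	4
#define MTRR_TYPE_WRBACK	6

/*
 * Hypothetical helper mirroring one iteration of the loop above:
 * "type" is the type accumulated so far (-1 if no range matched yet),
 * "curr_type" is the type of the variable range that just matched.
 */
static int combine_mtrr_type(int type, int curr_type)
{
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK) |
			       (1 << MTRR_TYPE_WRTHROUGH);

	if (type == -1)				/* first matching range */
		return curr_type;
	if (type == curr_type)			/* identical types: keep it */
		return type;
	if (curr_type == MTRR_TYPE_UNCACHABLE)	/* UC takes precedence */
		return MTRR_TYPE_UNCACHABLE;
	if (((1 << type) & wt_wb_mask) &&
	    ((1 << curr_type) & wt_wb_mask))	/* WT + WB -> WT */
		return MTRR_TYPE_WRTHROUGH;
	return MTRR_TYPE_WRBACK;		/* undefined overlap: pick WB */
}

int main(void)
{
	/* WB overlapping WT resolves to WT (prints 4). */
	printf("%d\n", combine_mtrr_type(MTRR_TYPE_WRBACK, MTRR_TYPE_WRTHROUGH));
	/* A later UC range overrides WB (prints 0). */
	printf("%d\n", combine_mtrr_type(MTRR_TYPE_WRBACK, MTRR_TYPE_UNCACHABLE));
	return 0;
}

Encoding WB and WT as bits in wt_wb_mask lets the WT/WB case be decided with two bit tests instead of the four-way comparison the removed code used.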