
Commit fa61213

Xiao Guangrong authored and bonzini committed
KVM: MTRR: simplify kvm_mtrr_get_guest_memory_type
mtrr_for_each_mem_type() is ready now, use it to simplify
kvm_mtrr_get_guest_memory_type().

Signed-off-by: Xiao Guangrong <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent f571c09 commit fa61213

File tree

1 file changed: +16 -48 lines changed


arch/x86/kvm/mtrr.c

Lines changed: 16 additions & 48 deletions
@@ -600,61 +600,23 @@ static void mtrr_lookup_next(struct mtrr_iter *iter)
 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
-	u64 base, mask, start;
-	int i, num_var_ranges, type;
+	struct mtrr_iter iter;
+	u64 start, end;
+	int type = -1;
 	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
 			       | (1 << MTRR_TYPE_WRTHROUGH);
 
 	start = gfn_to_gpa(gfn);
-	num_var_ranges = KVM_NR_VAR_MTRR;
-	type = -1;
-
-	/* MTRR is completely disabled, use UC for all of physical memory. */
-	if (!mtrr_is_enabled(mtrr_state))
-		return MTRR_TYPE_UNCACHABLE;
-
-	/* Look in fixed ranges. Just return the type as per start */
-	if (fixed_mtrr_is_enabled(mtrr_state) && (start < 0x100000)) {
-		int idx;
-
-		if (start < 0x80000) {
-			idx = 0;
-			idx += (start >> 16);
-			return mtrr_state->fixed_ranges[idx];
-		} else if (start < 0xC0000) {
-			idx = 1 * 8;
-			idx += ((start - 0x80000) >> 14);
-			return mtrr_state->fixed_ranges[idx];
-		} else if (start < 0x1000000) {
-			idx = 3 * 8;
-			idx += ((start - 0xC0000) >> 12);
-			return mtrr_state->fixed_ranges[idx];
-		}
-	}
-
-	/*
-	 * Look in variable ranges
-	 * Look of multiple ranges matching this address and pick type
-	 * as per MTRR precedence
-	 */
-	for (i = 0; i < num_var_ranges; ++i) {
-		int curr_type;
-
-		if (!(mtrr_state->var_ranges[i].mask & (1 << 11)))
-			continue;
-
-		base = mtrr_state->var_ranges[i].base & PAGE_MASK;
-		mask = mtrr_state->var_ranges[i].mask & PAGE_MASK;
+	end = start + PAGE_SIZE;
 
-		if ((start & mask) != (base & mask))
-			continue;
+	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
+		int curr_type = iter.mem_type;
 
 		/*
 		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
 		 * Precedences.
 		 */
 
-		curr_type = mtrr_state->var_ranges[i].base & 0xff;
 		if (type == -1) {
 			type = curr_type;
 			continue;
@@ -694,9 +656,15 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 		return MTRR_TYPE_WRBACK;
 	}
 
-	if (type != -1)
-		return type;
-
-	return mtrr_default_type(mtrr_state);
+	/* It is not covered by MTRRs. */
+	if (iter.partial_map) {
+		/*
+		 * We just check one page, partially covered by MTRRs is
+		 * impossible.
+		 */
+		WARN_ON(type != -1);
+		type = mtrr_default_type(mtrr_state);
+	}
+	return type;
 }
 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
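
For readers skimming the +/- noise, the resulting shape of the function is roughly the sketch below. It is assembled only from the two hunks above; the precedence merging between the hunks is unchanged by this commit and is summarized here as a comment rather than reproduced, so the exact code should be read from arch/x86/kvm/mtrr.c itself.

u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start = gfn_to_gpa(gfn), end = start + PAGE_SIZE;
	int type = -1;

	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		/*
		 * Merge iter.mem_type into 'type' following the Intel SDM
		 * 11.11.4.1 precedence rules (the unchanged code between
		 * the two hunks above).
		 */
	}

	/* A single page cannot be partially covered; fall back to default. */
	if (iter.partial_map) {
		WARN_ON(type != -1);
		type = mtrr_default_type(mtrr_state);
	}

	return type;
}

The MTRR-disabled and fixed-range cases that the old open-coded version handled inline are now covered by the mtrr_for_each_mem_type() iterator introduced earlier in this series, which is what lets the function drop to 16 lines from 48.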
