
Commit 8eef912

Ard Biesheuvel authored and chazy committed
arm/arm64: KVM: map MMIO regions at creation time
There is really no point in faulting in memory regions page by page if they
are not backed by demand paged system RAM but by a linear passthrough mapping
of a host MMIO region. So instead, detect such regions at setup time and
install the mappings for the backing all at once.

Acked-by: Marc Zyngier <[email protected]>
Reviewed-by: Christoffer Dall <[email protected]>
Signed-off-by: Ard Biesheuvel <[email protected]>
Signed-off-by: Christoffer Dall <[email protected]>
1 parent 4a513fb commit 8eef912
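
As context for the setup path this commit optimizes, the sketch below shows, in illustrative form only, how userspace typically ends up with a VM_PFNMAP-backed memory slot: it mmap()s a device MMIO BAR and registers the mapping with KVM via KVM_SET_USER_MEMORY_REGION. The descriptor names (dev_fd, vm_fd), the slot number, and the guest address are hypothetical placeholders and are not taken from the patch; only the ioctl and the struct kvm_userspace_memory_region layout are existing KVM UAPI.

/*
 * Illustrative userspace sketch (not part of the patch): register an
 * mmap()'d MMIO BAR as a KVM memory slot. dev_fd, vm_fd, bar_size,
 * guest_mmio_base and the slot number are hypothetical placeholders.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int map_bar_into_guest(int vm_fd, int dev_fd, size_t bar_size,
                              __u64 guest_mmio_base)
{
        /* mmap() of a device BAR typically yields a VM_PFNMAP VMA. */
        void *bar = mmap(NULL, bar_size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, dev_fd, 0);
        if (bar == MAP_FAILED)
                return -1;

        struct kvm_userspace_memory_region region = {
                .slot            = 1,
                .flags           = 0,
                .guest_phys_addr = guest_mmio_base,
                .memory_size     = bar_size,
                .userspace_addr  = (__u64)(unsigned long)bar,
        };

        /*
         * With this patch, kvm_arch_prepare_memory_region() walks the VMAs
         * behind userspace_addr and installs the stage-2 mappings for
         * VM_PFNMAP regions here, at memslot creation, instead of faulting
         * them in page by page.
         */
        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}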

File tree

1 file changed: +69 -8 lines changed


arch/arm/kvm/mmu.c

Lines changed: 69 additions & 8 deletions
@@ -1134,21 +1134,76 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                                    const struct kvm_memory_slot *old,
                                    enum kvm_mr_change change)
 {
-        gpa_t gpa = old->base_gfn << PAGE_SHIFT;
-        phys_addr_t size = old->npages << PAGE_SHIFT;
-        if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
-                spin_lock(&kvm->mmu_lock);
-                unmap_stage2_range(kvm, gpa, size);
-                spin_unlock(&kvm->mmu_lock);
-        }
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot,
                                    struct kvm_userspace_memory_region *mem,
                                    enum kvm_mr_change change)
 {
-        return 0;
+        hva_t hva = mem->userspace_addr;
+        hva_t reg_end = hva + mem->memory_size;
+        bool writable = !(mem->flags & KVM_MEM_READONLY);
+        int ret = 0;
+
+        if (change != KVM_MR_CREATE && change != KVM_MR_MOVE)
+                return 0;
+
+        /*
+         * A memory region could potentially cover multiple VMAs, and any holes
+         * between them, so iterate over all of them to find out if we can map
+         * any of them right now.
+         *
+         *     +--------------------------------------------+
+         * +---------------+----------------+   +----------------+
+         * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
+         * +---------------+----------------+   +----------------+
+         *     |               memory region                |
+         *     +--------------------------------------------+
+         */
+        do {
+                struct vm_area_struct *vma = find_vma(current->mm, hva);
+                hva_t vm_start, vm_end;
+
+                if (!vma || vma->vm_start >= reg_end)
+                        break;
+
+                /*
+                 * Mapping a read-only VMA is only allowed if the
+                 * memory region is configured as read-only.
+                 */
+                if (writable && !(vma->vm_flags & VM_WRITE)) {
+                        ret = -EPERM;
+                        break;
+                }
+
+                /*
+                 * Take the intersection of this VMA with the memory region
+                 */
+                vm_start = max(hva, vma->vm_start);
+                vm_end = min(reg_end, vma->vm_end);
+
+                if (vma->vm_flags & VM_PFNMAP) {
+                        gpa_t gpa = mem->guest_phys_addr +
+                                    (vm_start - mem->userspace_addr);
+                        phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
+                                         vm_start - vma->vm_start;
+
+                        ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
+                                                    vm_end - vm_start,
+                                                    writable);
+                        if (ret)
+                                break;
+                }
+                hva = vm_end;
+        } while (hva < reg_end);
+
+        if (ret) {
+                spin_lock(&kvm->mmu_lock);
+                unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
+                spin_unlock(&kvm->mmu_lock);
+        }
+        return ret;
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -1173,4 +1228,10 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                    struct kvm_memory_slot *slot)
 {
+        gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+        phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+        spin_lock(&kvm->mmu_lock);
+        unmap_stage2_range(kvm, gpa, size);
+        spin_unlock(&kvm->mmu_lock);
 }
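
For readers tracing the VM_PFNMAP branch in the first hunk, the following standalone restatement spells out how the guest physical and host physical base addresses handed to kvm_phys_addr_ioremap() are derived from the VMA/region intersection. This is an illustrative sketch, not code from the patch: it uses plain integer types, a reduced vma_like structure, and assumes PAGE_SHIFT is 12.

/*
 * Standalone restatement of the PFNMAP address arithmetic in the hunk
 * above. Illustrative only: plain integer types, userspace build.
 */
#include <stdint.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

struct vma_like {
        uint64_t vm_start;      /* userspace VA where the VMA begins */
        uint64_t vm_pgoff;      /* first backing PFN for a VM_PFNMAP VMA */
};

/*
 * Given the intersection [vm_start, vm_end) of a VMA with the memory
 * region, compute the (gpa, pa, size) triple that the patch passes to
 * kvm_phys_addr_ioremap().
 */
static void pfnmap_window(uint64_t guest_phys_addr, uint64_t userspace_addr,
                          const struct vma_like *vma,
                          uint64_t vm_start, uint64_t vm_end,
                          uint64_t *gpa, uint64_t *pa, uint64_t *size)
{
        /* Offset of this window into the memory region selects the gpa. */
        *gpa = guest_phys_addr + (vm_start - userspace_addr);

        /*
         * vm_pgoff holds the PFN backing vm_start; shift it to a byte
         * address and add the offset of the window into the VMA.
         */
        *pa = (vma->vm_pgoff << PAGE_SHIFT) + (vm_start - vma->vm_start);

        *size = vm_end - vm_start;
}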
