@@ -1134,21 +1134,76 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   const struct kvm_memory_slot *old,
 				   enum kvm_mr_change change)
 {
-	gpa_t gpa = old->base_gfn << PAGE_SHIFT;
-	phys_addr_t size = old->npages << PAGE_SHIFT;
-	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
-		spin_lock(&kvm->mmu_lock);
-		unmap_stage2_range(kvm, gpa, size);
-		spin_unlock(&kvm->mmu_lock);
-	}
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_userspace_memory_region *mem,
 				   enum kvm_mr_change change)
 {
-	return 0;
+	hva_t hva = mem->userspace_addr;
+	hva_t reg_end = hva + mem->memory_size;
+	bool writable = !(mem->flags & KVM_MEM_READONLY);
+	int ret = 0;
+
+	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE)
+		return 0;
+
+	/*
+	 * A memory region could potentially cover multiple VMAs, and any holes
+	 * between them, so iterate over all of them to find out if we can map
+	 * any of them right now.
+	 *
+	 *     +--------------------------------------------+
+	 * +---------------+----------------+   +----------------+
+	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
+	 * +---------------+----------------+   +----------------+
+	 *     |               memory region                |
+	 *     +--------------------------------------------+
+	 */
+	do {
+		struct vm_area_struct *vma = find_vma(current->mm, hva);
+		hva_t vm_start, vm_end;
+
+		if (!vma || vma->vm_start >= reg_end)
+			break;
+
+		/*
+		 * Mapping a read-only VMA is only allowed if the
+		 * memory region is configured as read-only.
+		 */
+		if (writable && !(vma->vm_flags & VM_WRITE)) {
+			ret = -EPERM;
+			break;
+		}
+
+		/*
+		 * Take the intersection of this VMA with the memory region
+		 */
+		vm_start = max(hva, vma->vm_start);
+		vm_end = min(reg_end, vma->vm_end);
+
+		if (vma->vm_flags & VM_PFNMAP) {
+			gpa_t gpa = mem->guest_phys_addr +
+				    (vm_start - mem->userspace_addr);
+			phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
+					 vm_start - vma->vm_start;
+
+			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
+						    vm_end - vm_start,
+						    writable);
+			if (ret)
+				break;
+		}
+		hva = vm_end;
+	} while (hva < reg_end);
+
+	if (ret) {
+		spin_lock(&kvm->mmu_lock);
+		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
+		spin_unlock(&kvm->mmu_lock);
+	}
+	return ret;
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -1173,4 +1228,10 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot)
 {
+	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+	spin_lock(&kvm->mmu_lock);
+	unmap_stage2_range(kvm, gpa, size);
+	spin_unlock(&kvm->mmu_lock);
 }
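
To make the new mapping loop easier to follow, below is a minimal standalone sketch of the per-VMA arithmetic it performs: the intersection of a VM_PFNMAP VMA with the memory region, and the gpa/pa pair that would be handed to kvm_phys_addr_ioremap. This is an ordinary userspace program, not kernel code; all addresses, sizes, the page offset and the EXAMPLE_PAGE_SHIFT constant are invented inputs used only for illustration.

/*
 * Standalone userspace sketch (not part of the patch): it walks through the
 * arithmetic the new loop in kvm_arch_prepare_memory_region applies to one
 * VM_PFNMAP VMA. Every value below is a made-up example input.
 */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SHIFT 12

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* Hypothetical memslot: guest physical base, backing HVA and size. */
	uint64_t guest_phys_addr = 0x10000000ULL;
	uint64_t userspace_addr  = 0x7f2a00000000ULL;
	uint64_t memory_size     = 0x00200000ULL;	/* 2 MiB */

	/* Hypothetical VM_PFNMAP VMA overlapping the tail of that memslot. */
	uint64_t vma_start = 0x7f2a00100000ULL;
	uint64_t vma_end   = 0x7f2a00400000ULL;
	uint64_t vma_pgoff = 0x3f200ULL;	/* device lives at PA 0x3f200000 */

	uint64_t hva = userspace_addr;
	uint64_t reg_end = hva + memory_size;

	/* Intersection of the VMA with the memory region, as in the patch. */
	uint64_t vm_start = max_u64(hva, vma_start);
	uint64_t vm_end   = min_u64(reg_end, vma_end);

	/*
	 * Same arithmetic as the VM_PFNMAP branch: gpa advances by the offset
	 * into the memslot, pa by the offset into the VMA's physical backing.
	 */
	uint64_t gpa = guest_phys_addr + (vm_start - userspace_addr);
	uint64_t pa  = (vma_pgoff << EXAMPLE_PAGE_SHIFT) + (vm_start - vma_start);

	printf("ioremap gpa 0x%llx -> pa 0x%llx, size 0x%llx\n",
	       (unsigned long long)gpa, (unsigned long long)pa,
	       (unsigned long long)(vm_end - vm_start));
	return 0;
}

With these inputs it prints "ioremap gpa 0x10100000 -> pa 0x3f200000, size 0x100000": the hypothetical VMA covers only the last megabyte of the 2 MiB region, so only that slice would be mapped, and the loop would then terminate because hva has reached reg_end.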