@@ -68,6 +68,7 @@ struct vma_remap_struct {
 	bool mlocked;			/* Was the VMA mlock()'d? */
 	enum mremap_type remap_type;	/* expand, shrink, etc. */
 	bool mmap_locked;		/* Is mm currently write-locked? */
+	unsigned long charged;		/* If VM_ACCOUNT, # pages to account. */
 };
 
 static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
@@ -816,35 +817,88 @@ static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm)
 	return 0;
 }
 
-static unsigned long move_vma(struct vm_area_struct *vma,
-		unsigned long old_addr, unsigned long old_len,
-		unsigned long new_len, unsigned long new_addr,
-		bool *mlocked, unsigned long flags,
-		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
+/*
+ * Keep track of pages which have been added to the memory mapping. If the VMA
+ * is accounted, also check to see if there is sufficient memory.
+ *
+ * Returns true on success, false if insufficient memory to charge.
+ */
+static bool vrm_charge(struct vma_remap_struct *vrm)
 {
-	long to_account = new_len - old_len;
-	struct mm_struct *mm = vma->vm_mm;
-	struct vm_area_struct *new_vma;
-	unsigned long vm_flags = vma->vm_flags;
-	unsigned long new_pgoff;
-	unsigned long moved_len;
-	bool account_start = false;
-	bool account_end = false;
-	unsigned long hiwater_vm;
-	int err = 0;
-	bool need_rmap_locks;
-	struct vma_iterator vmi;
+	unsigned long charged;
+
+	if (!(vrm->vma->vm_flags & VM_ACCOUNT))
+		return true;
+
+	/*
+	 * If we don't unmap the old mapping, then we account the entirety of
+	 * the length of the new one. Otherwise it's just the delta in size.
+	 */
+	if (vrm->flags & MREMAP_DONTUNMAP)
+		charged = vrm->new_len >> PAGE_SHIFT;
+	else
+		charged = vrm->delta >> PAGE_SHIFT;
+
+
+	/* This accounts 'charged' pages of memory. */
+	if (security_vm_enough_memory_mm(current->mm, charged))
+		return false;
+
+	vrm->charged = charged;
+	return true;
+}
+
+/*
+ * An error has occurred, so we will not be using vrm->charged memory.
+ * Unaccount this memory if the VMA is accounted.
+ */
+static void vrm_uncharge(struct vma_remap_struct *vrm)
+{
+	if (!(vrm->vma->vm_flags & VM_ACCOUNT))
+		return;
+
+	vm_unacct_memory(vrm->charged);
+	vrm->charged = 0;
+}
+
+/*
+ * Update mm exec_vm, stack_vm, data_vm, and locked_vm fields as needed to
+ * account for 'bytes' memory used, and if locked, indicate this in the VRM so
+ * we can handle this correctly later.
+ */
+static void vrm_stat_account(struct vma_remap_struct *vrm,
+			     unsigned long bytes)
+{
+	unsigned long pages = bytes >> PAGE_SHIFT;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma = vrm->vma;
+
+	vm_stat_account(mm, vma->vm_flags, pages);
+	if (vma->vm_flags & VM_LOCKED) {
+		mm->locked_vm += pages;
+		vrm->mlocked = true;
+	}
+}
+
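The helpers above capture the accounting rule in one place. To make the MREMAP_DONTUNMAP distinction concrete, here is a small userspace program (illustrative only, not kernel code; it assumes 4 KiB pages and hard-codes the MREMAP_DONTUNMAP value from <linux/mman.h>) reproducing the arithmetic vrm_charge() performs:

    /* Illustration only: vrm_charge()'s arithmetic, redone in userspace. */
    #include <stdio.h>

    #define PAGE_SHIFT       12 /* assumes 4 KiB pages */
    #define MREMAP_DONTUNMAP 4  /* value from <linux/mman.h> */

    static unsigned long charge(unsigned long old_len, unsigned long new_len,
                                unsigned long flags)
    {
        unsigned long delta = new_len - old_len;

        /* Same rule as above: keeping the old mapping alive means the whole
         * new length is fresh accountable memory; otherwise only the growth. */
        if (flags & MREMAP_DONTUNMAP)
            return new_len >> PAGE_SHIFT;
        return delta >> PAGE_SHIFT;
    }

    int main(void)
    {
        unsigned long old_len = 2UL << PAGE_SHIFT; /* 8 KiB */
        unsigned long new_len = 5UL << PAGE_SHIFT; /* 20 KiB */

        printf("plain move: %lu pages\n", charge(old_len, new_len, 0)); /* 3 */
        printf("dontunmap:  %lu pages\n",
               charge(old_len, new_len, MREMAP_DONTUNMAP));             /* 5 */
        return 0;
    }

So growing a 2-page mapping to 5 pages charges 3 pages, while keeping the old mapping alive with MREMAP_DONTUNMAP charges all 5.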
+/*
+ * Perform checks before attempting to write a VMA prior to it being
+ * moved.
+ */
+static unsigned long prep_move_vma(struct vma_remap_struct *vrm,
+				   unsigned long *vm_flags_ptr)
+{
+	unsigned long err = 0;
+	struct vm_area_struct *vma = vrm->vma;
+	unsigned long old_addr = vrm->addr;
+	unsigned long old_len = vrm->old_len;
 
 	/*
 	 * We'd prefer to avoid failure later on in do_munmap:
 	 * which may split one vma into three before unmapping.
 	 */
-	if (mm->map_count >= sysctl_max_map_count - 3)
+	if (current->mm->map_count >= sysctl_max_map_count - 3)
 		return -ENOMEM;
 
-	if (unlikely(flags & MREMAP_DONTUNMAP))
-		to_account = new_len;
-
 	if (vma->vm_ops && vma->vm_ops->may_split) {
 		if (vma->vm_start != old_addr)
 			err = vma->vm_ops->may_split(vma, old_addr);
@@ -862,22 +916,46 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	 * so KSM can come around to merge on vma and new_vma afterwards.
 	 */
 	err = ksm_madvise(vma, old_addr, old_addr + old_len,
-			  MADV_UNMERGEABLE, &vm_flags);
+			  MADV_UNMERGEABLE, vm_flags_ptr);
 	if (err)
 		return err;
 
-	if (vm_flags & VM_ACCOUNT) {
-		if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
-			return -ENOMEM;
-	}
+	return 0;
+}
+
+static unsigned long move_vma(struct vma_remap_struct *vrm)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma = vrm->vma;
+	struct vm_area_struct *new_vma;
+	unsigned long vm_flags = vma->vm_flags;
+	unsigned long old_addr = vrm->addr, new_addr = vrm->new_addr;
+	unsigned long old_len = vrm->old_len, new_len = vrm->new_len;
+	unsigned long new_pgoff;
+	unsigned long moved_len;
+	bool account_start = false;
+	bool account_end = false;
+	unsigned long hiwater_vm;
+	int err;
+	bool need_rmap_locks;
+	struct vma_iterator vmi;
+
+	err = prep_move_vma(vrm, &vm_flags);
+	if (err)
+		return err;
+
+	/* If accounted, charge the number of bytes the operation will use. */
+	if (!vrm_charge(vrm))
+		return -ENOMEM;
 
 	vma_start_write(vma);
 	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
-	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
+	new_vma = copy_vma(&vrm->vma, new_addr, new_len, new_pgoff,
 			   &need_rmap_locks);
+	/* This may have been updated. */
+	vma = vrm->vma;
 	if (!new_vma) {
-		if (vm_flags & VM_ACCOUNT)
-			vm_unacct_memory(to_account >> PAGE_SHIFT);
+		vrm_uncharge(vrm);
 		return -ENOMEM;
 	}
 
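Taken together, prep_move_vma() and vrm_charge() give move_vma() a strict ordering: validate before mutating anything, reserve accounted memory next, and release that reservation on every later failure path. A condensed sketch of that shape, with the caveat that alloc_new_vma() is a hypothetical stand-in for the copy_vma()/page-table-move sequence in the real function:

    /* Sketch of the discipline only, not the real function. */
    static unsigned long move_vma_outline(struct vma_remap_struct *vrm)
    {
        unsigned long vm_flags = vrm->vma->vm_flags;
        int err;

        err = prep_move_vma(vrm, &vm_flags);   /* 1. checks only, no side effects */
        if (err)
            return err;

        if (!vrm_charge(vrm))                  /* 2. reserve accounted pages */
            return -ENOMEM;

        if (!alloc_new_vma(vrm)) {             /* 3. hypothetical stand-in for
                                                *    copy_vma() and friends */
            vrm_uncharge(vrm);                 /*    every later failure must
                                                *    drop the reservation */
            return -ENOMEM;
        }

        vrm_stat_account(vrm, vrm->new_len);   /* 4. success: update statistics */
        return vrm->new_addr;
    }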
@@ -902,15 +980,15 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		old_addr = new_addr;
 		new_addr = err;
 	} else {
-		mremap_userfaultfd_prep(new_vma, uf);
+		mremap_userfaultfd_prep(new_vma, vrm->uf);
 	}
 
 	if (is_vm_hugetlb_page(vma)) {
 		clear_vma_resv_huge_pages(vma);
 	}
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
-	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
+	if (vm_flags & VM_ACCOUNT && !(vrm->flags & MREMAP_DONTUNMAP)) {
 		vm_flags_clear(vma, VM_ACCOUNT);
 		if (vma->vm_start < old_addr)
 			account_start = true;
@@ -928,13 +1006,12 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	 * If this were a serious issue, we'd add a flag to do_munmap().
 	 */
 	hiwater_vm = mm->hiwater_vm;
-	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
 
 	/* Tell pfnmap has moved from this vma */
 	if (unlikely(vma->vm_flags & VM_PFNMAP))
 		untrack_pfn_clear(vma);
 
-	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
+	if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP))) {
 		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
 		vm_flags_clear(vma, VM_LOCKED_MASK);
 
@@ -947,22 +1024,20 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		unlink_anon_vmas(vma);
 
 		/* Because we won't unmap we don't need to touch locked_vm */
+		vrm_stat_account(vrm, new_len);
 		return new_addr;
 	}
 
+	vrm_stat_account(vrm, new_len);
+
 	vma_iter_init(&vmi, mm, old_addr);
-	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
+	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, vrm->uf_unmap, false) < 0) {
 		/* OOM: unable to split vma, just get accounts right */
-		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
+		if (vm_flags & VM_ACCOUNT && !(vrm->flags & MREMAP_DONTUNMAP))
 			vm_acct_memory(old_len >> PAGE_SHIFT);
 		account_start = account_end = false;
 	}
 
-	if (vm_flags & VM_LOCKED) {
-		mm->locked_vm += new_len >> PAGE_SHIFT;
-		*mlocked = true;
-	}
-
 	mm->hiwater_vm = hiwater_vm;
 
 	/* Restore VM_ACCOUNT if one or two pieces of vma left */
@@ -1141,9 +1216,7 @@ static unsigned long mremap_to(struct vma_remap_struct *vrm)
 	if (err)
 		return err;
 
-	return move_vma(vrm->vma, vrm->addr, vrm->old_len, vrm->new_len,
-			vrm->new_addr, &vrm->mlocked, vrm->flags,
-			vrm->uf, vrm->uf_unmap);
+	return move_vma(vrm);
 }
 
 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
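mremap_to() now hands over a single descriptor instead of eight positional arguments. As a hypothetical illustration (field names are the ones used in this patch; the surrounding setup code is assumed), a caller populates the structure once and every helper reads from it:

    /* Hypothetical caller-side setup; not a hunk from this patch. */
    struct vma_remap_struct vrm = {
        .addr     = addr,       /* start of the old mapping */
        .old_len  = old_len,
        .new_len  = new_len,
        .new_addr = new_addr,
        .flags    = flags,      /* MREMAP_* flags */
        .uf       = uf,         /* userfaultfd contexts */
        .uf_unmap = uf_unmap,
    };
    /* .vma, .delta, .remap_type etc. are filled in during validation. */

    return move_vma(&vrm);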
@@ -1248,17 +1321,11 @@ static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
 static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm)
 {
 	struct mm_struct *mm = current->mm;
-	long pages = vrm->delta >> PAGE_SHIFT;
 	struct vm_area_struct *vma = vrm->vma;
 	VMA_ITERATOR(vmi, mm, vma->vm_end);
-	long charged = 0;
-
-	if (vma->vm_flags & VM_ACCOUNT) {
-		if (security_vm_enough_memory_mm(mm, pages))
-			return -ENOMEM;
 
-		charged = pages;
-	}
+	if (!vrm_charge(vrm))
+		return -ENOMEM;
 
 	/*
 	 * Function vma_merge_extend() is called on the
@@ -1271,15 +1338,11 @@ static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm)
 	 */
 	vma = vrm->vma = vma_merge_extend(&vmi, vma, vrm->delta);
 	if (!vma) {
-		vm_unacct_memory(charged);
+		vrm_uncharge(vrm);
 		return -ENOMEM;
 	}
 
-	vm_stat_account(mm, vma->vm_flags, pages);
-	if (vma->vm_flags & VM_LOCKED) {
-		mm->locked_vm += pages;
-		vrm->mlocked = true;
-	}
+	vrm_stat_account(vrm, vrm->delta);
 
 	return 0;
 }
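Note the asymmetry between the two users of vrm_stat_account(): the in-place path passes vrm->delta because the old length is already reflected in the mm statistics, while move_vma() passes new_len and relies on the unmap of the old VMA to subtract the old pages again. A worked example, assuming 4 KiB pages:

    /*
     * Illustrative arithmetic: an accounted, VM_LOCKED VMA of 2 pages
     * grown to 5 pages.
     *
     * in-place (expand_vma_in_place):
     *   vrm_charge()              -> charged = delta = 3 pages
     *   vrm_stat_account(delta)   -> locked_vm += 3 (2 old pages already counted)
     *
     * moved (move_vma, no MREMAP_DONTUNMAP):
     *   vrm_charge()              -> charged = delta = 3 pages
     *   vrm_stat_account(new_len) -> locked_vm += 5
     *   do_vmi_munmap(old)        -> unmapping the old VMA drops its 2 pages
     *
     * Either way the mm nets +3 locked, accounted pages.
     */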
@@ -1319,11 +1382,7 @@ static bool align_hugetlb(struct vma_remap_struct *vrm)
 static unsigned long expand_vma(struct vma_remap_struct *vrm)
 {
 	unsigned long err;
-	struct vm_area_struct *vma = vrm->vma;
 	unsigned long addr = vrm->addr;
-	unsigned long old_len = vrm->old_len;
-	unsigned long new_len = vrm->new_len;
-	unsigned long flags = vrm->flags;
 
 	err = resize_is_valid(vrm);
 	if (err)
@@ -1356,16 +1415,15 @@ static unsigned long expand_vma(struct vma_remap_struct *vrm)
 	 */
 
 	/* We're not allowed to move the VMA, so error out. */
-	if (!(flags & MREMAP_MAYMOVE))
+	if (!(vrm->flags & MREMAP_MAYMOVE))
 		return -ENOMEM;
 
 	/* Find a new location to move the VMA to. */
 	err = vrm_set_new_addr(vrm);
 	if (err)
 		return err;
 
-	return move_vma(vma, addr, old_len, new_len, vrm->new_addr,
-			&vrm->mlocked, flags, vrm->uf, vrm->uf_unmap);
+	return move_vma(vrm);
 }
 
 /*
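For reference, the userspace-visible operation all of these paths serve. A minimal sketch of standard mremap(2) usage (nothing here is specific to this patch): growth is attempted in place via expand_vma(), and with MREMAP_MAYMOVE set the kernel may fall back to the move_vma() path refactored above.

    /* Minimal userspace sketch of the remap paths exercised by this patch. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t old_len = 2 * 4096, new_len = 5 * 4096;
        void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        /* Grows in place when possible; otherwise, because MREMAP_MAYMOVE
         * is set, the kernel moves the mapping (the move_vma() path). */
        void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
        if (q == MAP_FAILED) {
            perror("mremap");
            return 1;
        }
        printf("mapping now at %p, %zu bytes\n", q, new_len);
        return 0;
    }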