Skip to content

Commit 3b0e81a

Browse files
howlett
authored and akpm00 committed
mmap: change zeroing of maple tree in __vma_adjust()
Only write to the maple tree if we are not inserting or the insert isn't going to overwrite the area to clear. This avoids spanning writes and node coealescing when unnecessary. The change requires a custom search for the linked list addition to find the correct VMA for the prev link. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Liam R. Howlett <[email protected]> Tested-by: Yu Zhao <[email protected]> Cc: Catalin Marinas <[email protected]> Cc: David Hildenbrand <[email protected]> Cc: David Howells <[email protected]> Cc: Davidlohr Bueso <[email protected]> Cc: "Matthew Wilcox (Oracle)" <[email protected]> Cc: SeongJae Park <[email protected]> Cc: Sven Schnelle <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Will Deacon <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 524e00b commit 3b0e81a

File tree

1 file changed

+22
-8
lines changed

1 file changed

+22
-8
lines changed

mm/mmap.c

Lines changed: 22 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -567,11 +567,11 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
567567
* mm's list and the mm tree. It has already been inserted into the interval tree.
568568
*/
569569
static void __insert_vm_struct(struct mm_struct *mm, struct ma_state *mas,
570-
struct vm_area_struct *vma)
570+
struct vm_area_struct *vma, unsigned long location)
571571
{
572572
struct vm_area_struct *prev;
573573

574-
mas_set(mas, vma->vm_start);
574+
mas_set(mas, location);
575575
prev = mas_prev(mas, 0);
576576
vma_mas_store(vma, mas);
577577
__vma_link_list(mm, vma, prev);
@@ -601,6 +601,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
601601
int remove_next = 0;
602602
MA_STATE(mas, &mm->mm_mt, 0, 0);
603603
struct vm_area_struct *exporter = NULL, *importer = NULL;
604+
unsigned long ll_prev = vma->vm_start; /* linked list prev. */
604605

605606
if (next && !insert) {
606607
if (end >= next->vm_end) {
@@ -728,15 +729,27 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
728729
}
729730

730731
if (start != vma->vm_start) {
731-
if (vma->vm_start < start)
732+
if ((vma->vm_start < start) &&
733+
(!insert || (insert->vm_end != start))) {
732734
vma_mas_szero(&mas, vma->vm_start, start);
733-
vma_changed = true;
735+
VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
736+
} else {
737+
vma_changed = true;
738+
}
734739
vma->vm_start = start;
735740
}
736741
if (end != vma->vm_end) {
737-
if (vma->vm_end > end)
738-
vma_mas_szero(&mas, end, vma->vm_end);
739-
vma_changed = true;
742+
if (vma->vm_end > end) {
743+
if (!insert || (insert->vm_start != end)) {
744+
vma_mas_szero(&mas, end, vma->vm_end);
745+
VM_WARN_ON(insert &&
746+
insert->vm_end < vma->vm_end);
747+
} else if (insert->vm_start == end) {
748+
ll_prev = vma->vm_end;
749+
}
750+
} else {
751+
vma_changed = true;
752+
}
740753
vma->vm_end = end;
741754
if (!next)
742755
mm->highest_vm_end = vm_end_gap(vma);
@@ -783,7 +796,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
783796
* us to insert it before dropping the locks
784797
* (it may either follow vma or precede it).
785798
*/
786-
__insert_vm_struct(mm, &mas, insert);
799+
__insert_vm_struct(mm, &mas, insert, ll_prev);
787800
}
788801

789802
if (anon_vma) {
@@ -870,6 +883,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
870883
if (insert && file)
871884
uprobe_mmap(insert);
872885

886+
mas_destroy(&mas);
873887
validate_mm(mm);
874888
return 0;
875889
}

0 commit comments

Comments
 (0)