
Commit 8782fb6

Steven Price authored and torvalds committed
mm: pagewalk: Fix race between unmap and page walker
The mmap lock protects the page walker from changes to the page tables during the walk. However a read lock is insufficient to protect those areas which don't have a VMA as munmap() detaches the VMAs before downgrading to a read lock and actually tearing down PTEs/page tables.

For users of walk_page_range() the solution is to simply call pte_hole() immediately without checking the actual page tables when a VMA is not present. We now never call __walk_page_range() without a valid vma.

For walk_page_range_novma() the locking requirements are tightened to require the mmap write lock to be taken, and then walking the pgd directly with 'no_vma' set.

This in turn means that all page walkers either have a valid vma, or it's that special 'novma' case for page table debugging. As a result, all the odd '(!walk->vma && !walk->no_vma)' tests can be removed.

Fixes: dd2283f ("mm: mmap: zap pages with read mmap_sem in munmap")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Steven Price <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Thomas Hellström <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent d895ec7
3 files changed, 16 insertions(+), 13 deletions(-)
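For walk_page_range() users, the practical effect is that a range with no VMA is now reported straight through pte_hole() (with depth -1) and the walker never descends into page tables that a racing munmap() may be tearing down. Below is a minimal sketch of such a caller; the mm_walk_ops/walk_page_range API is as in this tree, but the byte-counting callback and function names are hypothetical:

#include <linux/pagewalk.h>
#include <linux/mm.h>

/* Hypothetical callback: accumulate bytes of address space with no VMA. */
static int count_hole(unsigned long addr, unsigned long next,
		      int depth, struct mm_walk *walk)
{
	unsigned long *bytes = walk->private;

	/* depth == -1 means "no VMA covers this range" after this patch */
	if (depth == -1)
		*bytes += next - addr;
	return 0;
}

static const struct mm_walk_ops hole_ops = {
	.pte_hole = count_hole,
};

static unsigned long count_unmapped(struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	unsigned long bytes = 0;

	mmap_read_lock(mm);	/* the read lock remains sufficient here */
	walk_page_range(mm, start, end, &hole_ops, &bytes);
	mmap_read_unlock(mm);

	return bytes;
}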

arch/riscv/mm/pageattr.c

Lines changed: 2 additions & 2 deletions

@@ -118,10 +118,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
 	if (!numpages)
 		return 0;
 
-	mmap_read_lock(&init_mm);
+	mmap_write_lock(&init_mm);
 	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
 				    &masks);
-	mmap_read_unlock(&init_mm);
+	mmap_write_unlock(&init_mm);
 
 	flush_tlb_kernel_range(start, end);
 
mm/pagewalk.c

Lines changed: 12 additions & 9 deletions

@@ -110,7 +110,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 	do {
 again:
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
+		if (pmd_none(*pmd)) {
 			if (ops->pte_hole)
 				err = ops->pte_hole(addr, next, depth, walk);
 			if (err)
@@ -171,7 +171,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 	do {
 again:
 		next = pud_addr_end(addr, end);
-		if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
+		if (pud_none(*pud)) {
 			if (ops->pte_hole)
 				err = ops->pte_hole(addr, next, depth, walk);
 			if (err)
@@ -366,19 +366,19 @@ static int __walk_page_range(unsigned long start, unsigned long end,
 	struct vm_area_struct *vma = walk->vma;
 	const struct mm_walk_ops *ops = walk->ops;
 
-	if (vma && ops->pre_vma) {
+	if (ops->pre_vma) {
 		err = ops->pre_vma(start, end, walk);
 		if (err)
 			return err;
 	}
 
-	if (vma && is_vm_hugetlb_page(vma)) {
+	if (is_vm_hugetlb_page(vma)) {
 		if (ops->hugetlb_entry)
 			err = walk_hugetlb_range(start, end, walk);
 	} else
 		err = walk_pgd_range(start, end, walk);
 
-	if (vma && ops->post_vma)
+	if (ops->post_vma)
 		ops->post_vma(walk);
 
 	return err;
@@ -450,9 +450,13 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
 		if (!vma) { /* after the last vma */
 			walk.vma = NULL;
 			next = end;
+			if (ops->pte_hole)
+				err = ops->pte_hole(start, next, -1, &walk);
 		} else if (start < vma->vm_start) { /* outside vma */
 			walk.vma = NULL;
 			next = min(end, vma->vm_start);
+			if (ops->pte_hole)
+				err = ops->pte_hole(start, next, -1, &walk);
 		} else { /* inside vma */
 			walk.vma = vma;
 			next = min(end, vma->vm_end);
@@ -470,9 +474,8 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
 			}
 			if (err < 0)
 				break;
-		}
-		if (walk.vma || walk.ops->pte_hole)
 			err = __walk_page_range(start, next, &walk);
+		}
 		if (err)
 			break;
 	} while (start = next, start < end);
@@ -501,9 +504,9 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
 	if (start >= end || !walk.mm)
 		return -EINVAL;
 
-	mmap_assert_locked(walk.mm);
+	mmap_assert_write_locked(walk.mm);
 
-	return __walk_page_range(start, end, &walk);
+	return walk_pgd_range(start, end, &walk);
 }
 
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
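
Conversely, walk_page_range_novma() callers must now hold the mmap write lock, as the riscv hunk above does; mmap_assert_write_locked() fires otherwise. A minimal sketch of a kernel-page-table walk under the new contract, with a hypothetical PTE-printing callback:

#include <linux/pagewalk.h>
#include <linux/mm.h>

/* Hypothetical callback: print each present PTE in the range. */
static int show_pte(pte_t *pte, unsigned long addr, unsigned long next,
		    struct mm_walk *walk)
{
	if (!pte_none(*pte))
		pr_info("pte at %#lx: %#llx\n", addr, (u64)pte_val(*pte));
	return 0;
}

static const struct mm_walk_ops show_ops = {
	.pte_entry = show_pte,
};

static int show_kernel_range(unsigned long start, unsigned long end)
{
	int ret;

	/* A read lock is no longer sufficient for the novma walk. */
	mmap_write_lock(&init_mm);
	ret = walk_page_range_novma(&init_mm, start, end, &show_ops,
				    NULL, NULL);
	mmap_write_unlock(&init_mm);

	return ret;
}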

mm/ptdump.c

Lines changed: 2 additions & 2 deletions

@@ -152,13 +152,13 @@ void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
 {
 	const struct ptdump_range *range = st->range;
 
-	mmap_read_lock(mm);
+	mmap_write_lock(mm);
 	while (range->start != range->end) {
 		walk_page_range_novma(mm, range->start, range->end,
 				      &ptdump_ops, pgd, st);
 		range++;
 	}
-	mmap_read_unlock(mm);
+	mmap_write_unlock(mm);
 
 	/* Flush out the last page */
 	st->note_page(st, 0, -1, 0);
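
ptdump_walk_pgd()'s signature is unchanged, so arch callers need no modification; only the lock taken internally is upgraded. For illustration, a hypothetical caller sketch using the real ptdump_state and ptdump_range types (the callback and range table are invented; the { 0, 0 } sentinel is what ends the while loop above):

#include <linux/ptdump.h>
#include <linux/mm.h>

/* Hypothetical: note one page-table entry (val == 0 means a hole). */
static void my_note_page(struct ptdump_state *st, unsigned long addr,
			 int level, u64 val)
{
	if (val)
		pr_info("entry at %#lx, level %d: %#llx\n", addr, level, val);
}

/* Hypothetical range table; start == end terminates the walk loop. */
static const struct ptdump_range my_ranges[] = {
	{ PAGE_OFFSET, ~0UL },
	{ 0, 0 },
};

static void my_ptdump(void)
{
	struct ptdump_state st = {
		.note_page = my_note_page,
		.range = my_ranges,
	};

	/* ptdump_walk_pgd() now takes the mmap write lock internally. */
	ptdump_walk_pgd(&st, &init_mm, NULL);
}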
