
Commit 494334e

Hugh Dickins authored and torvalds committed
mm/thp: fix vma_address() if virtual address below file offset
Running certain tests with a DEBUG_VM kernel would crash within hours, on the total_mapcount BUG() in split_huge_page_to_list(), while trying to free up some memory by punching a hole in a shmem huge page: split's try_to_unmap() was unable to find all the mappings of the page (which, on a !DEBUG_VM kernel, would then keep the huge page pinned in memory).

When that BUG() was changed to a WARN(), it would later crash on the VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma) in mm/internal.h:vma_address(), used by rmap_walk_file() for try_to_unmap().

vma_address() is usually correct, but there's a wraparound case when the vm_start address is unusually low, but vm_pgoff not so low: vma_address() chooses max(start, vma->vm_start), but that decides on the wrong address, because start has become almost ULONG_MAX.

Rewrite vma_address() to be more careful about vm_pgoff; move the VM_BUG_ON_VMA() out of it, returning -EFAULT for errors, so that it can be safely used from page_mapped_in_vma() and page_address_in_vma() too.

Add vma_address_end() to apply similar care to end address calculation, in page_vma_mapped_walk() and page_mkclean_one() and try_to_unmap_one(); though it raises a question of whether callers would do better to supply pvmw->end to page_vma_mapped_walk() - I chose not, for a smaller patch.

An irritation is that their apparent generality breaks down on KSM pages, which cannot be located by the page->index that page_to_pgoff() uses: as commit 4b0ece6 ("mm: migrate: fix remove_migration_pte() for ksm pages") once discovered. I dithered over the best thing to do about that, and have ended up with a VM_BUG_ON_PAGE(PageKsm) in both vma_address() and vma_address_end(); though the only place in danger of using it on them was try_to_unmap_one().

Sidenote: vma_address() and vma_address_end() now use compound_nr() on a head page, instead of thp_size(): to make the right calculation on a hugetlbfs page, whether or not THPs are configured. try_to_unmap() is used on hugetlbfs pages, but perhaps the wrong calculation never mattered.

Link: https://lkml.kernel.org/r/[email protected]
Fixes: a8fa41a ("mm, rmap: check all VMAs that PTE-mapped THP can be part of")
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Kirill A. Shutemov <[email protected]>
Cc: Alistair Popple <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: Jue Wang <[email protected]>
Cc: "Matthew Wilcox (Oracle)" <[email protected]>
Cc: Miaohe Lin <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Peter Xu <[email protected]>
Cc: Ralph Campbell <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Wang Yugui <[email protected]>
Cc: Yang Shi <[email protected]>
Cc: Zi Yan <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 732ed55 commit 494334e
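
To make the wraparound described in the commit message concrete, here is a small illustrative userspace sketch (not part of the patch; the vm_start, vm_pgoff and pgoff values are invented) of the old __vma_address() arithmetic. When the page's pgoff is below vma->vm_pgoff, the subtraction underflows, start lands near ULONG_MAX, and max(start, vma->vm_start) keeps that wrapped value instead of clamping to vma->vm_start.

/* Illustrative only: mimics the pre-patch __vma_address() arithmetic
 * in userspace; the numbers below are made up for demonstration.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long vm_start = 0x1000;	/* unusually low vm_start... */
	unsigned long vm_pgoff = 0x200;		/* ...but vm_pgoff not so low */
	unsigned long pgoff    = 0x100;		/* head page's pgoff is below vm_pgoff */

	/* Old formula: (pgoff - vm_pgoff) underflows, so start wraps */
	unsigned long start = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);

	/* Old clamp: max(start, vm_start) keeps the wrapped value */
	unsigned long chosen = start > vm_start ? start : vm_start;

	printf("start  = %#lx\n", start);	/* 0xfffffffffff01000 on 64-bit */
	printf("chosen = %#lx\n", chosen);	/* same wrapped value: wrong address */
	return 0;
}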

3 files changed: +53 −32 lines changed

mm/internal.h

Lines changed: 39 additions & 14 deletions
@@ -384,27 +384,52 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
 /*
- * At what user virtual address is page expected in @vma?
+ * At what user virtual address is page expected in vma?
+ * Returns -EFAULT if all of the page is outside the range of vma.
+ * If page is a compound head, the entire compound page is considered.
  */
 static inline unsigned long
-__vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address(struct page *page, struct vm_area_struct *vma)
 {
-	pgoff_t pgoff = page_to_pgoff(page);
-	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+	pgoff_t pgoff;
+	unsigned long address;
+
+	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
+	pgoff = page_to_pgoff(page);
+	if (pgoff >= vma->vm_pgoff) {
+		address = vma->vm_start +
+			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+		/* Check for address beyond vma (or wrapped through 0?) */
+		if (address < vma->vm_start || address >= vma->vm_end)
+			address = -EFAULT;
+	} else if (PageHead(page) &&
+		   pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
+		/* Test above avoids possibility of wrap to 0 on 32-bit */
+		address = vma->vm_start;
+	} else {
+		address = -EFAULT;
+	}
+	return address;
 }
 
+/*
+ * Then at what user virtual address will none of the page be found in vma?
+ * Assumes that vma_address() already returned a good starting address.
+ * If page is a compound head, the entire compound page is considered.
+ */
 static inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address_end(struct page *page, struct vm_area_struct *vma)
 {
-	unsigned long start, end;
-
-	start = __vma_address(page, vma);
-	end = start + thp_size(page) - PAGE_SIZE;
-
-	/* page should be within @vma mapping range */
-	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
-
-	return max(start, vma->vm_start);
+	pgoff_t pgoff;
+	unsigned long address;
+
+	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
+	pgoff = page_to_pgoff(page) + compound_nr(page);
+	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+	/* Check for address beyond vma (or wrapped through 0?) */
+	if (address < vma->vm_start || address > vma->vm_end)
+		address = vma->vm_end;
+	return address;
 }
 
 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
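
For comparison, a minimal userspace mock of the rewritten calculation above (illustrative only: the mock_vma struct, mock_vma_address() helper and all values are invented, and the PageHead()/PageKsm() checks are elided). Fed numbers like those in the earlier sketch, a compound head page whose tail pages reach into the vma now maps to vma->vm_start, and a page wholly outside the vma yields -EFAULT rather than a wrapped address.

/* Userspace mock of the new vma_address() logic; not kernel code.
 * nr stands in for compound_nr(page): 512 models a 2MB THP of 4kB pages.
 */
#include <stdio.h>
#include <errno.h>

#define PAGE_SHIFT 12

struct mock_vma {
	unsigned long vm_start, vm_end, vm_pgoff;
};

static unsigned long mock_vma_address(unsigned long pgoff, unsigned long nr,
				      const struct mock_vma *vma)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr - 1 >= vma->vm_pgoff) {
		/* Compound page straddles vm_start: start at vm_start */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}

int main(void)
{
	struct mock_vma vma = {
		.vm_start = 0x1000, .vm_end = 0x201000, .vm_pgoff = 0x200,
	};
	unsigned long thp = mock_vma_address(0x100, 512, &vma);
	unsigned long outside = mock_vma_address(0x50, 1, &vma);

	printf("straddling THP -> %#lx\n", thp);	/* 0x1000, the vma start */
	printf("page outside   -> %s\n",
	       outside == (unsigned long)-EFAULT ? "-EFAULT" : "mapped");
	return 0;
}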

mm/page_vma_mapped.c

Lines changed: 6 additions & 10 deletions
@@ -228,18 +228,18 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (!map_pte(pvmw))
 		goto next_pte;
 	while (1) {
+		unsigned long end;
+
 		if (check_pte(pvmw))
 			return true;
 next_pte:
 		/* Seek to next pte only makes sense for THP */
 		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
 			return not_found(pvmw);
+		end = vma_address_end(pvmw->page, pvmw->vma);
 		do {
 			pvmw->address += PAGE_SIZE;
-			if (pvmw->address >= pvmw->vma->vm_end ||
-			    pvmw->address >=
-				__vma_address(pvmw->page, pvmw->vma) +
-				thp_size(pvmw->page))
+			if (pvmw->address >= end)
 				return not_found(pvmw);
 			/* Did we cross page table boundary? */
 			if (pvmw->address % PMD_SIZE == 0) {

@@ -277,14 +277,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 		.vma = vma,
 		.flags = PVMW_SYNC,
 	};
-	unsigned long start, end;
-
-	start = __vma_address(page, vma);
-	end = start + thp_size(page) - PAGE_SIZE;
 
-	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
+	pvmw.address = vma_address(page, vma);
+	if (pvmw.address == -EFAULT)
 		return 0;
-	pvmw.address = max(start, vma->vm_start);
 	if (!page_vma_mapped_walk(&pvmw))
 		return 0;
 	page_vma_mapped_walk_done(&pvmw);
mm/rmap.c

Lines changed: 8 additions & 8 deletions
@@ -707,7 +707,6 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
-	unsigned long address;
 	if (PageAnon(page)) {
 		struct anon_vma *page__anon_vma = page_anon_vma(page);
 		/*

@@ -722,10 +721,8 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 			return -EFAULT;
 	} else
 		return -EFAULT;
-	address = __vma_address(page, vma);
-	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
-		return -EFAULT;
-	return address;
+
+	return vma_address(page, vma);
 }
 
 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)

@@ -919,7 +916,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 				0, vma, vma->vm_mm, address,
-				min(vma->vm_end, address + page_size(page)));
+				vma_address_end(page, vma));
 	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {

@@ -1435,9 +1432,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * Note that the page can not be free in this function as call of
 	 * try_to_unmap() must hold a reference on the page.
 	 */
+	range.end = PageKsm(page) ?
+			address + PAGE_SIZE : vma_address_end(page, vma);
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
-				address,
-				min(vma->vm_end, address + page_size(page)));
+				address, range.end);
 	if (PageHuge(page)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted

@@ -1889,6 +1887,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
 
+		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
 
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))

@@ -1943,6 +1942,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 			pgoff_start, pgoff_end) {
 		unsigned long address = vma_address(page, vma);
 
+		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
 
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
