
Commit 86c2ad1

walken-google authored and torvalds committed
mm rmap: remove vma_address check for address inside vma
In file and anon rmap, we use interval trees to find potentially relevant vmas and then call vma_address() to find the virtual address the given page might be found at in these vmas. vma_address() used to include a check that the returned address falls within the limits of the vma, but this check isn't necessary now that we always use interval trees in rmap: the interval tree just doesn't return any vmas which this check would find to be irrelevant. As a result, we can replace the use of the -EFAULT error code (which then needed to be checked at every call site) with a VM_BUG_ON().

Signed-off-by: Michel Lespinasse <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Daniel Santos <[email protected]>
Cc: Hugh Dickins <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent bf181b9 · commit 86c2ad1
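As background, the address arithmetic that vma_address() performs, mapping a page's index within its file or anon mapping onto a virtual address inside a vma, can be illustrated with a minimal userspace sketch. The struct below only mirrors the relevant vm_area_struct fields (vm_start, vm_end, vm_pgoff), PAGE_SHIFT is assumed to be 12 (4 KiB pages), and assert() stands in for VM_BUG_ON(); this is an illustration of the calculation, not kernel code.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12UL	/* assumed: 4 KiB pages, as on x86 */

/* Simplified stand-in for the kernel's vm_area_struct. */
struct vma {
	unsigned long vm_start;	/* first virtual address of the mapping */
	unsigned long vm_end;	/* one past the last virtual address */
	unsigned long vm_pgoff;	/* file offset of vm_start, in pages */
};

/* Mirror of __vma_address(): pure arithmetic, no range check. */
static unsigned long __vma_address(unsigned long pgoff, const struct vma *vma)
{
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

/*
 * Mirror of the new vma_address(): when the vma came from an interval
 * tree lookup keyed on pgoff, the result is in range by construction,
 * so an out-of-range address is a bug, not an expected condition.
 */
static unsigned long vma_address(unsigned long pgoff, const struct vma *vma)
{
	unsigned long address = __vma_address(pgoff, vma);

	/* assert() plays the role of VM_BUG_ON() here */
	assert(address >= vma->vm_start && address < vma->vm_end);
	return address;
}

int main(void)
{
	/* A vma mapping file pages 16..31 at virtual 0x700000000000. */
	struct vma vma = {
		.vm_start = 0x700000000000UL,
		.vm_end   = 0x700000010000UL,
		.vm_pgoff = 16,
	};

	/* Page 20 of the file lands 4 pages into the mapping. */
	printf("page 20 -> %#lx\n", vma_address(20, &vma));
	return 0;
}

With pgoff 16..31 mapped at 0x700000000000, page 20 lands four pages in, at 0x700000004000; any pgoff outside that window would trip the assertion, just as a vma not produced by the interval tree lookup would trip VM_BUG_ON() in the kernel version.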

File tree: 2 files changed, 21 insertions(+), 31 deletions(-)


mm/huge_memory.c

Lines changed: 0 additions & 4 deletions
@@ -1386,8 +1386,6 @@ static void __split_huge_page(struct page *page,
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long addr = vma_address(page, vma);
 		BUG_ON(is_vma_temporary_stack(vma));
-		if (addr == -EFAULT)
-			continue;
 		mapcount += __split_huge_page_splitting(page, vma, addr);
 	}
 	/*
@@ -1412,8 +1410,6 @@ static void __split_huge_page(struct page *page,
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long addr = vma_address(page, vma);
 		BUG_ON(is_vma_temporary_stack(vma));
-		if (addr == -EFAULT)
-			continue;
 		mapcount2 += __split_huge_page_map(page, vma, addr);
 	}
 	if (mapcount != mapcount2)

mm/rmap.c

Lines changed: 21 additions & 27 deletions
@@ -510,22 +510,26 @@ void page_unlock_anon_vma(struct anon_vma *anon_vma)
 
 /*
  * At what user virtual address is page expected in @vma?
- * Returns virtual address or -EFAULT if page's index/offset is not
- * within the range mapped the @vma.
  */
-inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+static inline unsigned long
+__vma_address(struct page *page, struct vm_area_struct *vma)
 {
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-	unsigned long address;
 
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		pgoff = page->index << huge_page_order(page_hstate(page));
-	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-		/* page should be within @vma mapping range */
-		return -EFAULT;
-	}
+
+	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+}
+
+inline unsigned long
+vma_address(struct page *page, struct vm_area_struct *vma)
+{
+	unsigned long address = __vma_address(page, vma);
+
+	/* page should be within @vma mapping range */
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
 	return address;
 }
 
@@ -535,6 +539,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
+	unsigned long address;
 	if (PageAnon(page)) {
 		struct anon_vma *page__anon_vma = page_anon_vma(page);
 		/*
@@ -550,7 +555,10 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 			return -EFAULT;
 	} else
 		return -EFAULT;
-	return vma_address(page, vma);
+	address = __vma_address(page, vma);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+		return -EFAULT;
+	return address;
 }
 
 /*
@@ -624,8 +632,8 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	address = vma_address(page, vma);
-	if (address == -EFAULT)	/* out of vma range */
+	address = __vma_address(page, vma);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
 		return 0;
 	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
 	if (!pte)	/* the page is not in this mm */
@@ -732,8 +740,6 @@ static int page_referenced_anon(struct page *page,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -799,8 +805,6 @@ static int page_referenced_file(struct page *page,
 
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -904,8 +908,6 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		if (vma->vm_flags & VM_SHARED) {
 			unsigned long address = vma_address(page, vma);
-			if (address == -EFAULT)
-				continue;
 			ret += page_mkclean_one(page, vma, address);
 		}
 	}
@@ -1468,8 +1470,6 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 			continue;
 
 		address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			break;
@@ -1508,8 +1508,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			goto out;
@@ -1684,8 +1682,6 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = rmap_one(page, vma, address, arg);
 		if (ret != SWAP_AGAIN)
 			break;
@@ -1707,8 +1703,6 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = rmap_one(page, vma, address, arg);
 		if (ret != SWAP_AGAIN)
 			break;
