@@ -510,22 +510,26 @@ void page_unlock_anon_vma(struct anon_vma *anon_vma)
 
 /*
  * At what user virtual address is page expected in @vma?
- * Returns virtual address or -EFAULT if page's index/offset is not
- * within the range mapped the @vma.
  */
-inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+static inline unsigned long
+__vma_address(struct page *page, struct vm_area_struct *vma)
 {
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-	unsigned long address;
 
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		pgoff = page->index << huge_page_order(page_hstate(page));
-	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-		/* page should be within @vma mapping range */
-		return -EFAULT;
-	}
+
+	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+}
+
+inline unsigned long
+vma_address(struct page *page, struct vm_area_struct *vma)
+{
+	unsigned long address = __vma_address(page, vma);
+
+	/* page should be within @vma mapping range */
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
 	return address;
 }
 
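Taken together, the hunk above inverts vma_address()'s contract: rmap callers now find vmas through interval-tree lookups keyed on the page's pgoff, so any vma they get back is already known to cover the page, and the range check becomes a VM_BUG_ON() debug assertion rather than an -EFAULT return. The raw offset arithmetic moves into __vma_address() for the few callers that still have to check. Below is a minimal standalone userspace sketch of that arithmetic and the asserted invariant; the vma_sketch type, the sketch_* helper names, and the 4 KiB PAGE_SHIFT are illustrative stand-ins, not kernel API.

/*
 * Standalone sketch (not kernel code): a file page at page offset
 * pgoff maps into a vma at vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT).
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */

struct vma_sketch {
	unsigned long vm_start;	/* first address covered by the vma */
	unsigned long vm_end;	/* first address past the end of the vma */
	unsigned long vm_pgoff;	/* file page offset of vm_start */
};

static unsigned long sketch_vma_address(unsigned long pgoff,
					const struct vma_sketch *vma)
{
	/* the computation __vma_address() performs (huge pages aside) */
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

int main(void)
{
	/* a vma mapping file pages 16..31 at 0x700000000000 */
	struct vma_sketch vma = {
		.vm_start = 0x700000000000UL,
		.vm_end   = 0x700000010000UL,
		.vm_pgoff = 16,
	};
	unsigned long address = sketch_vma_address(20, &vma);

	/* the invariant vma_address() now asserts with VM_BUG_ON() */
	assert(address >= vma.vm_start && address < vma.vm_end);
	printf("page 20 maps at %#lx\n", address);
	return 0;
}

Compiled and run, this prints "page 20 maps at 0x700000004000".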
@@ -535,6 +539,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
+	unsigned long address;
 	if (PageAnon(page)) {
 		struct anon_vma *page__anon_vma = page_anon_vma(page);
 		/*
@@ -550,7 +555,10 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 			return -EFAULT;
 	} else
 		return -EFAULT;
-	return vma_address(page, vma);
+	address = __vma_address(page, vma);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+		return -EFAULT;
+	return address;
 }
 
 /*
@@ -624,8 +632,8 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	address = vma_address(page, vma);
-	if (address == -EFAULT)		/* out of vma range */
+	address = __vma_address(page, vma);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
 		return 0;
 	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
 	if (!pte)			/* the page is not in this mm */
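The two hunks above cover the callers that cannot assume the page lies inside the vma: page_address_in_vma() probes candidate vmas that may not map the page (the function's own comment mentions swapoff's unuse_vma()), and page_mapped_in_vma() is used by the memory-failure code, so both call __vma_address() directly and keep an explicit bounds check rather than trip the new assertion. Continuing the illustrative sketch above (same hypothetical vma_sketch and sketch_vma_address()), the caller-side pattern is:

#define SKETCH_EFAULT 14	/* stands in for the kernel's EFAULT */

static unsigned long sketch_page_address_in_vma(unsigned long pgoff,
						const struct vma_sketch *vma)
{
	unsigned long address = sketch_vma_address(pgoff, vma);

	/* caller-visible failure instead of a debug assertion */
	if (address < vma->vm_start || address >= vma->vm_end)
		return -SKETCH_EFAULT;	/* mirrors return -EFAULT */
	return address;
}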
@@ -732,8 +740,6 @@ static int page_referenced_anon(struct page *page,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -799,8 +805,6 @@ static int page_referenced_file(struct page *page,
 
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -904,8 +908,6 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		if (vma->vm_flags & VM_SHARED) {
 			unsigned long address = vma_address(page, vma);
-			if (address == -EFAULT)
-				continue;
 			ret += page_mkclean_one(page, vma, address);
 		}
 	}
@@ -1468,8 +1470,6 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 			continue;
 
 		address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			break;
@@ -1508,8 +1508,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			goto out;
@@ -1684,8 +1682,6 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = rmap_one(page, vma, address, arg);
 		if (ret != SWAP_AGAIN)
 			break;
@@ -1707,8 +1703,6 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = rmap_one(page, vma, address, arg);
 		if (ret != SWAP_AGAIN)
 			break;