Commit f995ece

Naoya Horiguchi authored and torvalds committed
pagemap: use walk->vma instead of calling find_vma()
The page table walker keeps the current vma in mm_walk, so we no longer have to call find_vma() in each pagemap_(pte|hugetlb)_range() call. pagemap_pte_range() currently does the vma loop itself, so this patch removes many lines of code.

The NULL-vma check is omitted because we assume these callbacks never run on an address outside a vma. Even if that assumption were broken, the resulting NULL pointer dereference would be detected, so we would still get enough information for debugging.

Signed-off-by: Naoya Horiguchi <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Cyrill Gorcunov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Pavel Emelyanov <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 5c64f52 commit f995ece
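
For context beyond the patch itself: the callbacks changed here fill entries for the /proc/pid/pagemap interface, which exposes one 64-bit entry per virtual page to userspace. Below is a minimal, self-contained reader sketch (not part of the commit; the file name is made up), assuming only the documented entry layout: bit 63 = present, bit 62 = swapped, bits 0-54 = PFN. On later kernels the PFN field may read as zero without CAP_SYS_ADMIN.

/* pagemap_peek.c -- hypothetical example; build: cc -o pagemap_peek pagemap_peek.c */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	volatile char *buf = malloc(pagesize);

	buf[0] = 1;	/* touch the page so it is faulted in (present) */

	int fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		perror("open /proc/self/pagemap");
		return 1;
	}

	/* one 64-bit entry per page, indexed by virtual page number */
	uint64_t entry;
	off_t offset = (off_t)((uintptr_t)buf / pagesize) * sizeof(entry);
	if (pread(fd, &entry, sizeof(entry), offset) != sizeof(entry)) {
		perror("pread");
		return 1;
	}

	printf("present=%d swapped=%d pfn=0x%" PRIx64 "\n",
	       (int)(entry >> 63),		/* bit 63: page present */
	       (int)((entry >> 62) & 1),	/* bit 62: swapped */
	       entry & ((1ULL << 55) - 1));	/* bits 0-54: PFN */
	close(fd);
	return 0;
}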


fs/proc/task_mmu.c

Lines changed: 14 additions & 54 deletions
@@ -1047,15 +1047,13 @@ static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemap
 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			     struct mm_walk *walk)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma = walk->vma;
 	struct pagemapread *pm = walk->private;
 	spinlock_t *ptl;
 	pte_t *pte, *orig_pte;
 	int err = 0;
 
-	/* find the first VMA at or above 'addr' */
-	vma = find_vma(walk->mm, addr);
-	if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		int pmd_flags2;
 
 		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
@@ -1081,55 +1079,20 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	if (pmd_trans_unstable(pmd))
 		return 0;
 
-	while (1) {
-		/* End of address space hole, which we mark as non-present. */
-		unsigned long hole_end;
-
-		if (vma)
-			hole_end = min(end, vma->vm_start);
-		else
-			hole_end = end;
-
-		for (; addr < hole_end; addr += PAGE_SIZE) {
-			pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
-
-			err = add_to_pagemap(addr, &pme, pm);
-			if (err)
-				return err;
-		}
-
-		if (!vma || vma->vm_start >= end)
-			break;
-		/*
-		 * We can't possibly be in a hugetlb VMA. In general,
-		 * for a mm_walk with a pmd_entry and a hugetlb_entry,
-		 * the pmd_entry can only be called on addresses in a
-		 * hugetlb if the walk starts in a non-hugetlb VMA and
-		 * spans a hugepage VMA. Since pagemap_read walks are
-		 * PMD-sized and PMD-aligned, this will never be true.
-		 */
-		BUG_ON(is_vm_hugetlb_page(vma));
-
-		/* Addresses in the VMA. */
-		orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-		for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
-			pagemap_entry_t pme;
-
-			pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
-			err = add_to_pagemap(addr, &pme, pm);
-			if (err)
-				break;
-		}
-		pte_unmap_unlock(orig_pte, ptl);
+	/*
+	 * We can assume that @vma always points to a valid one and @end never
+	 * goes beyond vma->vm_end.
+	 */
+	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	for (; addr < end; pte++, addr += PAGE_SIZE) {
+		pagemap_entry_t pme;
 
+		pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
+		err = add_to_pagemap(addr, &pme, pm);
 		if (err)
-			return err;
-
-		if (addr == end)
 			break;
-
-		vma = find_vma(walk->mm, addr);
 	}
+	pte_unmap_unlock(orig_pte, ptl);
 
 	cond_resched();
 
@@ -1155,15 +1118,12 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
 				 struct mm_walk *walk)
 {
 	struct pagemapread *pm = walk->private;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma = walk->vma;
 	int err = 0;
 	int flags2;
 	pagemap_entry_t pme;
 
-	vma = find_vma(walk->mm, addr);
-	WARN_ON_ONCE(!vma);
-
-	if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+	if (vma->vm_flags & VM_SOFTDIRTY)
 		flags2 = __PM_SOFT_DIRTY;
 	else
 		flags2 = 0;
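
Why the NULL-vma check and the hole loop can go away: the generic walker in mm/pagewalk.c splits the requested range per vma before invoking the table callbacks, so pagemap_pte_range() and pagemap_hugetlb_range() only ever see addresses inside walk->vma, while holes are reported through the walker's ->pte_hole callback instead. A simplified, non-verbatim sketch of that dispatch follows; walk_vma_tables() is a hypothetical stand-in for the per-table descent, not a real kernel function.

/*
 * Simplified sketch of the generic walker's per-vma dispatch; condensed
 * and paraphrased, not the actual mm/pagewalk.c code of this release.
 */
static int walk_page_range_sketch(unsigned long start, unsigned long end,
				  struct mm_walk *walk)
{
	struct vm_area_struct *vma = find_vma(walk->mm, start);
	unsigned long next;
	int err = 0;

	while (start < end && !err) {
		if (!vma || start < vma->vm_start) {
			/* a hole: reported via ->pte_hole, never via
			 * ->pmd_entry or ->hugetlb_entry */
			next = vma ? min(end, vma->vm_start) : end;
			if (walk->pte_hole)
				err = walk->pte_hole(start, next, walk);
		} else {
			/* inside a vma: clamp the range to vm_end and
			 * publish the vma, so callbacks may rely on
			 * walk->vma != NULL and end <= vma->vm_end */
			next = min(end, vma->vm_end);
			walk->vma = vma;
			err = walk_vma_tables(start, next, walk);
			vma = vma->vm_next;
		}
		start = next;
	}
	return err;
}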
