
Commit 3fe8796

kiryl authored and torvalds committed
mm: convert remove_migration_pte() to use page_vma_mapped_walk()
remove_migration_pte() can also easily be converted to page_vma_mapped_walk().

[[email protected]: coding-style fixes]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Kirill A. Shutemov <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Srikar Dronamraju <[email protected]>
Cc: Vladimir Davydov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent d53a8b4 commit 3fe8796

File tree

1 file changed (+41 −61 lines)

mm/migrate.c

Lines changed: 41 additions & 61 deletions
@@ -193,82 +193,62 @@ void putback_movable_pages(struct list_head *l)
 /*
  * Restore a potential migration pte to a working pte entry
  */
-static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 		unsigned long addr, void *old)
 {
-	struct mm_struct *mm = vma->vm_mm;
+	struct page_vma_mapped_walk pvmw = {
+		.page = old,
+		.vma = vma,
+		.address = addr,
+		.flags = PVMW_SYNC | PVMW_MIGRATION,
+	};
+	struct page *new;
+	pte_t pte;
 	swp_entry_t entry;
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
 
-	if (unlikely(PageHuge(new))) {
-		ptep = huge_pte_offset(mm, addr);
-		if (!ptep)
-			goto out;
-		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
-	} else {
-		pmd = mm_find_pmd(mm, addr);
-		if (!pmd)
-			goto out;
+	VM_BUG_ON_PAGE(PageTail(page), page);
+	while (page_vma_mapped_walk(&pvmw)) {
+		new = page - pvmw.page->index +
+			linear_page_index(vma, pvmw.address);
 
-		ptep = pte_offset_map(pmd, addr);
+		get_page(new);
+		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
+		if (pte_swp_soft_dirty(*pvmw.pte))
+			pte = pte_mksoft_dirty(pte);
 
 		/*
-		 * Peek to check is_swap_pte() before taking ptlock?  No, we
-		 * can race mremap's move_ptes(), which skips anon_vma lock.
+		 * Recheck VMA as permissions can change since migration started
 		 */
-
-		ptl = pte_lockptr(mm, pmd);
-	}
-
-	spin_lock(ptl);
-	pte = *ptep;
-	if (!is_swap_pte(pte))
-		goto unlock;
-
-	entry = pte_to_swp_entry(pte);
-
-	if (!is_migration_entry(entry) ||
-	    migration_entry_to_page(entry) != old)
-		goto unlock;
-
-	get_page(new);
-	pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
-	if (pte_swp_soft_dirty(*ptep))
-		pte = pte_mksoft_dirty(pte);
-
-	/* Recheck VMA as permissions can change since migration started */
-	if (is_write_migration_entry(entry))
-		pte = maybe_mkwrite(pte, vma);
+		entry = pte_to_swp_entry(*pvmw.pte);
+		if (is_write_migration_entry(entry))
+			pte = maybe_mkwrite(pte, vma);
 
 #ifdef CONFIG_HUGETLB_PAGE
-	if (PageHuge(new)) {
-		pte = pte_mkhuge(pte);
-		pte = arch_make_huge_pte(pte, vma, new, 0);
-	}
+		if (PageHuge(new)) {
+			pte = pte_mkhuge(pte);
+			pte = arch_make_huge_pte(pte, vma, new, 0);
+		}
 #endif
-	flush_dcache_page(new);
-	set_pte_at(mm, addr, ptep, pte);
+		flush_dcache_page(new);
+		set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 
-	if (PageHuge(new)) {
-		if (PageAnon(new))
-			hugepage_add_anon_rmap(new, vma, addr);
+		if (PageHuge(new)) {
+			if (PageAnon(new))
+				hugepage_add_anon_rmap(new, vma, pvmw.address);
+			else
+				page_dup_rmap(new, true);
+		} else if (PageAnon(new))
+			page_add_anon_rmap(new, vma, pvmw.address, false);
 		else
-			page_dup_rmap(new, true);
-	} else if (PageAnon(new))
-		page_add_anon_rmap(new, vma, addr, false);
-	else
-		page_add_file_rmap(new, false);
+			page_add_file_rmap(new, false);
 
-	if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
-		mlock_vma_page(new);
+		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
+			mlock_vma_page(new);
+
+		/* No need to invalidate - it was non-present before */
+		update_mmu_cache(vma, pvmw.address, pvmw.pte);
+	}
 
-	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, addr, ptep);
-unlock:
-	pte_unmap_unlock(ptep, ptl);
-out:
 	return SWAP_AGAIN;
 }
 
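For readers new to the helper, the calling pattern the hunk adopts is sketched below. This is a minimal illustration distilled from the diff above, not a standalone build: struct page_vma_mapped_walk, its .page/.vma/.address/.flags fields, the PVMW_SYNC and PVMW_MIGRATION flags, and pvmw.pte/pvmw.address are the kernel-internal API exactly as the new code uses them; the loop body is elided.

	/* Describe which page's mappings to find, then let the walker
	 * locate each mapping of that page in this VMA in turn. */
	struct page_vma_mapped_walk pvmw = {
		.page = old,		/* page whose mappings we walk */
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,	/* synchronous walk;
							   match migration entries
							   rather than present ptes */
	};

	while (page_vma_mapped_walk(&pvmw)) {
		/* Here pvmw.pte and pvmw.address identify one mapping,
		 * with the page table lock already held by the walker. */
	}

Because the walker owns pte lookup and locking, the open-coded huge_pte_offset()/mm_find_pmd() paths, the is_swap_pte()/is_migration_entry() rechecks, and the unlock:/out: labels of the old version all fall away, which is where the 61 deleted lines go.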
