
Commit 248db92

kiryl authored and torvalds committed
migrate_pages: try to split pages on queuing
We are not able to migrate THPs. It means it's not enough to split only PMD on migration -- we need to split compound page under it too.

Signed-off-by: Kirill A. Shutemov <[email protected]>
Tested-by: Aneesh Kumar K.V <[email protected]>
Acked-by: Jerome Marchand <[email protected]>
Cc: Sasha Levin <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Steve Capper <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent e9b61f1 commit 248db92

File tree

1 file changed: +38 -4 lines changed


mm/mempolicy.c

Lines changed: 38 additions & 4 deletions
@@ -489,14 +489,33 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct page *page;
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
-	int nid;
+	int nid, ret;
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	split_huge_pmd(vma, pmd, addr);
-	if (pmd_trans_unstable(pmd))
-		return 0;
+	if (pmd_trans_huge(*pmd)) {
+		ptl = pmd_lock(walk->mm, pmd);
+		if (pmd_trans_huge(*pmd)) {
+			page = pmd_page(*pmd);
+			if (is_huge_zero_page(page)) {
+				spin_unlock(ptl);
+				split_huge_pmd(vma, pmd, addr);
+			} else {
+				get_page(page);
+				spin_unlock(ptl);
+				lock_page(page);
+				ret = split_huge_page(page);
+				unlock_page(page);
+				put_page(page);
+				if (ret)
+					return 0;
+			}
+		} else {
+			spin_unlock(ptl);
+		}
+	}
 
+retry:
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		if (!pte_present(*pte))
@@ -513,6 +532,21 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		nid = page_to_nid(page);
 		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
 			continue;
+		if (PageTail(page) && PageAnon(page)) {
+			get_page(page);
+			pte_unmap_unlock(pte, ptl);
+			lock_page(page);
+			ret = split_huge_page(page);
+			unlock_page(page);
+			put_page(page);
+			/* Failed to split -- skip. */
+			if (ret) {
+				pte = pte_offset_map_lock(walk->mm, pmd,
+						addr, &ptl);
+				continue;
+			}
+			goto retry;
+		}
 
 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 			migrate_page_add(page, qp->pagelist, flags);
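
For readers skimming the diff, the following is a simplified, commented restatement of the control flow the two hunks add to queue_pages_pte_range(). It is a sketch for explanation only, not buildable code: it reuses the helpers that appear in the hunks above, omits the surrounding function body, and the comments are annotations added here rather than part of the patch.

/* Hunk 1: before walking the PTEs, get rid of any THP mapped by this PMD. */
if (pmd_trans_huge(*pmd)) {
	ptl = pmd_lock(walk->mm, pmd);
	if (pmd_trans_huge(*pmd)) {		/* re-check under the PMD lock */
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			/* Huge zero page: splitting the PMD is enough. */
			spin_unlock(ptl);
			split_huge_pmd(vma, pmd, addr);
		} else {
			/* Real THP: split the compound page itself, since
			 * THPs cannot be migrated as-is. */
			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (ret)	/* split failed: give up on this range */
				return 0;
		}
	} else {
		spin_unlock(ptl);	/* lost a race; PMD is no longer huge */
	}
}

/* Hunk 2: inside the PTE walk, a PTE may still map a tail page of an
 * anonymous compound page. Split that compound page too, then retry the
 * walk so the now-independent base page can be queued for migration. */
if (PageTail(page) && PageAnon(page)) {
	get_page(page);
	pte_unmap_unlock(pte, ptl);	/* cannot split under the PTE lock */
	lock_page(page);
	ret = split_huge_page(page);
	unlock_page(page);
	put_page(page);
	if (ret) {
		/* Failed to split -- retake the lock and skip this page. */
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		continue;
	}
	goto retry;	/* remap the PTEs and revisit this address */
}

The retry label sits just before pte_offset_map_lock(), so after a successful split the walk resumes at the same address, which now maps an ordinary base page and can be queued by migrate_page_add() like any other.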
