@@ -489,14 +489,33 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct page *page;
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
-	int nid;
+	int nid, ret;
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	split_huge_pmd(vma, pmd, addr);
-	if (pmd_trans_unstable(pmd))
-		return 0;
+	if (pmd_trans_huge(*pmd)) {
+		ptl = pmd_lock(walk->mm, pmd);
+		if (pmd_trans_huge(*pmd)) {
+			page = pmd_page(*pmd);
+			if (is_huge_zero_page(page)) {
+				spin_unlock(ptl);
+				split_huge_pmd(vma, pmd, addr);
+			} else {
+				get_page(page);
+				spin_unlock(ptl);
+				lock_page(page);
+				ret = split_huge_page(page);
+				unlock_page(page);
+				put_page(page);
+				if (ret)
+					return 0;
+			}
+		} else {
+			spin_unlock(ptl);
+		}
+	}
 
+retry:
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		if (!pte_present(*pte))
@@ -513,6 +532,21 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		nid = page_to_nid(page);
 		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
 			continue;
+		if (PageTail(page) && PageAnon(page)) {
+			get_page(page);
+			pte_unmap_unlock(pte, ptl);
+			lock_page(page);
+			ret = split_huge_page(page);
+			unlock_page(page);
+			put_page(page);
+			/* Failed to split -- skip. */
+			if (ret) {
+				pte = pte_offset_map_lock(walk->mm, pmd,
+						addr, &ptl);
+				continue;
+			}
+			goto retry;
+		}
 
 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 			migrate_page_add(page, qp->pagelist, flags);
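Both hunks rely on the same split-and-retry idiom: split_huge_page() requires the page lock and may sleep, so the PTE (or PMD) spinlock must be dropped first, with the page pinned so it cannot be freed in the meantime. A minimal sketch of that idiom, assuming kernel context; the helper name queue_pages_split_locked is hypothetical, but every call in the body appears in the diff above:

	#include <linux/mm.h>
	#include <linux/huge_mm.h>
	#include <linux/pagemap.h>

	/*
	 * Illustrative sketch, not the literal commit. The caller holds the
	 * PTE lock in *ptl. We pin the page, drop the lock, take the page
	 * lock, split, then unpin. Because the page-table lock was dropped,
	 * the caller must retake it and rescan on success (the "retry"
	 * label in the patch), since the PTEs may have changed meanwhile.
	 */
	static int queue_pages_split_locked(struct page *page, pte_t *pte,
					    spinlock_t *ptl)
	{
		int ret;

		get_page(page);			/* pin: page can't vanish under us */
		pte_unmap_unlock(pte, ptl);	/* drop spinlock before sleeping */
		lock_page(page);		/* split_huge_page() needs PG_locked */
		ret = split_huge_page(page);	/* 0 on success, non-zero if busy */
		unlock_page(page);
		put_page(page);
		return ret;
	}

Note the failure handling is deliberately lenient: if the split fails, the walker retakes the PTE lock and skips the page rather than aborting the whole walk.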