@@ -1414,6 +1414,11 @@ static inline int try_split_folio(struct folio *folio, struct list_head *split_f
 	return rc;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
+#else
+#define NR_MAX_BATCHED_MIGRATION	512
+#endif
 #define NR_MAX_MIGRATE_PAGES_RETRY	10
 
 struct migrate_pages_stats {
@@ -1515,61 +1520,25 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
 	return nr_failed;
 }
 
-/*
- * migrate_pages - migrate the folios specified in a list, to the free folios
- *		   supplied as the target for the page migration
- *
- * @from:		The list of folios to be migrated.
- * @get_new_page:	The function used to allocate free folios to be used
- *			as the target of the folio migration.
- * @put_new_page:	The function used to free target folios if migration
- *			fails, or NULL if no special handling is necessary.
- * @private:		Private data to be passed on to get_new_page()
- * @mode:		The migration mode that specifies the constraints for
- *			folio migration, if any.
- * @reason:		The reason for folio migration.
- * @ret_succeeded:	Set to the number of folios migrated successfully if
- *			the caller passes a non-NULL pointer.
- *
- * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
- * are movable any more because the list has become empty or no retryable folios
- * exist any more. It is caller's responsibility to call putback_movable_pages()
- * only if ret != 0.
- *
- * Returns the number of {normal folio, large folio, hugetlb} that were not
- * migrated, or an error code. The number of large folio splits will be
- * considered as the number of non-migrated large folio, no matter how many
- * split folios of the large folio are migrated successfully.
- */
-int migrate_pages(struct list_head *from, new_page_t get_new_page,
+static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 		free_page_t put_new_page, unsigned long private,
-		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
+		enum migrate_mode mode, int reason, struct list_head *ret_folios,
+		struct migrate_pages_stats *stats)
 {
 	int retry = 1;
 	int large_retry = 1;
 	int thp_retry = 1;
-	int nr_failed;
+	int nr_failed = 0;
 	int nr_retry_pages = 0;
 	int nr_large_failed = 0;
 	int pass = 0;
 	bool is_large = false;
 	bool is_thp = false;
 	struct folio *folio, *folio2;
 	int rc, nr_pages;
-	LIST_HEAD(ret_folios);
 	LIST_HEAD(split_folios);
 	bool nosplit = (reason == MR_NUMA_MISPLACED);
 	bool no_split_folio_counting = false;
-	struct migrate_pages_stats stats;
-
-	trace_mm_migrate_pages_start(mode, reason);
-
-	memset(&stats, 0, sizeof(stats));
-	rc = migrate_hugetlbs(from, get_new_page, put_new_page, private, mode, reason,
-			      &stats, &ret_folios);
-	if (rc < 0)
-		goto out;
-	nr_failed = rc;
 
 split_folio_migration:
 	for (pass = 0;
@@ -1581,12 +1550,6 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 		nr_retry_pages = 0;
 
 		list_for_each_entry_safe(folio, folio2, from, lru) {
-			/* Retried hugetlb folios will be kept in list  */
-			if (folio_test_hugetlb(folio)) {
-				list_move_tail(&folio->lru, &ret_folios);
-				continue;
-			}
-
 			/*
 			 * Large folio statistics is based on the source large
 			 * folio. Capture required information that might get
@@ -1600,15 +1563,14 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 
 			rc = unmap_and_move(get_new_page, put_new_page,
 						private, folio, pass > 2, mode,
-						reason, &ret_folios);
+						reason, ret_folios);
 			/*
 			 * The rules are:
 			 *	Success: folio will be freed
 			 *	-EAGAIN: stay on the from list
 			 *	-ENOMEM: stay on the from list
 			 *	-ENOSYS: stay on the from list
-			 *	Other errno: put on ret_folios list then splice to
-			 *		     from list
+			 *	Other errno: put on ret_folios list
 			 */
 			switch(rc) {
 			/*
@@ -1625,17 +1587,17 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 				/* Large folio migration is unsupported */
 				if (is_large) {
 					nr_large_failed++;
-					stats.nr_thp_failed += is_thp;
+					stats->nr_thp_failed += is_thp;
 					if (!try_split_folio(folio, &split_folios)) {
-						stats.nr_thp_split += is_thp;
+						stats->nr_thp_split += is_thp;
 						break;
 					}
 				} else if (!no_split_folio_counting) {
 					nr_failed++;
 				}
 
-				stats.nr_failed_pages += nr_pages;
-				list_move_tail(&folio->lru, &ret_folios);
+				stats->nr_failed_pages += nr_pages;
+				list_move_tail(&folio->lru, ret_folios);
 				break;
 			case -ENOMEM:
 				/*
@@ -1644,13 +1606,13 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 				 */
 				if (is_large) {
 					nr_large_failed++;
-					stats.nr_thp_failed += is_thp;
+					stats->nr_thp_failed += is_thp;
 					/* Large folio NUMA faulting doesn't split to retry. */
 					if (!nosplit) {
 						int ret = try_split_folio(folio, &split_folios);
 
 						if (!ret) {
-							stats.nr_thp_split += is_thp;
+							stats->nr_thp_split += is_thp;
 							break;
 						} else if (reason == MR_LONGTERM_PIN &&
 							   ret == -EAGAIN) {
@@ -1668,17 +1630,17 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 					nr_failed++;
 				}
 
-				stats.nr_failed_pages += nr_pages + nr_retry_pages;
+				stats->nr_failed_pages += nr_pages + nr_retry_pages;
 				/*
 				 * There might be some split folios of fail-to-migrate large
-				 * folios left in split_folios list. Move them back to migration
+				 * folios left in split_folios list. Move them to ret_folios
 				 * list so that they could be put back to the right list by
 				 * the caller otherwise the folio refcnt will be leaked.
 				 */
-				list_splice_init(&split_folios, from);
+				list_splice_init(&split_folios, ret_folios);
 				/* nr_failed isn't updated for not used */
 				nr_large_failed += large_retry;
-				stats.nr_thp_failed += thp_retry;
+				stats->nr_thp_failed += thp_retry;
 				goto out;
 			case -EAGAIN:
 				if (is_large) {
@@ -1690,8 +1652,8 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 				nr_retry_pages += nr_pages;
 				break;
 			case MIGRATEPAGE_SUCCESS:
-				stats.nr_succeeded += nr_pages;
-				stats.nr_thp_succeeded += is_thp;
+				stats->nr_succeeded += nr_pages;
+				stats->nr_thp_succeeded += is_thp;
 				break;
 			default:
 				/*
@@ -1702,20 +1664,20 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 				 */
 				if (is_large) {
 					nr_large_failed++;
-					stats.nr_thp_failed += is_thp;
+					stats->nr_thp_failed += is_thp;
 				} else if (!no_split_folio_counting) {
 					nr_failed++;
 				}
 
-				stats.nr_failed_pages += nr_pages;
+				stats->nr_failed_pages += nr_pages;
 				break;
 			}
 		}
 	}
 	nr_failed += retry;
 	nr_large_failed += large_retry;
-	stats.nr_thp_failed += thp_retry;
-	stats.nr_failed_pages += nr_retry_pages;
+	stats->nr_thp_failed += thp_retry;
+	stats->nr_failed_pages += nr_retry_pages;
 	/*
 	 * Try to migrate split folios of fail-to-migrate large folios, no
 	 * nr_failed counting in this round, since all split folios of a
@@ -1726,14 +1688,90 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 		 * Move non-migrated folios (after NR_MAX_MIGRATE_PAGES_RETRY
 		 * retries) to ret_folios to avoid migrating them again.
 		 */
-		list_splice_init(from, &ret_folios);
+		list_splice_init(from, ret_folios);
 		list_splice_init(&split_folios, from);
 		no_split_folio_counting = true;
 		retry = 1;
 		goto split_folio_migration;
 	}
 
 	rc = nr_failed + nr_large_failed;
+out:
+	return rc;
+}
+
+/*
+ * migrate_pages - migrate the folios specified in a list, to the free folios
+ *		   supplied as the target for the page migration
+ *
+ * @from:		The list of folios to be migrated.
+ * @get_new_page:	The function used to allocate free folios to be used
+ *			as the target of the folio migration.
+ * @put_new_page:	The function used to free target folios if migration
+ *			fails, or NULL if no special handling is necessary.
+ * @private:		Private data to be passed on to get_new_page()
+ * @mode:		The migration mode that specifies the constraints for
+ *			folio migration, if any.
+ * @reason:		The reason for folio migration.
+ * @ret_succeeded:	Set to the number of folios migrated successfully if
+ *			the caller passes a non-NULL pointer.
+ *
+ * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
+ * are movable any more because the list has become empty or no retryable folios
+ * exist any more. It is caller's responsibility to call putback_movable_pages()
+ * only if ret != 0.
+ *
+ * Returns the number of {normal folio, large folio, hugetlb} that were not
+ * migrated, or an error code. The number of large folio splits will be
+ * considered as the number of non-migrated large folio, no matter how many
+ * split folios of the large folio are migrated successfully.
+ */
+int migrate_pages(struct list_head *from, new_page_t get_new_page,
+		free_page_t put_new_page, unsigned long private,
+		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
+{
+	int rc, rc_gather;
+	int nr_pages;
+	struct folio *folio, *folio2;
+	LIST_HEAD(folios);
+	LIST_HEAD(ret_folios);
+	struct migrate_pages_stats stats;
+
+	trace_mm_migrate_pages_start(mode, reason);
+
+	memset(&stats, 0, sizeof(stats));
+
+	rc_gather = migrate_hugetlbs(from, get_new_page, put_new_page, private,
+				     mode, reason, &stats, &ret_folios);
+	if (rc_gather < 0)
+		goto out;
+again:
+	nr_pages = 0;
+	list_for_each_entry_safe(folio, folio2, from, lru) {
+		/* Retried hugetlb folios will be kept in list  */
+		if (folio_test_hugetlb(folio)) {
+			list_move_tail(&folio->lru, &ret_folios);
+			continue;
+		}
+
+		nr_pages += folio_nr_pages(folio);
+		if (nr_pages > NR_MAX_BATCHED_MIGRATION)
+			break;
+	}
+	if (nr_pages > NR_MAX_BATCHED_MIGRATION)
+		list_cut_before(&folios, from, &folio->lru);
+	else
+		list_splice_init(from, &folios);
+	rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
+				 mode, reason, &ret_folios, &stats);
+	list_splice_tail_init(&folios, &ret_folios);
+	if (rc < 0) {
+		rc_gather = rc;
+		goto out;
+	}
+	rc_gather += rc;
+	if (!list_empty(from))
+		goto again;
 out:
 	/*
 	 * Put the permanent failure folio back to migration list, they
@@ -1746,7 +1784,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 	 * are migrated successfully.
 	 */
 	if (list_empty(from))
-		rc = 0;
+		rc_gather = 0;
 
 	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
 	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
@@ -1760,7 +1798,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 	if (ret_succeeded)
 		*ret_succeeded = stats.nr_succeeded;
 
-	return rc;
+	return rc_gather;
 }
 
 struct page *alloc_migration_target(struct page *page, unsigned long private)
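
With this patch, migrate_pages() becomes a thin driver: it walks the request list, carves off a batch whose page count stays within NR_MAX_BATCHED_MIGRATION, hands that batch to migrate_pages_batch(), and loops until the list is drained. The userspace sketch below mirrors only that batching loop and is not the kernel code: the struct folio, migrate_batch() and migrate_all() names are made-up stand-ins, hugetlb skipping, stats gathering and error propagation are omitted, and a plain pointer walk replaces list_cut_before().

#include <stdio.h>
#include <stddef.h>

#define NR_MAX_BATCHED_MIGRATION 512	/* HPAGE_PMD_NR on x86-64 when THP is enabled */

struct folio {
	int nr_pages;			/* 1 for a base page, 512 for a PMD-sized THP */
	struct folio *next;
};

/* Stand-in for migrate_pages_batch(): just report what one batched call would see. */
static void migrate_batch(struct folio *batch, int nr_folios, int nr_pages)
{
	(void)batch;
	printf("batch: %d folios, %d pages\n", nr_folios, nr_pages);
}

/*
 * Mirror of the batching loop in the new migrate_pages(): take folios from
 * the head of the list until adding one more would push the running page
 * count past NR_MAX_BATCHED_MIGRATION, submit that prefix as one batch (the
 * kernel cuts it off with list_cut_before()), and repeat until the list is
 * empty.  The folio that would overflow the cap starts the next batch.
 */
static void migrate_all(struct folio *head)
{
	while (head) {
		struct folio *f = head;
		int batch_pages = 0, batch_folios = 0;

		while (f && batch_pages + f->nr_pages <= NR_MAX_BATCHED_MIGRATION) {
			batch_pages += f->nr_pages;
			batch_folios++;
			f = f->next;
		}
		/* Demo-only guard: a single folio larger than the cap forms its own batch. */
		if (!batch_folios) {
			batch_pages = f->nr_pages;
			batch_folios = 1;
			f = f->next;
		}
		migrate_batch(head, batch_folios, batch_pages);
		head = f;
	}
}

int main(void)
{
	/* Three PMD-sized THPs and two base pages queued for migration. */
	struct folio f[] = {
		{ 512, &f[1] }, { 1, &f[2] }, { 512, &f[3] },
		{ 1, &f[4] }, { 512, NULL },
	};

	migrate_all(&f[0]);
	return 0;
}

Running the sketch yields five batches (512, 1, 512, 1 and 512 pages), which illustrates why the cap is tied to HPAGE_PMD_NR when THP is configured: a single PMD-sized folio already fills a whole batch, so batching never holds more than one THP's worth of pages unmapped at a time.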