Skip to content

Commit 42012e0

Browse files
yhuang-intel authored and akpm00 committed
migrate_pages: restrict number of pages to migrate in batch
This is a preparation patch to batch the folio unmapping and moving for non-hugetlb folios. If we had batched the folio unmapping, all folios to be migrated would be unmapped before copying the contents and flags of the folios. If the folios that were passed to migrate_pages() were too many in unit of pages, the execution of the processes would be stopped for too long time, thus too long latency. For example, migrate_pages() syscall will call migrate_pages() with all folios of a process. To avoid this possible issue, in this patch, we restrict the number of pages to be migrated to be no more than HPAGE_PMD_NR. That is, the influence is at the same level of THP migration. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: "Huang, Ying" <[email protected]> Reviewed-by: Baolin Wang <[email protected]> Cc: Zi Yan <[email protected]> Cc: Yang Shi <[email protected]> Cc: Oscar Salvador <[email protected]> Cc: Matthew Wilcox <[email protected]> Cc: Bharata B Rao <[email protected]> Cc: Alistair Popple <[email protected]> Cc: Xin Hao <[email protected]> Cc: Minchan Kim <[email protected]> Cc: Mike Kravetz <[email protected]> Cc: Hyeonggon Yoo <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent e5bfff8 commit 42012e0

File tree

1 file changed

+106
-68
lines changed

1 file changed

+106
-68
lines changed

mm/migrate.c

Lines changed: 106 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -1414,6 +1414,11 @@ static inline int try_split_folio(struct folio *folio, struct list_head *split_f
14141414
return rc;
14151415
}
14161416

1417+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1418+
#define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
1419+
#else
1420+
#define NR_MAX_BATCHED_MIGRATION 512
1421+
#endif
14171422
#define NR_MAX_MIGRATE_PAGES_RETRY 10
14181423

14191424
struct migrate_pages_stats {
@@ -1515,61 +1520,25 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
15151520
return nr_failed;
15161521
}
15171522

1518-
/*
1519-
* migrate_pages - migrate the folios specified in a list, to the free folios
1520-
* supplied as the target for the page migration
1521-
*
1522-
* @from: The list of folios to be migrated.
1523-
* @get_new_page: The function used to allocate free folios to be used
1524-
* as the target of the folio migration.
1525-
* @put_new_page: The function used to free target folios if migration
1526-
* fails, or NULL if no special handling is necessary.
1527-
* @private: Private data to be passed on to get_new_page()
1528-
* @mode: The migration mode that specifies the constraints for
1529-
* folio migration, if any.
1530-
* @reason: The reason for folio migration.
1531-
* @ret_succeeded: Set to the number of folios migrated successfully if
1532-
* the caller passes a non-NULL pointer.
1533-
*
1534-
* The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1535-
* are movable any more because the list has become empty or no retryable folios
1536-
* exist any more. It is caller's responsibility to call putback_movable_pages()
1537-
* only if ret != 0.
1538-
*
1539-
* Returns the number of {normal folio, large folio, hugetlb} that were not
1540-
* migrated, or an error code. The number of large folio splits will be
1541-
* considered as the number of non-migrated large folio, no matter how many
1542-
* split folios of the large folio are migrated successfully.
1543-
*/
1544-
int migrate_pages(struct list_head *from, new_page_t get_new_page,
1523+
static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
15451524
free_page_t put_new_page, unsigned long private,
1546-
enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1525+
enum migrate_mode mode, int reason, struct list_head *ret_folios,
1526+
struct migrate_pages_stats *stats)
15471527
{
15481528
int retry = 1;
15491529
int large_retry = 1;
15501530
int thp_retry = 1;
1551-
int nr_failed;
1531+
int nr_failed = 0;
15521532
int nr_retry_pages = 0;
15531533
int nr_large_failed = 0;
15541534
int pass = 0;
15551535
bool is_large = false;
15561536
bool is_thp = false;
15571537
struct folio *folio, *folio2;
15581538
int rc, nr_pages;
1559-
LIST_HEAD(ret_folios);
15601539
LIST_HEAD(split_folios);
15611540
bool nosplit = (reason == MR_NUMA_MISPLACED);
15621541
bool no_split_folio_counting = false;
1563-
struct migrate_pages_stats stats;
1564-
1565-
trace_mm_migrate_pages_start(mode, reason);
1566-
1567-
memset(&stats, 0, sizeof(stats));
1568-
rc = migrate_hugetlbs(from, get_new_page, put_new_page, private, mode, reason,
1569-
&stats, &ret_folios);
1570-
if (rc < 0)
1571-
goto out;
1572-
nr_failed = rc;
15731542

15741543
split_folio_migration:
15751544
for (pass = 0;
@@ -1581,12 +1550,6 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
15811550
nr_retry_pages = 0;
15821551

15831552
list_for_each_entry_safe(folio, folio2, from, lru) {
1584-
/* Retried hugetlb folios will be kept in list */
1585-
if (folio_test_hugetlb(folio)) {
1586-
list_move_tail(&folio->lru, &ret_folios);
1587-
continue;
1588-
}
1589-
15901553
/*
15911554
* Large folio statistics is based on the source large
15921555
* folio. Capture required information that might get
@@ -1600,15 +1563,14 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
16001563

16011564
rc = unmap_and_move(get_new_page, put_new_page,
16021565
private, folio, pass > 2, mode,
1603-
reason, &ret_folios);
1566+
reason, ret_folios);
16041567
/*
16051568
* The rules are:
16061569
* Success: folio will be freed
16071570
* -EAGAIN: stay on the from list
16081571
* -ENOMEM: stay on the from list
16091572
* -ENOSYS: stay on the from list
1610-
* Other errno: put on ret_folios list then splice to
1611-
* from list
1573+
* Other errno: put on ret_folios list
16121574
*/
16131575
switch(rc) {
16141576
/*
@@ -1625,17 +1587,17 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
16251587
/* Large folio migration is unsupported */
16261588
if (is_large) {
16271589
nr_large_failed++;
1628-
stats.nr_thp_failed += is_thp;
1590+
stats->nr_thp_failed += is_thp;
16291591
if (!try_split_folio(folio, &split_folios)) {
1630-
stats.nr_thp_split += is_thp;
1592+
stats->nr_thp_split += is_thp;
16311593
break;
16321594
}
16331595
} else if (!no_split_folio_counting) {
16341596
nr_failed++;
16351597
}
16361598

1637-
stats.nr_failed_pages += nr_pages;
1638-
list_move_tail(&folio->lru, &ret_folios);
1599+
stats->nr_failed_pages += nr_pages;
1600+
list_move_tail(&folio->lru, ret_folios);
16391601
break;
16401602
case -ENOMEM:
16411603
/*
@@ -1644,13 +1606,13 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
16441606
*/
16451607
if (is_large) {
16461608
nr_large_failed++;
1647-
stats.nr_thp_failed += is_thp;
1609+
stats->nr_thp_failed += is_thp;
16481610
/* Large folio NUMA faulting doesn't split to retry. */
16491611
if (!nosplit) {
16501612
int ret = try_split_folio(folio, &split_folios);
16511613

16521614
if (!ret) {
1653-
stats.nr_thp_split += is_thp;
1615+
stats->nr_thp_split += is_thp;
16541616
break;
16551617
} else if (reason == MR_LONGTERM_PIN &&
16561618
ret == -EAGAIN) {
@@ -1668,17 +1630,17 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
16681630
nr_failed++;
16691631
}
16701632

1671-
stats.nr_failed_pages += nr_pages + nr_retry_pages;
1633+
stats->nr_failed_pages += nr_pages + nr_retry_pages;
16721634
/*
16731635
* There might be some split folios of fail-to-migrate large
1674-
* folios left in split_folios list. Move them back to migration
1636+
* folios left in split_folios list. Move them to ret_folios
16751637
* list so that they could be put back to the right list by
16761638
* the caller otherwise the folio refcnt will be leaked.
16771639
*/
1678-
list_splice_init(&split_folios, from);
1640+
list_splice_init(&split_folios, ret_folios);
16791641
/* nr_failed isn't updated for not used */
16801642
nr_large_failed += large_retry;
1681-
stats.nr_thp_failed += thp_retry;
1643+
stats->nr_thp_failed += thp_retry;
16821644
goto out;
16831645
case -EAGAIN:
16841646
if (is_large) {
@@ -1690,8 +1652,8 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
16901652
nr_retry_pages += nr_pages;
16911653
break;
16921654
case MIGRATEPAGE_SUCCESS:
1693-
stats.nr_succeeded += nr_pages;
1694-
stats.nr_thp_succeeded += is_thp;
1655+
stats->nr_succeeded += nr_pages;
1656+
stats->nr_thp_succeeded += is_thp;
16951657
break;
16961658
default:
16971659
/*
@@ -1702,20 +1664,20 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
17021664
*/
17031665
if (is_large) {
17041666
nr_large_failed++;
1705-
stats.nr_thp_failed += is_thp;
1667+
stats->nr_thp_failed += is_thp;
17061668
} else if (!no_split_folio_counting) {
17071669
nr_failed++;
17081670
}
17091671

1710-
stats.nr_failed_pages += nr_pages;
1672+
stats->nr_failed_pages += nr_pages;
17111673
break;
17121674
}
17131675
}
17141676
}
17151677
nr_failed += retry;
17161678
nr_large_failed += large_retry;
1717-
stats.nr_thp_failed += thp_retry;
1718-
stats.nr_failed_pages += nr_retry_pages;
1679+
stats->nr_thp_failed += thp_retry;
1680+
stats->nr_failed_pages += nr_retry_pages;
17191681
/*
17201682
* Try to migrate split folios of fail-to-migrate large folios, no
17211683
* nr_failed counting in this round, since all split folios of a
@@ -1726,14 +1688,90 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
17261688
* Move non-migrated folios (after NR_MAX_MIGRATE_PAGES_RETRY
17271689
* retries) to ret_folios to avoid migrating them again.
17281690
*/
1729-
list_splice_init(from, &ret_folios);
1691+
list_splice_init(from, ret_folios);
17301692
list_splice_init(&split_folios, from);
17311693
no_split_folio_counting = true;
17321694
retry = 1;
17331695
goto split_folio_migration;
17341696
}
17351697

17361698
rc = nr_failed + nr_large_failed;
1699+
out:
1700+
return rc;
1701+
}
1702+
1703+
/*
1704+
* migrate_pages - migrate the folios specified in a list, to the free folios
1705+
* supplied as the target for the page migration
1706+
*
1707+
* @from: The list of folios to be migrated.
1708+
* @get_new_page: The function used to allocate free folios to be used
1709+
* as the target of the folio migration.
1710+
* @put_new_page: The function used to free target folios if migration
1711+
* fails, or NULL if no special handling is necessary.
1712+
* @private: Private data to be passed on to get_new_page()
1713+
* @mode: The migration mode that specifies the constraints for
1714+
* folio migration, if any.
1715+
* @reason: The reason for folio migration.
1716+
* @ret_succeeded: Set to the number of folios migrated successfully if
1717+
* the caller passes a non-NULL pointer.
1718+
*
1719+
* The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1720+
* are movable any more because the list has become empty or no retryable folios
1721+
* exist any more. It is caller's responsibility to call putback_movable_pages()
1722+
* only if ret != 0.
1723+
*
1724+
* Returns the number of {normal folio, large folio, hugetlb} that were not
1725+
* migrated, or an error code. The number of large folio splits will be
1726+
* considered as the number of non-migrated large folio, no matter how many
1727+
* split folios of the large folio are migrated successfully.
1728+
*/
1729+
int migrate_pages(struct list_head *from, new_page_t get_new_page,
1730+
free_page_t put_new_page, unsigned long private,
1731+
enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1732+
{
1733+
int rc, rc_gather;
1734+
int nr_pages;
1735+
struct folio *folio, *folio2;
1736+
LIST_HEAD(folios);
1737+
LIST_HEAD(ret_folios);
1738+
struct migrate_pages_stats stats;
1739+
1740+
trace_mm_migrate_pages_start(mode, reason);
1741+
1742+
memset(&stats, 0, sizeof(stats));
1743+
1744+
rc_gather = migrate_hugetlbs(from, get_new_page, put_new_page, private,
1745+
mode, reason, &stats, &ret_folios);
1746+
if (rc_gather < 0)
1747+
goto out;
1748+
again:
1749+
nr_pages = 0;
1750+
list_for_each_entry_safe(folio, folio2, from, lru) {
1751+
/* Retried hugetlb folios will be kept in list */
1752+
if (folio_test_hugetlb(folio)) {
1753+
list_move_tail(&folio->lru, &ret_folios);
1754+
continue;
1755+
}
1756+
1757+
nr_pages += folio_nr_pages(folio);
1758+
if (nr_pages > NR_MAX_BATCHED_MIGRATION)
1759+
break;
1760+
}
1761+
if (nr_pages > NR_MAX_BATCHED_MIGRATION)
1762+
list_cut_before(&folios, from, &folio->lru);
1763+
else
1764+
list_splice_init(from, &folios);
1765+
rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
1766+
mode, reason, &ret_folios, &stats);
1767+
list_splice_tail_init(&folios, &ret_folios);
1768+
if (rc < 0) {
1769+
rc_gather = rc;
1770+
goto out;
1771+
}
1772+
rc_gather += rc;
1773+
if (!list_empty(from))
1774+
goto again;
17371775
out:
17381776
/*
17391777
* Put the permanent failure folio back to migration list, they
@@ -1746,7 +1784,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
17461784
* are migrated successfully.
17471785
*/
17481786
if (list_empty(from))
1749-
rc = 0;
1787+
rc_gather = 0;
17501788

17511789
count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
17521790
count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
@@ -1760,7 +1798,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
17601798
if (ret_succeeded)
17611799
*ret_succeeded = stats.nr_succeeded;
17621800

1763-
return rc;
1801+
return rc_gather;
17641802
}
17651803

17661804
struct page *alloc_migration_target(struct page *page, unsigned long private)

0 commit comments

Comments (0)