Skip to content

Commit f752e67

Browse files
Byungchul Park authored and akpm00 committed
mm: separate move/undo parts from migrate_pages_batch()
Functionally, no change. This is a preparation for the luf mechanism, which requires separate folio lists for its own handling during migration. Refactored migrate_pages_batch() so as to separate the move/undo parts from migrate_pages_batch(). Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Byungchul Park <[email protected]> Reviewed-by: Shivank Garg <[email protected]> Reviewed-by: Zi Yan <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent ff9b7e0 commit f752e67

File tree

1 file changed

+83
-51
lines changed

1 file changed

+83
-51
lines changed

mm/migrate.c

Lines changed: 83 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -1687,6 +1687,81 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
16871687
return nr_failed;
16881688
}
16891689

1690+
/*
 * Move the folios on @src_folios into their already-allocated destinations
 * on @dst_folios, accumulating per-outcome counters.
 *
 * @src_folios and @dst_folios are walked in lock-step: the two lists are
 * expected to be the same length, with the i-th dst folio being the
 * migration target of the i-th src folio.  dst/dst2 are advanced manually
 * because migrate_folio_move() may remove entries from the lists as it goes.
 *
 * Outcome bookkeeping (see the switch below):
 *   MIGRATEPAGE_SUCCESS -> stats->nr_succeeded / nr_thp_succeeded
 *   -EAGAIN             -> *retry / *thp_retry / *nr_retry_pages
 *                          (folio stays on src_folios for another pass)
 *   other errno         -> *nr_failed / stats failure counters
 *                          (folio is moved to @ret_folios)
 *
 * Counters are passed as pointers so the caller's per-pass retry state in
 * migrate_pages_batch() is updated in place.
 */
static void migrate_folios_move(struct list_head *src_folios,
		struct list_head *dst_folios,
		free_folio_t put_new_folio, unsigned long private,
		enum migrate_mode mode, int reason,
		struct list_head *ret_folios,
		struct migrate_pages_stats *stats,
		int *retry, int *thp_retry, int *nr_failed,
		int *nr_retry_pages)
{
	struct folio *folio, *folio2, *dst, *dst2;
	bool is_thp;
	int nr_pages;
	int rc;

	dst = list_first_entry(dst_folios, struct folio, lru);
	dst2 = list_next_entry(dst, lru);
	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
		/* Snapshot size/THP-ness before the folio may be freed. */
		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
		nr_pages = folio_nr_pages(folio);

		cond_resched();

		rc = migrate_folio_move(put_new_folio, private,
				folio, dst, mode,
				reason, ret_folios);
		/*
		 * The rules are:
		 *	Success: folio will be freed
		 *	-EAGAIN: stay on the unmap_folios list
		 *	Other errno: put on ret_folios list
		 */
		switch (rc) {
		case -EAGAIN:
			*retry += 1;
			*thp_retry += is_thp;
			*nr_retry_pages += nr_pages;
			break;
		case MIGRATEPAGE_SUCCESS:
			stats->nr_succeeded += nr_pages;
			stats->nr_thp_succeeded += is_thp;
			break;
		default:
			*nr_failed += 1;
			stats->nr_thp_failed += is_thp;
			stats->nr_failed_pages += nr_pages;
			break;
		}
		/* Advance the dst cursor in lock-step with the src walk. */
		dst = dst2;
		dst2 = list_next_entry(dst, lru);
	}
}
1741+
1742+
/*
 * Undo the unmap stage for every folio remaining on @src_folios, releasing
 * the corresponding destination folios on @dst_folios.
 *
 * Used as the cleanup path of migrate_pages_batch(): any folio still on
 * src_folios was unmapped but never (successfully) moved, so restore the
 * source folio's state and hand the destination folio back via
 * @put_new_folio/@private.  The two lists are walked in lock-step, same
 * pairing as in migrate_folios_move().
 */
static void migrate_folios_undo(struct list_head *src_folios,
		struct list_head *dst_folios,
		free_folio_t put_new_folio, unsigned long private,
		struct list_head *ret_folios)
{
	struct folio *folio, *folio2, *dst, *dst2;

	dst = list_first_entry(dst_folios, struct folio, lru);
	dst2 = list_next_entry(dst, lru);
	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
		int old_page_state = 0;
		struct anon_vma *anon_vma = NULL;

		/* Recover the state stashed in dst at unmap time. */
		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
				anon_vma, true, ret_folios);
		list_del(&dst->lru);
		migrate_folio_undo_dst(dst, true, put_new_folio, private);
		dst = dst2;
		dst2 = list_next_entry(dst, lru);
	}
}
1764+
16901765
/*
16911766
* migrate_pages_batch() first unmaps folios in the from list as many as
16921767
* possible, then move the unmapped folios.
@@ -1709,7 +1784,7 @@ static int migrate_pages_batch(struct list_head *from,
17091784
int pass = 0;
17101785
bool is_thp = false;
17111786
bool is_large = false;
1712-
struct folio *folio, *folio2, *dst = NULL, *dst2;
1787+
struct folio *folio, *folio2, *dst = NULL;
17131788
int rc, rc_saved = 0, nr_pages;
17141789
LIST_HEAD(unmap_folios);
17151790
LIST_HEAD(dst_folios);
@@ -1880,42 +1955,11 @@ static int migrate_pages_batch(struct list_head *from,
18801955
thp_retry = 0;
18811956
nr_retry_pages = 0;
18821957

1883-
dst = list_first_entry(&dst_folios, struct folio, lru);
1884-
dst2 = list_next_entry(dst, lru);
1885-
list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1886-
is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1887-
nr_pages = folio_nr_pages(folio);
1888-
1889-
cond_resched();
1890-
1891-
rc = migrate_folio_move(put_new_folio, private,
1892-
folio, dst, mode,
1893-
reason, ret_folios);
1894-
/*
1895-
* The rules are:
1896-
* Success: folio will be freed
1897-
* -EAGAIN: stay on the unmap_folios list
1898-
* Other errno: put on ret_folios list
1899-
*/
1900-
switch(rc) {
1901-
case -EAGAIN:
1902-
retry++;
1903-
thp_retry += is_thp;
1904-
nr_retry_pages += nr_pages;
1905-
break;
1906-
case MIGRATEPAGE_SUCCESS:
1907-
stats->nr_succeeded += nr_pages;
1908-
stats->nr_thp_succeeded += is_thp;
1909-
break;
1910-
default:
1911-
nr_failed++;
1912-
stats->nr_thp_failed += is_thp;
1913-
stats->nr_failed_pages += nr_pages;
1914-
break;
1915-
}
1916-
dst = dst2;
1917-
dst2 = list_next_entry(dst, lru);
1918-
}
1958+
/* Move the unmapped folios */
1959+
migrate_folios_move(&unmap_folios, &dst_folios,
1960+
put_new_folio, private, mode, reason,
1961+
ret_folios, stats, &retry, &thp_retry,
1962+
&nr_failed, &nr_retry_pages);
19191963
}
19201964
nr_failed += retry;
19211965
stats->nr_thp_failed += thp_retry;
@@ -1924,20 +1968,8 @@ static int migrate_pages_batch(struct list_head *from,
19241968
rc = rc_saved ? : nr_failed;
19251969
out:
19261970
/* Cleanup remaining folios */
1927-
dst = list_first_entry(&dst_folios, struct folio, lru);
1928-
dst2 = list_next_entry(dst, lru);
1929-
list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1930-
int old_page_state = 0;
1931-
struct anon_vma *anon_vma = NULL;
1932-
1933-
__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1934-
migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1935-
anon_vma, true, ret_folios);
1936-
list_del(&dst->lru);
1937-
migrate_folio_undo_dst(dst, true, put_new_folio, private);
1938-
dst = dst2;
1939-
dst2 = list_next_entry(dst, lru);
1940-
}
1971+
migrate_folios_undo(&unmap_folios, &dst_folios,
1972+
put_new_folio, private, ret_folios);
19411973

19421974
return rc;
19431975
}

0 commit comments

Comments
 (0)