
Commit 682a71a

Authored by Matthew Wilcox (Oracle); committed by akpm00 (Andrew Morton)

migrate: convert __unmap_and_move() to use folios

Removes a lot of calls to compound_head(). Also remove a VM_BUG_ON that
can never trigger as the PageAnon bit is the bottom bit of page->mapping.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>

1 parent 595af4c commit 682a71a
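
A note on the deleted assertion: VM_BUG_ON_PAGE(PageAnon(page), page) sat
inside the "if (!page->mapping)" branch, and because anonymity is encoded
in the bottom bit of page->mapping itself, a NULL mapping can never test
as anonymous. A minimal userspace sketch of that encoding (the struct
layout is simplified, and PageAnon_sketch() is a hypothetical stand-in
for the kernel's PageAnon(); PAGE_MAPPING_ANON mirrors the real flag bit):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* In the kernel, anonymous pages store an anon_vma pointer in
 * page->mapping with the low bit set; PAGE_MAPPING_ANON is that bit. */
#define PAGE_MAPPING_ANON 0x1UL

struct page { void *mapping; };

/* Hypothetical stand-in for the kernel's PageAnon() test. */
static bool PageAnon_sketch(const struct page *page)
{
	return ((uintptr_t)page->mapping & PAGE_MAPPING_ANON) != 0;
}

int main(void)
{
	struct page orphan = { .mapping = NULL };

	/* mapping == NULL means the anon bit cannot be set, so the
	 * deleted VM_BUG_ON_PAGE(PageAnon(page), page) inside the
	 * "if (!page->mapping)" branch could never fire. */
	printf("PageAnon(orphan) = %d\n", PageAnon_sketch(&orphan));
	return 0;
}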

File tree

1 file changed: +37, -38 lines


mm/migrate.c

Lines changed: 37 additions & 38 deletions
@@ -993,17 +993,15 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 	return rc;
 }

-static int __unmap_and_move(struct page *page, struct page *newpage,
+static int __unmap_and_move(struct folio *src, struct folio *dst,
 				int force, enum migrate_mode mode)
 {
-	struct folio *folio = page_folio(page);
-	struct folio *dst = page_folio(newpage);
 	int rc = -EAGAIN;
 	bool page_was_mapped = false;
 	struct anon_vma *anon_vma = NULL;
-	bool is_lru = !__PageMovable(page);
+	bool is_lru = !__PageMovable(&src->page);

-	if (!trylock_page(page)) {
+	if (!folio_trylock(src)) {
 		if (!force || mode == MIGRATE_ASYNC)
 			goto out;

@@ -1023,10 +1021,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		if (current->flags & PF_MEMALLOC)
 			goto out;

-		lock_page(page);
+		folio_lock(src);
 	}

-	if (PageWriteback(page)) {
+	if (folio_test_writeback(src)) {
 		/*
 		 * Only in the case of a full synchronous migration is it
 		 * necessary to wait for PageWriteback. In the async case,
@@ -1043,12 +1041,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		}
 		if (!force)
 			goto out_unlock;
-		wait_on_page_writeback(page);
+		folio_wait_writeback(src);
 	}

 	/*
-	 * By try_to_migrate(), page->mapcount goes down to 0 here. In this case,
-	 * we cannot notice that anon_vma is freed while we migrates a page.
+	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
+	 * we cannot notice that anon_vma is freed while we migrate a page.
 	 * This get_anon_vma() delays freeing anon_vma pointer until the end
 	 * of migration. File cache pages are no problem because of page_lock()
 	 * File Caches may use write_page() or lock_page() in migration, then,
@@ -1060,88 +1058,87 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	 * because that implies that the anon page is no longer mapped
 	 * (and cannot be remapped so long as we hold the page lock).
 	 */
-	if (PageAnon(page) && !PageKsm(page))
-		anon_vma = page_get_anon_vma(page);
+	if (folio_test_anon(src) && !folio_test_ksm(src))
+		anon_vma = page_get_anon_vma(&src->page);

 	/*
 	 * Block others from accessing the new page when we get around to
 	 * establishing additional references. We are usually the only one
-	 * holding a reference to newpage at this point. We used to have a BUG
-	 * here if trylock_page(newpage) fails, but would like to allow for
-	 * cases where there might be a race with the previous use of newpage.
+	 * holding a reference to dst at this point. We used to have a BUG
+	 * here if folio_trylock(dst) fails, but would like to allow for
+	 * cases where there might be a race with the previous use of dst.
 	 * This is much like races on refcount of oldpage: just don't BUG().
 	 */
-	if (unlikely(!trylock_page(newpage)))
+	if (unlikely(!folio_trylock(dst)))
 		goto out_unlock;

 	if (unlikely(!is_lru)) {
-		rc = move_to_new_folio(dst, folio, mode);
+		rc = move_to_new_folio(dst, src, mode);
 		goto out_unlock_both;
 	}

 	/*
 	 * Corner case handling:
 	 * 1. When a new swap-cache page is read into, it is added to the LRU
 	 * and treated as swapcache but it has no rmap yet.
-	 * Calling try_to_unmap() against a page->mapping==NULL page will
+	 * Calling try_to_unmap() against a src->mapping==NULL page will
 	 * trigger a BUG. So handle it here.
 	 * 2. An orphaned page (see truncate_cleanup_page) might have
 	 * fs-private metadata. The page can be picked up due to memory
 	 * offlining. Everywhere else except page reclaim, the page is
 	 * invisible to the vm, so the page can not be migrated. So try to
 	 * free the metadata, so the page can be freed.
 	 */
-	if (!page->mapping) {
-		VM_BUG_ON_PAGE(PageAnon(page), page);
-		if (page_has_private(page)) {
-			try_to_free_buffers(folio);
+	if (!src->mapping) {
+		if (folio_test_private(src)) {
+			try_to_free_buffers(src);
 			goto out_unlock_both;
 		}
-	} else if (page_mapped(page)) {
+	} else if (folio_mapped(src)) {
 		/* Establish migration ptes */
-		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
-				page);
-		try_to_migrate(folio, 0);
+		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
+			       !folio_test_ksm(src) && !anon_vma, src);
+		try_to_migrate(src, 0);
 		page_was_mapped = true;
 	}

-	if (!page_mapped(page))
-		rc = move_to_new_folio(dst, folio, mode);
+	if (!folio_mapped(src))
+		rc = move_to_new_folio(dst, src, mode);

 	/*
-	 * When successful, push newpage to LRU immediately: so that if it
+	 * When successful, push dst to LRU immediately: so that if it
 	 * turns out to be an mlocked page, remove_migration_ptes() will
-	 * automatically build up the correct newpage->mlock_count for it.
+	 * automatically build up the correct dst->mlock_count for it.
 	 *
 	 * We would like to do something similar for the old page, when
 	 * unsuccessful, and other cases when a page has been temporarily
 	 * isolated from the unevictable LRU: but this case is the easiest.
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS) {
-		lru_cache_add(newpage);
+		folio_add_lru(dst);
 		if (page_was_mapped)
 			lru_add_drain();
 	}

 	if (page_was_mapped)
-		remove_migration_ptes(folio,
-			rc == MIGRATEPAGE_SUCCESS ? dst : folio, false);
+		remove_migration_ptes(src,
+			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

 out_unlock_both:
-	unlock_page(newpage);
+	folio_unlock(dst);
 out_unlock:
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
 		put_anon_vma(anon_vma);
-	unlock_page(page);
+	folio_unlock(src);
 out:
 	/*
-	 * If migration is successful, decrease refcount of the newpage,
+	 * If migration is successful, decrease refcount of dst,
 	 * which will not free the page because new page owner increased
 	 * refcounter.
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS)
-		put_page(newpage);
+		folio_put(dst);

 	return rc;
 }
@@ -1157,6 +1154,7 @@ static int unmap_and_move(new_page_t get_new_page,
 				   enum migrate_reason reason,
 				   struct list_head *ret)
 {
+	struct folio *dst, *src = page_folio(page);
 	int rc = MIGRATEPAGE_SUCCESS;
 	struct page *newpage = NULL;

@@ -1174,9 +1172,10 @@ static int unmap_and_move(new_page_t get_new_page,
 	newpage = get_new_page(page, private);
 	if (!newpage)
 		return -ENOMEM;
+	dst = page_folio(newpage);

 	newpage->private = 0;
-	rc = __unmap_and_move(page, newpage, force, mode);
+	rc = __unmap_and_move(src, dst, force, mode);
 	if (rc == MIGRATEPAGE_SUCCESS)
 		set_page_owner_migrate_reason(newpage, reason);
