@@ -993,17 +993,15 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 	return rc;
 }
 
-static int __unmap_and_move(struct page *page, struct page *newpage,
+static int __unmap_and_move(struct folio *src, struct folio *dst,
 				int force, enum migrate_mode mode)
 {
-	struct folio *folio = page_folio(page);
-	struct folio *dst = page_folio(newpage);
 	int rc = -EAGAIN;
 	bool page_was_mapped = false;
 	struct anon_vma *anon_vma = NULL;
-	bool is_lru = !__PageMovable(page);
+	bool is_lru = !__PageMovable(&src->page);
 
-	if (!trylock_page(page)) {
+	if (!folio_trylock(src)) {
 		if (!force || mode == MIGRATE_ASYNC)
 			goto out;
 
@@ -1023,10 +1021,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		if (current->flags & PF_MEMALLOC)
 			goto out;
 
-		lock_page(page);
+		folio_lock(src);
 	}
 
-	if (PageWriteback(page)) {
+	if (folio_test_writeback(src)) {
 		/*
 		 * Only in the case of a full synchronous migration is it
 		 * necessary to wait for PageWriteback. In the async case,
@@ -1043,12 +1041,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		}
 		if (!force)
 			goto out_unlock;
-		wait_on_page_writeback(page);
+		folio_wait_writeback(src);
 	}
 
 	/*
-	 * By try_to_migrate(), page->mapcount goes down to 0 here. In this case,
-	 * we cannot notice that anon_vma is freed while we migrates a page.
+	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
+	 * we cannot notice that anon_vma is freed while we migrate a page.
 	 * This get_anon_vma() delays freeing anon_vma pointer until the end
 	 * of migration. File cache pages are no problem because of page_lock()
 	 * File Caches may use write_page() or lock_page() in migration, then,
@@ -1060,88 +1058,87 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	 * because that implies that the anon page is no longer mapped
 	 * (and cannot be remapped so long as we hold the page lock).
 	 */
-	if (PageAnon(page) && !PageKsm(page))
-		anon_vma = page_get_anon_vma(page);
+	if (folio_test_anon(src) && !folio_test_ksm(src))
+		anon_vma = page_get_anon_vma(&src->page);
 
 	/*
 	 * Block others from accessing the new page when we get around to
 	 * establishing additional references. We are usually the only one
-	 * holding a reference to newpage at this point. We used to have a BUG
-	 * here if trylock_page(newpage) fails, but would like to allow for
-	 * cases where there might be a race with the previous use of newpage.
+	 * holding a reference to dst at this point. We used to have a BUG
+	 * here if folio_trylock(dst) fails, but would like to allow for
+	 * cases where there might be a race with the previous use of dst.
 	 * This is much like races on refcount of oldpage: just don't BUG().
 	 */
-	if (unlikely(!trylock_page(newpage)))
+	if (unlikely(!folio_trylock(dst)))
 		goto out_unlock;
 
 	if (unlikely(!is_lru)) {
-		rc = move_to_new_folio(dst, folio, mode);
+		rc = move_to_new_folio(dst, src, mode);
 		goto out_unlock_both;
 	}
 
 	/*
 	 * Corner case handling:
 	 * 1. When a new swap-cache page is read into, it is added to the LRU
 	 * and treated as swapcache but it has no rmap yet.
-	 * Calling try_to_unmap() against a page->mapping==NULL page will
+	 * Calling try_to_unmap() against a src->mapping==NULL page will
 	 * trigger a BUG. So handle it here.
 	 * 2. An orphaned page (see truncate_cleanup_page) might have
 	 * fs-private metadata. The page can be picked up due to memory
 	 * offlining. Everywhere else except page reclaim, the page is
 	 * invisible to the vm, so the page can not be migrated. So try to
 	 * free the metadata, so the page can be freed.
	 */
-	if (!page->mapping) {
-		VM_BUG_ON_PAGE(PageAnon(page), page);
-		if (page_has_private(page)) {
-			try_to_free_buffers(folio);
+	if (!src->mapping) {
+		if (folio_test_private(src)) {
+			try_to_free_buffers(src);
 			goto out_unlock_both;
 		}
-	} else if (page_mapped(page)) {
+	} else if (folio_mapped(src)) {
 		/* Establish migration ptes */
-		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
-				page);
-		try_to_migrate(folio, 0);
+		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
+			       !folio_test_ksm(src) && !anon_vma, src);
+		try_to_migrate(src, 0);
 		page_was_mapped = true;
 	}
 
-	if (!page_mapped(page))
-		rc = move_to_new_folio(dst, folio, mode);
+	if (!folio_mapped(src))
+		rc = move_to_new_folio(dst, src, mode);
 
 	/*
-	 * When successful, push newpage to LRU immediately: so that if it
+	 * When successful, push dst to LRU immediately: so that if it
 	 * turns out to be an mlocked page, remove_migration_ptes() will
-	 * automatically build up the correct newpage->mlock_count for it.
+	 * automatically build up the correct dst->mlock_count for it.
 	 *
 	 * We would like to do something similar for the old page, when
 	 * unsuccessful, and other cases when a page has been temporarily
 	 * isolated from the unevictable LRU: but this case is the easiest.
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS) {
-		lru_cache_add(newpage);
+		folio_add_lru(dst);
 		if (page_was_mapped)
 			lru_add_drain();
 	}
 
 	if (page_was_mapped)
-		remove_migration_ptes(folio,
-			rc == MIGRATEPAGE_SUCCESS ? dst : folio, false);
+		remove_migration_ptes(src,
+			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
 
 out_unlock_both:
-	unlock_page(newpage);
+	folio_unlock(dst);
 out_unlock:
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
 		put_anon_vma(anon_vma);
-	unlock_page(page);
+	folio_unlock(src);
 out:
 	/*
-	 * If migration is successful, decrease refcount of the newpage,
+	 * If migration is successful, decrease refcount of dst,
 	 * which will not free the page because new page owner increased
 	 * refcounter.
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS)
-		put_page(newpage);
+		folio_put(dst);
 
 	return rc;
 }
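
The conversion above rests on the two bridging idioms visible in the hunks: page_folio() turns a struct page into the folio that contains it, and &folio->page hands the head page back to helpers that have not been converted yet (here __PageMovable() and page_get_anon_vma()). Purely as an illustration of that idiom, the following minimal userspace sketch uses simplified stand-in types and a stand-in page_folio(), not the kernel's real definitions.

/*
 * Userspace sketch of the page <-> folio bridging idiom used above.
 * "struct page" and "struct folio" are hypothetical stand-ins; the
 * point is only the pattern: page_folio() to reach the folio,
 * &folio->page to call a helper that still takes a page.
 */
#include <stdio.h>
#include <stdbool.h>

struct page {
	unsigned long flags;		/* stand-in for the real flags word */
};

struct folio {
	struct page page;		/* head page embedded, as in the kernel */
};

/* The real page_folio() walks compound-page metadata; in this sketch
 * the folio simply starts at its head page, so a cast is enough. */
static struct folio *page_folio(struct page *page)
{
	return (struct folio *)page;
}

/* Stand-in for a helper that has not been converted to folios yet. */
static bool legacy_page_helper(struct page *page)
{
	return page->flags != 0;
}

/* Folio-first function in the style of the converted __unmap_and_move(). */
static void folio_first_operation(struct folio *src)
{
	/* Pass &src->page only where a page-based helper remains. */
	if (legacy_page_helper(&src->page))
		printf("flag set on the folio's head page\n");
}

int main(void)
{
	struct folio f = { .page = { .flags = 1 } };
	struct page *p = &f.page;

	/* Callers that still hold a page wrap it once, up front... */
	struct folio *src = page_folio(p);

	/* ...and everything below works on the folio. */
	folio_first_operation(src);
	return 0;
}
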
@@ -1157,6 +1154,7 @@ static int unmap_and_move(new_page_t get_new_page,
 				enum migrate_reason reason,
 				struct list_head *ret)
 {
+	struct folio *dst, *src = page_folio(page);
 	int rc = MIGRATEPAGE_SUCCESS;
 	struct page *newpage = NULL;
 
@@ -1174,9 +1172,10 @@ static int unmap_and_move(new_page_t get_new_page,
 	newpage = get_new_page(page, private);
 	if (!newpage)
 		return -ENOMEM;
+	dst = page_folio(newpage);
 
 	newpage->private = 0;
-	rc = __unmap_and_move(page, newpage, force, mode);
+	rc = __unmap_and_move(src, dst, force, mode);
 	if (rc == MIGRATEPAGE_SUCCESS)
 		set_page_owner_migrate_reason(newpage, reason);
 
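
The locking discipline the converted function keeps is also worth seeing on its own: __unmap_and_move() locks the source folio first, only ever trylocks the destination, and unwinds through the out_unlock_both / out_unlock / out labels in reverse order so every exit path drops exactly what it took. The compressed userspace sketch below illustrates that unwind ladder only; the folio type and lock helpers are trivial stand-ins, and do_copy() / migrate_one() are made-up names, not kernel functions.

/*
 * Sketch of the out / out_unlock / out_unlock_both unwind ladder used by
 * __unmap_and_move(). The lock helpers are single-threaded stand-ins for
 * folio_lock()/folio_trylock()/folio_unlock(); only the ordering matters.
 */
#include <stdbool.h>
#include <stdio.h>

struct folio {
	bool locked;
};

static bool folio_trylock(struct folio *f)
{
	if (f->locked)
		return false;
	f->locked = true;
	return true;
}

static void folio_lock(struct folio *f)
{
	/* stand-in: the real helper sleeps until the lock is available */
	f->locked = true;
}

static void folio_unlock(struct folio *f)
{
	f->locked = false;
}

static int do_copy(struct folio *src, struct folio *dst)
{
	(void)src;
	(void)dst;
	return 0;			/* pretend the copy always succeeds */
}

static int migrate_one(struct folio *src, struct folio *dst, bool force)
{
	int rc = -1;			/* plays the role of -EAGAIN */

	if (!folio_trylock(src)) {	/* the source folio is locked first */
		if (!force)
			goto out;
		folio_lock(src);	/* sleep for it, as the real code may */
	}

	if (!folio_trylock(dst))	/* the destination is only trylocked */
		goto out_unlock;

	rc = do_copy(src, dst);		/* stand-in for the actual migration */
	if (rc)
		goto out_unlock_both;	/* failure still unlocks both folios */

	/* on success the real code also moves dst onto the LRU here */

out_unlock_both:
	folio_unlock(dst);		/* unlock in reverse order of locking */
out_unlock:
	folio_unlock(src);
out:
	return rc;
}

int main(void)
{
	struct folio src = { .locked = false };
	struct folio dst = { .locked = false };

	printf("rc = %d\n", migrate_one(&src, &dst, true));
	return 0;
}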