@@ -822,6 +822,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	struct lruvec *lruvec;
 	unsigned long flags = 0;
 	struct lruvec *locked = NULL;
+	struct folio *folio = NULL;
 	struct page *page = NULL, *valid_page = NULL;
 	struct address_space *mapping;
 	unsigned long start_pfn = low_pfn;
@@ -918,7 +919,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if (!valid_page && pageblock_aligned(low_pfn)) {
 			if (!isolation_suitable(cc, page)) {
 				low_pfn = end_pfn;
-				page = NULL;
+				folio = NULL;
 				goto isolate_abort;
 			}
 			valid_page = page;
@@ -950,7 +951,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 				 * Hugepage was successfully isolated and placed
 				 * on the cc->migratepages list.
 				 */
-				low_pfn += compound_nr(page) - 1;
+				folio = page_folio(page);
+				low_pfn += folio_nr_pages(folio) - 1;
 				goto isolate_success_no_list;
 			}
 
@@ -1018,8 +1020,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 					locked = NULL;
 				}
 
-				if (isolate_movable_page(page, mode))
+				if (isolate_movable_page(page, mode)) {
+					folio = page_folio(page);
 					goto isolate_success;
+				}
 			}
 
 			goto isolate_fail;
@@ -1030,16 +1034,17 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 * sure the page is not being freed elsewhere -- the
 		 * page release code relies on it.
 		 */
-		if (unlikely(!get_page_unless_zero(page)))
+		folio = folio_get_nontail_page(page);
+		if (unlikely(!folio))
 			goto isolate_fail;
 
 		/*
 		 * Migration will fail if an anonymous page is pinned in memory,
 		 * so avoid taking lru_lock and isolating it unnecessarily in an
 		 * admittedly racy check.
 		 */
-		mapping = page_mapping(page);
-		if (!mapping && (page_count(page) - 1) > total_mapcount(page))
+		mapping = folio_mapping(folio);
+		if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio))
 			goto isolate_fail_put;
 
 		/*
@@ -1050,11 +1055,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			goto isolate_fail_put;
 
 		/* Only take pages on LRU: a check now makes later tests safe */
-		if (!PageLRU(page))
+		if (!folio_test_lru(folio))
 			goto isolate_fail_put;
 
 		/* Compaction might skip unevictable pages but CMA takes them */
-		if (!(mode & ISOLATE_UNEVICTABLE) && PageUnevictable(page))
+		if (!(mode & ISOLATE_UNEVICTABLE) && folio_test_unevictable(folio))
 			goto isolate_fail_put;
 
 		/*
@@ -1063,10 +1068,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		 * it will be able to migrate without blocking - clean pages
 		 * for the most part. PageWriteback would require blocking.
 		 */
-		if ((mode & ISOLATE_ASYNC_MIGRATE) && PageWriteback(page))
+		if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio))
 			goto isolate_fail_put;
 
-		if ((mode & ISOLATE_ASYNC_MIGRATE) && PageDirty(page)) {
+		if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_dirty(folio)) {
 			bool migrate_dirty;
 
 			/*
@@ -1078,22 +1083,22 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			 * the page lock until after the page is removed
 			 * from the page cache.
 			 */
-			if (!trylock_page(page))
+			if (!folio_trylock(folio))
 				goto isolate_fail_put;
 
-			mapping = page_mapping(page);
+			mapping = folio_mapping(folio);
 			migrate_dirty = !mapping ||
 					mapping->a_ops->migrate_folio;
-			unlock_page(page);
+			folio_unlock(folio);
 			if (!migrate_dirty)
 				goto isolate_fail_put;
 		}
 
-		/* Try isolate the page */
-		if (!TestClearPageLRU(page))
+		/* Try isolate the folio */
+		if (!folio_test_clear_lru(folio))
 			goto isolate_fail_put;
 
-		lruvec = folio_lruvec(page_folio(page));
+		lruvec = folio_lruvec(folio);
 
 		/* If we already hold the lock, we can skip some rechecking */
 		if (lruvec != locked) {
@@ -1103,7 +1108,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
 			locked = lruvec;
 
-			lruvec_memcg_debug(lruvec, page_folio(page));
+			lruvec_memcg_debug(lruvec, folio);
 
 			/*
 			 * Try get exclusive access under lock. If marked for
@@ -1119,34 +1124,33 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			}
 
 			/*
-			 * Page become compound since the non-locked check,
-			 * and it's on LRU. It can only be a THP so the order
-			 * is safe to read and it's 0 for tail pages.
+			 * folio become large since the non-locked check,
+			 * and it's on LRU.
 			 */
-			if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
-				low_pfn += compound_nr(page) - 1;
-				nr_scanned += compound_nr(page) - 1;
-				SetPageLRU(page);
+			if (unlikely(folio_test_large(folio) && !cc->alloc_contig)) {
+				low_pfn += folio_nr_pages(folio) - 1;
+				nr_scanned += folio_nr_pages(folio) - 1;
+				folio_set_lru(folio);
 				goto isolate_fail_put;
 			}
 		}
 
-		/* The whole page is taken off the LRU; skip the tail pages. */
-		if (PageCompound(page))
-			low_pfn += compound_nr(page) - 1;
+		/* The folio is taken off the LRU */
+		if (folio_test_large(folio))
+			low_pfn += folio_nr_pages(folio) - 1;
 
 		/* Successfully isolated */
-		del_page_from_lru_list(page, lruvec);
-		mod_node_page_state(page_pgdat(page),
-				NR_ISOLATED_ANON + page_is_file_lru(page),
-				thp_nr_pages(page));
+		lruvec_del_folio(lruvec, folio);
+		node_stat_mod_folio(folio,
+				NR_ISOLATED_ANON + folio_is_file_lru(folio),
+				folio_nr_pages(folio));
 
 isolate_success:
-		list_add(&page->lru, &cc->migratepages);
+		list_add(&folio->lru, &cc->migratepages);
 isolate_success_no_list:
-		cc->nr_migratepages += compound_nr(page);
-		nr_isolated += compound_nr(page);
-		nr_scanned += compound_nr(page) - 1;
+		cc->nr_migratepages += folio_nr_pages(folio);
+		nr_isolated += folio_nr_pages(folio);
+		nr_scanned += folio_nr_pages(folio) - 1;
 
 		/*
 		 * Avoid isolating too much unless this block is being
@@ -1168,7 +1172,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			unlock_page_lruvec_irqrestore(locked, flags);
 			locked = NULL;
 		}
-		put_page(page);
+		folio_put(folio);
 
 isolate_fail:
 		if (!skip_on_failure && ret != -ENOMEM)
@@ -1209,14 +1213,14 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 	if (unlikely(low_pfn > end_pfn))
 		low_pfn = end_pfn;
 
-	page = NULL;
+	folio = NULL;
 
 isolate_abort:
 	if (locked)
 		unlock_page_lruvec_irqrestore(locked, flags);
-	if (page) {
-		SetPageLRU(page);
-		put_page(page);
+	if (folio) {
+		folio_set_lru(folio);
+		folio_put(folio);
 	}
 
 	/*
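For readers new to the folio API, here is a minimal sketch of the acquire-and-check pattern the hunks above adopt. It is not part of the commit: the helper name try_grab_lru_folio is invented for illustration. The real kernel functions it uses are folio_get_nontail_page(), which replaces the old get_page_unless_zero() call, and the folio_test_*() accessors, which replace the Page*() flag macros and avoid the implicit compound_head() lookup those macros perform.

/* Illustrative sketch only -- a hypothetical helper, not kernel code. */
#include <linux/mm.h>
#include <linux/page-flags.h>

static struct folio *try_grab_lru_folio(struct page *page)
{
	/*
	 * Take a reference and return the folio, or NULL if page is a
	 * tail page or its refcount has already dropped to zero (the
	 * condition the old get_page_unless_zero() check covered).
	 */
	struct folio *folio = folio_get_nontail_page(page);

	if (!folio)
		return NULL;

	/* folio_test_lru() is the folio counterpart of PageLRU(). */
	if (!folio_test_lru(folio)) {
		folio_put(folio);	/* drop the reference we took */
		return NULL;
	}

	return folio;
}

Because the reference is taken on the folio as a whole, later bookkeeping such as folio_nr_pages() is well defined, which is what lets the hunks above advance low_pfn past an entire large folio in one step.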