 
 #include "internal.h"
 
-struct mlock_pvec {
+struct mlock_fbatch {
 	local_lock_t lock;
-	struct pagevec vec;
+	struct folio_batch fbatch;
 };
 
-static DEFINE_PER_CPU(struct mlock_pvec, mlock_pvec) = {
+static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = {
 	.lock = INIT_LOCAL_LOCK(lock),
 };
 
@@ -48,192 +48,192 @@ bool can_do_mlock(void)
 EXPORT_SYMBOL(can_do_mlock);
 
 /*
- * Mlocked pages are marked with PageMlocked() flag for efficient testing
+ * Mlocked folios are marked with the PG_mlocked flag for efficient testing
  * in vmscan and, possibly, the fault path; and to support semi-accurate
  * statistics.
  *
- * An mlocked page [PageMlocked(page)] is unevictable. As such, it will
- * be placed on the LRU "unevictable" list, rather than the [in]active lists.
- * The unevictable list is an LRU sibling list to the [in]active lists.
- * PageUnevictable is set to indicate the unevictable state.
+ * An mlocked folio [folio_test_mlocked(folio)] is unevictable. As such, it
+ * will be ostensibly placed on the LRU "unevictable" list (actually no such
+ * list exists), rather than the [in]active lists. PG_unevictable is set to
+ * indicate the unevictable state.
  */
 
-static struct lruvec *__mlock_page(struct page *page, struct lruvec *lruvec)
+static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
 {
 	/* There is nothing more we can do while it's off LRU */
-	if (!TestClearPageLRU(page))
+	if (!folio_test_clear_lru(folio))
 		return lruvec;
 
-	lruvec = folio_lruvec_relock_irq(page_folio(page), lruvec);
+	lruvec = folio_lruvec_relock_irq(folio, lruvec);
 
-	if (unlikely(page_evictable(page))) {
+	if (unlikely(folio_evictable(folio))) {
 		/*
-		 * This is a little surprising, but quite possible:
-		 * PageMlocked must have got cleared already by another CPU.
-		 * Could this page be on the Unevictable LRU? I'm not sure,
-		 * but move it now if so.
+		 * This is a little surprising, but quite possible: PG_mlocked
+		 * must have got cleared already by another CPU. Could this
+		 * folio be unevictable? I'm not sure, but move it now if so.
 		 */
-		if (PageUnevictable(page)) {
-			del_page_from_lru_list(page, lruvec);
-			ClearPageUnevictable(page);
-			add_page_to_lru_list(page, lruvec);
+		if (folio_test_unevictable(folio)) {
+			lruvec_del_folio(lruvec, folio);
+			folio_clear_unevictable(folio);
+			lruvec_add_folio(lruvec, folio);
+
 			__count_vm_events(UNEVICTABLE_PGRESCUED,
-					  thp_nr_pages(page));
+					  folio_nr_pages(folio));
 		}
 		goto out;
 	}
 
-	if (PageUnevictable(page)) {
-		if (PageMlocked(page))
-			page->mlock_count++;
+	if (folio_test_unevictable(folio)) {
+		if (folio_test_mlocked(folio))
+			folio->mlock_count++;
 		goto out;
 	}
 
-	del_page_from_lru_list(page, lruvec);
-	ClearPageActive(page);
-	SetPageUnevictable(page);
-	page->mlock_count = !!PageMlocked(page);
-	add_page_to_lru_list(page, lruvec);
-	__count_vm_events(UNEVICTABLE_PGCULLED, thp_nr_pages(page));
+	lruvec_del_folio(lruvec, folio);
+	folio_clear_active(folio);
+	folio_set_unevictable(folio);
+	folio->mlock_count = !!folio_test_mlocked(folio);
+	lruvec_add_folio(lruvec, folio);
+	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
 out:
-	SetPageLRU(page);
+	folio_set_lru(folio);
 	return lruvec;
 }
 
-static struct lruvec *__mlock_new_page(struct page *page, struct lruvec *lruvec)
+static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)
 {
-	VM_BUG_ON_PAGE(PageLRU(page), page);
+	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
-	lruvec = folio_lruvec_relock_irq(page_folio(page), lruvec);
+	lruvec = folio_lruvec_relock_irq(folio, lruvec);
 
 	/* As above, this is a little surprising, but possible */
-	if (unlikely(page_evictable(page)))
+	if (unlikely(folio_evictable(folio)))
 		goto out;
 
-	SetPageUnevictable(page);
-	page->mlock_count = !!PageMlocked(page);
-	__count_vm_events(UNEVICTABLE_PGCULLED, thp_nr_pages(page));
+	folio_set_unevictable(folio);
+	folio->mlock_count = !!folio_test_mlocked(folio);
+	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
 out:
-	add_page_to_lru_list(page, lruvec);
-	SetPageLRU(page);
+	lruvec_add_folio(lruvec, folio);
+	folio_set_lru(folio);
 	return lruvec;
 }
 
-static struct lruvec *__munlock_page(struct page *page, struct lruvec *lruvec)
+static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec)
 {
-	int nr_pages = thp_nr_pages(page);
+	int nr_pages = folio_nr_pages(folio);
 	bool isolated = false;
 
-	if (!TestClearPageLRU(page))
+	if (!folio_test_clear_lru(folio))
 		goto munlock;
 
 	isolated = true;
-	lruvec = folio_lruvec_relock_irq(page_folio(page), lruvec);
+	lruvec = folio_lruvec_relock_irq(folio, lruvec);
 
-	if (PageUnevictable(page)) {
+	if (folio_test_unevictable(folio)) {
 		/* Then mlock_count is maintained, but might undercount */
-		if (page->mlock_count)
-			page->mlock_count--;
-		if (page->mlock_count)
+		if (folio->mlock_count)
+			folio->mlock_count--;
+		if (folio->mlock_count)
 			goto out;
 	}
 	/* else assume that was the last mlock: reclaim will fix it if not */
 
 munlock:
-	if (TestClearPageMlocked(page)) {
-		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
-		if (isolated || !PageUnevictable(page))
+	if (folio_test_clear_mlocked(folio)) {
+		__zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
+		if (isolated || !folio_test_unevictable(folio))
 			__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
 		else
 			__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
 	}
 
-	/* page_evictable() has to be checked *after* clearing Mlocked */
-	if (isolated && PageUnevictable(page) && page_evictable(page)) {
-		del_page_from_lru_list(page, lruvec);
-		ClearPageUnevictable(page);
-		add_page_to_lru_list(page, lruvec);
+	/* folio_evictable() has to be checked *after* clearing Mlocked */
+	if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) {
+		lruvec_del_folio(lruvec, folio);
+		folio_clear_unevictable(folio);
+		lruvec_add_folio(lruvec, folio);
 		__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
 	}
 out:
 	if (isolated)
-		SetPageLRU(page);
+		folio_set_lru(folio);
 	return lruvec;
 }
 
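Note for reviewers: the mlock_count handling in __mlock_folio() and __munlock_folio() above is easy to misread in diff form. Below is a minimal userspace sketch of just that bookkeeping — every name is hypothetical, and LRU handling, locking, batching and the reclaim-side fixups are deliberately omitted — meant only to show that repeated mlocks bump the count while the folio is unevictable, and that only the final munlock rescues it.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the few bits of struct folio used here. */
struct toy_folio {
	bool mlocked;		/* models PG_mlocked */
	bool unevictable;	/* models PG_unevictable */
	int  mlock_count;	/* models folio->mlock_count */
};

/* Folds mlock_folio()'s flag setting and __mlock_folio()'s counting together. */
static void toy_mlock(struct toy_folio *f)
{
	f->mlocked = true;
	if (f->unevictable) {
		f->mlock_count++;	/* already culled: just count another mlock */
		return;
	}
	f->unevictable = true;		/* first mlock "culls" it off the normal LRU */
	f->mlock_count = 1;
}

/* Roughly __munlock_folio(): only the last munlock rescues the folio. */
static void toy_munlock(struct toy_folio *f)
{
	if (f->unevictable && f->mlock_count)
		f->mlock_count--;
	if (f->mlock_count)
		return;			/* still mlocked elsewhere, stay unevictable */
	f->mlocked = false;		/* real code also adjusts NR_MLOCK and vm events */
	f->unevictable = false;		/* "rescued" back to the normal LRU */
}

int main(void)
{
	struct toy_folio f = { 0 };

	toy_mlock(&f);
	toy_mlock(&f);			/* mlocked via two mappings */
	toy_munlock(&f);
	assert(f.unevictable);		/* still pinned by the second mlock */
	toy_munlock(&f);
	assert(!f.unevictable && !f.mlocked);
	printf("toy mlock_count model ok\n");
	return 0;
}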
 /*
- * Flags held in the low bits of a struct page pointer on the mlock_pvec.
+ * Flags held in the low bits of a struct folio pointer on the mlock_fbatch.
  */
-#define LRU_PAGE 0x1
-#define NEW_PAGE 0x2
-static inline struct page *mlock_lru(struct page *page)
+#define LRU_FOLIO 0x1
+#define NEW_FOLIO 0x2
+static inline struct folio *mlock_lru(struct folio *folio)
 {
-	return (struct page *)((unsigned long)page + LRU_PAGE);
+	return (struct folio *)((unsigned long)folio + LRU_FOLIO);
 }
 
-static inline struct page *mlock_new(struct page *page)
+static inline struct folio *mlock_new(struct folio *folio)
 {
-	return (struct page *)((unsigned long)page + NEW_PAGE);
+	return (struct folio *)((unsigned long)folio + NEW_FOLIO);
 }
 
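Note for reviewers: mlock_lru() and mlock_new() work because struct folio is at least word-aligned, so the two low bits of the pointer are free to carry LRU_FOLIO/NEW_FOLIO until mlock_folio_batch() strips them again. A self-contained sketch of that pointer-tagging trick (plain userspace C with hypothetical names, not the kernel helpers themselves):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_LRU  0x1UL
#define TAG_NEW  0x2UL
#define TAG_MASK (TAG_LRU | TAG_NEW)

/* Fold a flag into the (known zero) low bits of an aligned pointer. */
static void *tag_ptr(void *p, unsigned long tag)
{
	assert(((uintptr_t)p & TAG_MASK) == 0);	/* alignment gives us two free bits */
	return (void *)((uintptr_t)p | tag);
}

/* Read the flag back out of a tagged pointer. */
static unsigned long ptr_tag(void *p)
{
	return (uintptr_t)p & TAG_MASK;
}

/* Strip the flag to recover the original pointer. */
static void *untag_ptr(void *p)
{
	return (void *)((uintptr_t)p & ~TAG_MASK);
}

int main(void)
{
	int *obj = malloc(sizeof(*obj));	/* malloc() returns suitably aligned memory */
	void *tagged = tag_ptr(obj, TAG_NEW);

	/* The consumer recovers both the flag and the original pointer. */
	assert(ptr_tag(tagged) == TAG_NEW);
	assert(untag_ptr(tagged) == (void *)obj);
	printf("tag=%lx, pointer restored ok\n", ptr_tag(tagged));

	free(obj);
	return 0;
}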
 /*
- * mlock_pagevec() is derived from pagevec_lru_move_fn():
- * perhaps that can make use of such page pointer flags in future,
- * but for now just keep it for mlock. We could use three separate
- * pagevecs instead, but one feels better (munlocking a full pagevec
- * does not need to drain mlocking pagevecs first).
+ * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can
+ * make use of such folio pointer flags in future, but for now just keep it for
+ * mlock. We could use three separate folio batches instead, but one feels
+ * better (munlocking a full folio batch does not need to drain mlocking folio
+ * batches first).
  */
-static void mlock_pagevec(struct pagevec *pvec)
+static void mlock_folio_batch(struct folio_batch *fbatch)
 {
 	struct lruvec *lruvec = NULL;
 	unsigned long mlock;
-	struct page *page;
+	struct folio *folio;
 	int i;
 
-	for (i = 0; i < pagevec_count(pvec); i++) {
-		page = pvec->pages[i];
-		mlock = (unsigned long)page & (LRU_PAGE | NEW_PAGE);
-		page = (struct page *)((unsigned long)page - mlock);
-		pvec->pages[i] = page;
+	for (i = 0; i < folio_batch_count(fbatch); i++) {
+		folio = fbatch->folios[i];
+		mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
+		folio = (struct folio *)((unsigned long)folio - mlock);
+		fbatch->folios[i] = folio;
 
-		if (mlock & LRU_PAGE)
-			lruvec = __mlock_page(page, lruvec);
-		else if (mlock & NEW_PAGE)
-			lruvec = __mlock_new_page(page, lruvec);
+		if (mlock & LRU_FOLIO)
+			lruvec = __mlock_folio(folio, lruvec);
+		else if (mlock & NEW_FOLIO)
+			lruvec = __mlock_new_folio(folio, lruvec);
 		else
-			lruvec = __munlock_page(page, lruvec);
+			lruvec = __munlock_folio(folio, lruvec);
 	}
 
 	if (lruvec)
 		unlock_page_lruvec_irq(lruvec);
-	release_pages(pvec->pages, pvec->nr);
-	pagevec_reinit(pvec);
+	release_pages(fbatch->folios, fbatch->nr);
+	folio_batch_reinit(fbatch);
 }
 
 void mlock_page_drain_local(void)
 {
-	struct pagevec *pvec;
+	struct folio_batch *fbatch;
 
-	local_lock(&mlock_pvec.lock);
-	pvec = this_cpu_ptr(&mlock_pvec.vec);
-	if (pagevec_count(pvec))
-		mlock_pagevec(pvec);
-	local_unlock(&mlock_pvec.lock);
+	local_lock(&mlock_fbatch.lock);
+	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
+	if (folio_batch_count(fbatch))
+		mlock_folio_batch(fbatch);
+	local_unlock(&mlock_fbatch.lock);
 }
 
 void mlock_page_drain_remote(int cpu)
 {
-	struct pagevec *pvec;
+	struct folio_batch *fbatch;
 
 	WARN_ON_ONCE(cpu_online(cpu));
-	pvec = &per_cpu(mlock_pvec.vec, cpu);
-	if (pagevec_count(pvec))
-		mlock_pagevec(pvec);
+	fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
+	if (folio_batch_count(fbatch))
+		mlock_folio_batch(fbatch);
 }
 
 bool need_mlock_page_drain(int cpu)
 {
-	return pagevec_count(&per_cpu(mlock_pvec.vec, cpu));
+	return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
 }
 
 /**
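Note on the hunks below: folio_batch_add() returns the number of slots still free after the add, so the `if (!folio_batch_add(...))` pattern means "queue the folio and flush once the batch fills up" (large folios and lru_cache_disabled() also force an immediate flush). A rough userspace analog of that idiom, with entirely hypothetical names:

#include <stdio.h>

#define BATCH_SIZE 15	/* illustrative; do not rely on this matching the kernel's batch size */

struct toy_batch {
	unsigned int nr;
	void *items[BATCH_SIZE];
};

/* Add one item; return how many free slots remain (0 == "batch is now full"). */
static unsigned int toy_batch_add(struct toy_batch *b, void *item)
{
	b->items[b->nr++] = item;
	return BATCH_SIZE - b->nr;
}

static void toy_batch_flush(struct toy_batch *b)
{
	printf("flushing %u items\n", b->nr);	/* the real code walks the batch here */
	b->nr = 0;
}

int main(void)
{
	struct toy_batch batch = { 0 };
	int items[40];

	for (int i = 0; i < 40; i++) {
		/* Same shape as mlock_folio(): queue, and flush only when full. */
		if (!toy_batch_add(&batch, &items[i]))
			toy_batch_flush(&batch);
	}
	if (batch.nr)		/* mirrors mlock_page_drain_local() draining leftovers */
		toy_batch_flush(&batch);
	return 0;
}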
@@ -242,10 +242,10 @@ bool need_mlock_page_drain(int cpu)
  */
 void mlock_folio(struct folio *folio)
 {
-	struct pagevec *pvec;
+	struct folio_batch *fbatch;
 
-	local_lock(&mlock_pvec.lock);
-	pvec = this_cpu_ptr(&mlock_pvec.vec);
+	local_lock(&mlock_fbatch.lock);
+	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
 
 	if (!folio_test_set_mlocked(folio)) {
 		int nr_pages = folio_nr_pages(folio);
@@ -255,10 +255,10 @@ void mlock_folio(struct folio *folio)
 	}
 
 	folio_get(folio);
-	if (!pagevec_add(pvec, mlock_lru(&folio->page)) ||
+	if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
 	    folio_test_large(folio) || lru_cache_disabled())
-		mlock_pagevec(pvec);
-	local_unlock(&mlock_pvec.lock);
+		mlock_folio_batch(fbatch);
+	local_unlock(&mlock_fbatch.lock);
 }
 
 /**
@@ -267,20 +267,22 @@ void mlock_folio(struct folio *folio)
  */
 void mlock_new_page(struct page *page)
 {
-	struct pagevec *pvec;
-	int nr_pages = thp_nr_pages(page);
+	struct folio_batch *fbatch;
+	struct folio *folio = page_folio(page);
+	int nr_pages = folio_nr_pages(folio);
 
-	local_lock(&mlock_pvec.lock);
-	pvec = this_cpu_ptr(&mlock_pvec.vec);
-	SetPageMlocked(page);
-	mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+	local_lock(&mlock_fbatch.lock);
+	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
+	folio_set_mlocked(folio);
+
+	zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
 	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 
-	get_page(page);
-	if (!pagevec_add(pvec, mlock_new(page)) ||
-	    PageHead(page) || lru_cache_disabled())
-		mlock_pagevec(pvec);
-	local_unlock(&mlock_pvec.lock);
+	folio_get(folio);
+	if (!folio_batch_add(fbatch, mlock_new(folio)) ||
+	    folio_test_large(folio) || lru_cache_disabled())
+		mlock_folio_batch(fbatch);
+	local_unlock(&mlock_fbatch.lock);
 }
 
 /**
@@ -289,20 +291,20 @@ void mlock_new_page(struct page *page)
  */
 void munlock_page(struct page *page)
 {
-	struct pagevec *pvec;
+	struct folio_batch *fbatch;
+	struct folio *folio = page_folio(page);
 
-	local_lock(&mlock_pvec.lock);
-	pvec = this_cpu_ptr(&mlock_pvec.vec);
+	local_lock(&mlock_fbatch.lock);
+	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
 	/*
-	 * TestClearPageMlocked(page) must be left to __munlock_page(),
-	 * which will check whether the page is multiply mlocked.
+	 * folio_test_clear_mlocked(folio) must be left to __munlock_folio(),
+	 * which will check whether the folio is multiply mlocked.
 	 */
-
-	get_page(page);
-	if (!pagevec_add(pvec, page) ||
-	    PageHead(page) || lru_cache_disabled())
-		mlock_pagevec(pvec);
-	local_unlock(&mlock_pvec.lock);
+	folio_get(folio);
+	if (!folio_batch_add(fbatch, folio) ||
+	    folio_test_large(folio) || lru_cache_disabled())
+		mlock_folio_batch(fbatch);
+	local_unlock(&mlock_fbatch.lock);
 }
 
 static int mlock_pte_range(pmd_t *pmd, unsigned long addr,