@@ -110,12 +110,14 @@ void show_swap_cache_info(void)
  * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
+int add_to_swap_cache(struct page *page, swp_entry_t entry,
+                      gfp_t gfp, void **shadowp)
 {
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
        unsigned long i, nr = hpage_nr_pages(page);
+       void *old;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -125,16 +127,25 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
        SetPageSwapCache(page);
 
        do {
+               unsigned long nr_shadows = 0;
+
                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
+                       old = xas_load(&xas);
+                       if (xa_is_value(old)) {
+                               nr_shadows++;
+                               if (shadowp)
+                                       *shadowp = old;
+                       }
                        set_page_private(page + i, entry.val + i);
                        xas_store(&xas, page);
                        xas_next(&xas);
                }
+               address_space->nrexceptional -= nr_shadows;
                address_space->nrpages += nr;
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                ADD_CACHE_INFO(add_total, nr);
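
For context, a caller that wants workingset detection passes a non-NULL shadowp and feeds any reported shadow entry back to the refault code. A minimal sketch (not from this commit; gfp_mask and the anonymous refault wiring are assumed to exist as elsewhere in the series):

    void *shadow = NULL;

    /* Sketch only; readahead and error handling omitted. */
    if (!add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
            if (shadow)
                    workingset_refault(page, shadow);  /* assumes anon refault support */
    }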
@@ -154,7 +165,8 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
  * This must be called only on pages that have
  * been verified to be in the swap cache.
  */
-void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
+void __delete_from_swap_cache(struct page *page,
+                              swp_entry_t entry, void *shadow)
 {
        struct address_space *address_space = swap_address_space(entry);
        int i, nr = hpage_nr_pages(page);
@@ -166,12 +178,14 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
        VM_BUG_ON_PAGE(PageWriteback(page), page);
 
        for (i = 0; i < nr; i++) {
-               void *entry = xas_store(&xas, NULL);
+               void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != page, entry);
                set_page_private(page + i, 0);
                xas_next(&xas);
        }
        ClearPageSwapCache(page);
+       if (shadow)
+               address_space->nrexceptional += nr;
        address_space->nrpages -= nr;
        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
        ADD_CACHE_INFO(del_total, nr);
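
For context, the shadow value stored here is expected to come from reclaim. A hedged sketch of how the __remove_mapping() call site might produce it, assuming the vmscan side of the series mirrors the existing file-cache path (workingset_eviction() signature and the reclaimed/mapping/target_memcg locals are assumed, as in __remove_mapping()):

    void *shadow = NULL;

    if (reclaimed && !mapping_exiting(mapping))
            shadow = workingset_eviction(page, target_memcg);  /* signature assumed */
    __delete_from_swap_cache(page, swap, shadow);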
@@ -208,7 +222,7 @@ int add_to_swap(struct page *page)
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
-                       __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
+                       __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
@@ -246,13 +260,44 @@ void delete_from_swap_cache(struct page *page)
        struct address_space *address_space = swap_address_space(entry);
 
        xa_lock_irq(&address_space->i_pages);
-       __delete_from_swap_cache(page, entry);
+       __delete_from_swap_cache(page, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);
 
        put_swap_page(page, entry);
        page_ref_sub(page, hpage_nr_pages(page));
 }
 
+void clear_shadow_from_swap_cache(int type, unsigned long begin,
+                                 unsigned long end)
+{
+       unsigned long curr = begin;
+       void *old;
+
+       for (;;) {
+               unsigned long nr_shadows = 0;
+               swp_entry_t entry = swp_entry(type, curr);
+               struct address_space *address_space = swap_address_space(entry);
+               XA_STATE(xas, &address_space->i_pages, curr);
+
+               xa_lock_irq(&address_space->i_pages);
+               xas_for_each(&xas, old, end) {
+                       if (!xa_is_value(old))
+                               continue;
+                       xas_store(&xas, NULL);
+                       nr_shadows++;
+               }
+               address_space->nrexceptional -= nr_shadows;
+               xa_unlock_irq(&address_space->i_pages);
+
+               /* search the next swapcache until we meet end */
+               curr >>= SWAP_ADDRESS_SPACE_SHIFT;
+               curr++;
+               curr <<= SWAP_ADDRESS_SPACE_SHIFT;
+               if (curr > end)
+                       break;
+       }
+}
+
 /*
  * If we are the only user, then try to free up the swap cache.
  *
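
The shift-increment-shift sequence at the bottom of clear_shadow_from_swap_cache() rounds curr up to the first offset of the next swap address_space, since one swap device's cache is split into spaces of 2^SWAP_ADDRESS_SPACE_SHIFT slots each. A worked example, assuming the mainline value SWAP_ADDRESS_SPACE_SHIFT == 14:

    curr = 20000;          /* somewhere in the second address_space        */
    curr >>= 14;           /* 20000 >> 14 == 1                             */
    curr++;                /* 2                                            */
    curr <<= 14;           /* 32768: first slot of the third address_space */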
@@ -429,7 +474,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
        __SetPageSwapBacked(page);
 
        /* May fail (-ENOMEM) if XArray node allocation failed. */
-       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK)) {
+       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, NULL)) {
                put_swap_page(page, entry);
                goto fail_unlock;
        }