@@ -177,21 +177,21 @@ void do_invalidatepage(struct page *page, unsigned int offset,
  * its lock, b) when a concurrent invalidate_mapping_pages got there first and
  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
  */
-static void truncate_cleanup_page(struct page *page)
+static void truncate_cleanup_folio(struct folio *folio)
 {
-	if (page_mapped(page))
-		unmap_mapping_page(page);
+	if (folio_mapped(folio))
+		unmap_mapping_page(&folio->page);
 
-	if (page_has_private(page))
-		do_invalidatepage(page, 0, thp_size(page));
+	if (folio_has_private(folio))
+		do_invalidatepage(&folio->page, 0, folio_size(folio));
 
 	/*
 	 * Some filesystems seem to re-dirty the page even after
 	 * the VM has canceled the dirty bit (eg ext3 journaling).
 	 * Hence dirty accounting check is placed after invalidation.
 	 */
-	cancel_dirty_page(page);
-	ClearPageMappedToDisk(page);
+	folio_cancel_dirty(folio);
+	folio_clear_mappedtodisk(folio);
 }
 
 /*
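
Annotation (not part of the patch): the folio helpers in the hunk above map one-to-one onto the page helpers they replace -- folio_mapped() for page_mapped(), folio_has_private() for page_has_private(), folio_cancel_dirty() for cancel_dirty_page(), and folio_clear_mappedtodisk() for ClearPageMappedToDisk(). folio_size() stands in for thp_size() because a folio is never a tail page, so it always describes the whole compound unit. unmap_mapping_page() and do_invalidatepage() still take a struct page at this stage of the conversion, hence the &folio->page bridge. A minimal sketch of the same recipe applied to a made-up helper (example_cleanup_folio is invented for illustration, assuming the folio API as used in this commit):

#include <linux/mm.h>
#include <linux/pagemap.h>

static void example_cleanup_folio(struct folio *folio)
{
	/* folio_test_dirty() is the folio analogue of PageDirty() */
	if (folio_test_dirty(folio))
		folio_cancel_dirty(folio);

	/*
	 * folio_size() already covers the whole compound unit, so
	 * no thp_size()-style head-page handling is needed here.
	 */
	pr_debug("cleaned %zu bytes\n", folio_size(folio));
}
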
@@ -220,13 +220,14 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 
 int truncate_inode_page(struct address_space *mapping, struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	VM_BUG_ON_PAGE(PageTail(page), page);
 
 	if (page->mapping != mapping)
 		return -EIO;
 
-	truncate_cleanup_page(page);
-	delete_from_page_cache(page);
+	truncate_cleanup_folio(folio);
+	filemap_remove_folio(folio);
 	return 0;
 }
 
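
Annotation (not part of the patch): page_folio() is the standard bridge for a caller that still holds a struct page -- it resolves the page to its containing folio and never fails -- so truncate_inode_page() keeps its page-based signature while the real work moves to the folio. filemap_remove_folio() is the folio-native replacement for delete_from_page_cache(). A short sketch of the same caller-side pattern, with a hypothetical example_drop_page() (invented for illustration):

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Caller must hold the page lock, as in truncate_inode_page(). */
static int example_drop_page(struct address_space *mapping,
			     struct page *page)
{
	struct folio *folio = page_folio(page);	/* never fails */

	/* Bail out if the folio was truncated or moved under us. */
	if (folio->mapping != mapping)
		return -EIO;

	filemap_remove_folio(folio);
	return 0;
}
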
@@ -332,7 +333,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		index = indices[pagevec_count(&pvec) - 1] + 1;
 		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
 		for (i = 0; i < pagevec_count(&pvec); i++)
-			truncate_cleanup_page(pvec.pages[i]);
+			truncate_cleanup_folio(page_folio(pvec.pages[i]));
 		delete_from_page_cache_batch(mapping, &pvec);
 		for (i = 0; i < pagevec_count(&pvec); i++)
 			unlock_page(pvec.pages[i]);