@@ -1175,14 +1175,14 @@ static void __page_set_anon_rmap(struct folio *folio, struct page *page,
 
 /**
  * __page_check_anon_rmap - sanity check anonymous rmap addition
- * @page: the page to add the mapping to
+ * @folio: The folio containing @page.
+ * @page: the page to check the mapping of
  * @vma: the vm area in which the mapping is added
  * @address: the user virtual address mapped
  */
-static void __page_check_anon_rmap(struct page *page,
+static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
-	struct folio *folio = page_folio(page);
 	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
@@ -1262,7 +1262,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 			__page_set_anon_rmap(folio, page, vma, address,
 					     !!(flags & RMAP_EXCLUSIVE));
 		else
-			__page_check_anon_rmap(page, vma, address);
+			__page_check_anon_rmap(folio, page, vma, address);
 	}
 
 	mlock_vma_folio(folio, vma, compound);
0 commit comments