@@ -393,28 +393,23 @@ static int folio_expected_refs(struct address_space *mapping,
 }
 
 /*
- * Replace the page in the mapping.
+ * Replace the folio in the mapping.
  *
  * The number of remaining references must be:
- * 1 for anonymous pages without a mapping
- * 2 for pages with a mapping
- * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
+ * 1 for anonymous folios without a mapping
+ * 2 for folios with a mapping
+ * 3 for folios with a mapping and PagePrivate/PagePrivate2 set.
  */
-int folio_migrate_mapping(struct address_space *mapping,
-		struct folio *newfolio, struct folio *folio, int extra_count)
+static int __folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int expected_count)
 {
 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
 	struct zone *oldzone, *newzone;
 	int dirty;
-	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
 	long nr = folio_nr_pages(folio);
 	long entries, i;
 
 	if (!mapping) {
-		/* Anonymous page without mapping */
-		if (folio_ref_count(folio) != expected_count)
-			return -EAGAIN;
-
 		/* Take off deferred split queue while frozen and memcg set */
 		if (folio_test_large(folio) &&
 		    folio_test_large_rmappable(folio)) {
@@ -465,7 +460,7 @@ int folio_migrate_mapping(struct address_space *mapping,
 		entries = 1;
 	}
 
-	/* Move dirty while page refs frozen and newpage not yet exposed */
+	/* Move dirty while folio refs frozen and newfolio not yet exposed */
 	dirty = folio_test_dirty(folio);
 	if (dirty) {
 		folio_clear_dirty(folio);
@@ -479,7 +474,7 @@ int folio_migrate_mapping(struct address_space *mapping,
 	}
 
 	/*
-	 * Drop cache reference from old page by unfreezing
+	 * Drop cache reference from old folio by unfreezing
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
@@ -490,11 +485,11 @@ int folio_migrate_mapping(struct address_space *mapping,
 
 	/*
 	 * If moved to a different zone then also account
-	 * the page for that zone. Other VM counters will be
+	 * the folio for that zone. Other VM counters will be
 	 * taken care of when we establish references to the
-	 * new page and drop references to the old page.
+	 * new folio and drop references to the old folio.
 	 *
-	 * Note that anonymous pages are accounted for
+	 * Note that anonymous folios are accounted for
 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
 	 * are mapped to swap space.
 	 */
@@ -534,6 +529,17 @@ int folio_migrate_mapping(struct address_space *mapping,
 
 	return MIGRATEPAGE_SUCCESS;
 }
+
+int folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int extra_count)
+{
+	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
+
+	if (folio_ref_count(folio) != expected_count)
+		return -EAGAIN;
+
+	return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
+}
 EXPORT_SYMBOL(folio_migrate_mapping);
 
 /*
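For reference, the exported folio_migrate_mapping() keeps its original signature after this split, so existing callers need no changes; only the reference-count check moves into the new wrapper. Below is a minimal caller sketch showing how a migrate_folio-style helper typically drives it. The function migrate_dummy_folio() and its surrounding shape are hypothetical illustrations, not part of this patch.

/*
 * Hypothetical caller sketch (not part of this patch): a simplified
 * migrate_folio-style helper that uses folio_migrate_mapping()
 * exactly as callers did before the split.
 */
static int migrate_dummy_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src)
{
	int rc;

	/* Replace src with dst in the mapping; -EAGAIN if the refcount is unexpected */
	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/* Copy contents and flags over to the new folio */
	folio_migrate_copy(dst, src);
	return MIGRATEPAGE_SUCCESS;
}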