@@ -1405,9 +1405,89 @@ static bool page_swapped(struct page *page)
 		return swap_page_trans_huge_swapped(si, entry);
 	return false;
 }
+
+static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
+					 int *total_swapcount)
+{
+	int i, map_swapcount, _total_mapcount, _total_swapcount;
+	unsigned long offset = 0;
+	struct swap_info_struct *si;
+	struct swap_cluster_info *ci = NULL;
+	unsigned char *map = NULL;
+	int mapcount, swapcount = 0;
+
+	/* hugetlbfs shouldn't call it */
+	VM_BUG_ON_PAGE(PageHuge(page), page);
+
+	if (likely(!PageTransCompound(page))) {
+		mapcount = atomic_read(&page->_mapcount) + 1;
+		if (total_mapcount)
+			*total_mapcount = mapcount;
+		if (PageSwapCache(page))
+			swapcount = page_swapcount(page);
+		if (total_swapcount)
+			*total_swapcount = swapcount;
+		return mapcount + swapcount;
+	}
+
+	page = compound_head(page);
+
+	_total_mapcount = _total_swapcount = map_swapcount = 0;
+	if (PageSwapCache(page)) {
+		swp_entry_t entry;
+
+		entry.val = page_private(page);
+		si = _swap_info_get(entry);
+		if (si) {
+			map = si->swap_map;
+			offset = swp_offset(entry);
+		}
+	}
+	if (map)
+		ci = lock_cluster(si, offset);
+	for (i = 0; i < HPAGE_PMD_NR; i++) {
+		mapcount = atomic_read(&page[i]._mapcount) + 1;
+		_total_mapcount += mapcount;
+		if (map) {
+			swapcount = swap_count(map[offset + i]);
+			_total_swapcount += swapcount;
+		}
+		map_swapcount = max(map_swapcount, mapcount + swapcount);
+	}
+	unlock_cluster(ci);
+	if (PageDoubleMap(page)) {
+		map_swapcount -= 1;
+		_total_mapcount -= HPAGE_PMD_NR;
+	}
+	mapcount = compound_mapcount(page);
+	map_swapcount += mapcount;
+	_total_mapcount += mapcount;
+	if (total_mapcount)
+		*total_mapcount = _total_mapcount;
+	if (total_swapcount)
+		*total_swapcount = _total_swapcount;
+
+	return map_swapcount;
+}
 #else
 #define swap_page_trans_huge_swapped(si, entry)	swap_swapcount(si, entry)
 #define page_swapped(page)			(page_swapcount(page) != 0)
+
+static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
+					 int *total_swapcount)
+{
+	int mapcount, swapcount = 0;
+
+	/* hugetlbfs shouldn't call it */
+	VM_BUG_ON_PAGE(PageHuge(page), page);
+
+	mapcount = page_trans_huge_mapcount(page, total_mapcount);
+	if (PageSwapCache(page))
+		swapcount = page_swapcount(page);
+	if (total_swapcount)
+		*total_swapcount = swapcount;
+	return mapcount + swapcount;
+}
 #endif
 
 /*
@@ -1416,23 +1496,27 @@ static bool page_swapped(struct page *page)
  * on disk will never be read, and seeking back there to write new content
  * later would only waste time away from clustering.
  *
- * NOTE: total_mapcount should not be relied upon by the caller if
+ * NOTE: total_map_swapcount should not be relied upon by the caller if
  * reuse_swap_page() returns false, but it may be always overwritten
  * (see the other implementation for CONFIG_SWAP=n).
  */
-bool reuse_swap_page(struct page *page, int *total_mapcount)
+bool reuse_swap_page(struct page *page, int *total_map_swapcount)
 {
-	int count;
+	int count, total_mapcount, total_swapcount;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	if (unlikely(PageKsm(page)))
 		return false;
-	count = page_trans_huge_mapcount(page, total_mapcount);
-	if (count <= 1 && PageSwapCache(page)) {
-		count += page_swapcount(page);
-		if (count != 1)
-			goto out;
+	count = page_trans_huge_map_swapcount(page, &total_mapcount,
+					      &total_swapcount);
+	if (total_map_swapcount)
+		*total_map_swapcount = total_mapcount + total_swapcount;
+	if (count == 1 && PageSwapCache(page) &&
+	    (likely(!PageTransCompound(page)) ||
+	     /* The remaining swap count will be freed soon */
+	     total_swapcount == page_swapcount(page))) {
 		if (!PageWriteback(page)) {
+			page = compound_head(page);
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		} else {
@@ -1448,7 +1532,7 @@ bool reuse_swap_page(struct page *page, int *total_mapcount)
 			spin_unlock(&p->lock);
 		}
 	}
-out:
+
 	return count <= 1;
 }
 
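
For readers following the interface change: after this patch, reuse_swap_page() reports the combined map + swap count through its out parameter, and the NOTE in the comment block above says that value should not be relied upon when the function returns false. A minimal caller sketch under that assumption (the helper name can_reuse_for_write() is hypothetical and for illustration only; the real callers include the write-fault code in mm/memory.c):

/* Hypothetical illustration, not part of this commit. */
static bool can_reuse_for_write(struct page *page)
{
	int total_map_swapcount;

	/* The caller must hold the page lock, as reuse_swap_page() asserts. */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/*
	 * reuse_swap_page() returns true only when the map count plus swap
	 * count is 1, i.e. this mapping is the sole user of the page, so a
	 * write fault may reuse it in place instead of copying.
	 */
	if (reuse_swap_page(page, &total_map_swapcount))
		return true;

	/*
	 * Per the NOTE in the patch: total_map_swapcount may have been
	 * overwritten here, but must not be relied upon once false is
	 * returned.
	 */
	return false;
}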