@@ -142,6 +142,9 @@ static bool zswap_exclusive_loads_enabled = IS_ENABLED(
 		CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
 module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
 
+/* Number of zpools in zswap_pool (empirically determined for scalability) */
+#define ZSWAP_NR_ZPOOLS 32
+
 /*********************************
 * data structures
 **********************************/
@@ -161,7 +164,7 @@ struct crypto_acomp_ctx {
  * needs to be verified that it's still valid in the tree.
  */
 struct zswap_pool {
-	struct zpool *zpool;
+	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
 	struct crypto_acomp_ctx __percpu *acomp_ctx;
 	struct kref kref;
 	struct list_head list;
@@ -248,7 +251,7 @@ static bool zswap_has_pool;
 
 #define zswap_pool_debug(msg, p)				\
 	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
-		 zpool_get_type((p)->zpool))
+		 zpool_get_type((p)->zpools[0]))
 
 static int zswap_writeback_entry(struct zswap_entry *entry,
 				 struct zswap_tree *tree);
@@ -272,11 +275,13 @@ static void zswap_update_total_size(void)
 {
 	struct zswap_pool *pool;
 	u64 total = 0;
+	int i;
 
 	rcu_read_lock();
 
 	list_for_each_entry_rcu(pool, &zswap_pools, list)
-		total += zpool_get_total_size(pool->zpool);
+		for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
+			total += zpool_get_total_size(pool->zpools[i]);
 
 	rcu_read_unlock();
 
@@ -365,6 +370,16 @@ static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
 	return false;
 }
 
+static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
+{
+	int i = 0;
+
+	if (ZSWAP_NR_ZPOOLS > 1)
+		i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));
+
+	return entry->pool->zpools[i];
+}
+
 /*
  * Carries out the common pattern of freeing and entry's zpool allocation,
  * freeing the entry itself, and decrementing the number of stored pages.
@@ -381,7 +396,7 @@ static void zswap_free_entry(struct zswap_entry *entry)
 		spin_lock(&entry->pool->lru_lock);
 		list_del(&entry->lru);
 		spin_unlock(&entry->pool->lru_lock);
-		zpool_free(entry->pool->zpool, entry->handle);
+		zpool_free(zswap_find_zpool(entry), entry->handle);
 		zswap_pool_put(entry->pool);
 	}
 	zswap_entry_cache_free(entry);
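
Side note on the selection logic above: zswap_find_zpool() reduces a pointer hash to ilog2(ZSWAP_NR_ZPOOLS) bits, so entries spread roughly evenly across the 32 zpools. The following is a minimal, hedged userspace sketch of the same idea; pick_pool() and the golden-ratio multiplier are stand-ins for the kernel's hash_ptr()/ilog2() helpers, not kernel API.

/*
 * Illustrative userspace sketch (not kernel code): spread entries across
 * NR_POOLS buckets by hashing their addresses, as zswap_find_zpool() does.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_POOLS 32	/* power of two, like ZSWAP_NR_ZPOOLS */

/* Fibonacci-style pointer hash reduced to log2(NR_POOLS) bits */
static unsigned int pick_pool(const void *entry)
{
	uint64_t h = (uintptr_t)entry * 0x61C8864680B583EBull;

	return (unsigned int)(h >> (64 - 5));	/* 5 == log2(NR_POOLS) */
}

int main(void)
{
	int counts[NR_POOLS] = { 0 };
	int dummy[1024];
	int i;

	/* count how many of 1024 distinct addresses land in each bucket */
	for (i = 0; i < 1024; i++)
		counts[pick_pool(&dummy[i])]++;

	for (i = 0; i < NR_POOLS; i++)
		printf("pool %2d: %d entries\n", i, counts[i]);
	return 0;
}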
@@ -590,7 +605,8 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 	list_for_each_entry_rcu(pool, &zswap_pools, list) {
 		if (strcmp(pool->tfm_name, compressor))
 			continue;
-		if (strcmp(zpool_get_type(pool->zpool), type))
+		/* all zpools share the same type */
+		if (strcmp(zpool_get_type(pool->zpools[0]), type))
 			continue;
 		/* if we can't get it, it's about to be destroyed */
 		if (!zswap_pool_get(pool))
@@ -695,6 +711,7 @@ static void shrink_worker(struct work_struct *w)
 
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
+	int i;
 	struct zswap_pool *pool;
 	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
@@ -715,15 +732,18 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 	if (!pool)
 		return NULL;
 
-	/* unique name for each pool specifically required by zsmalloc */
-	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
+	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
+		/* unique name for each pool specifically required by zsmalloc */
+		snprintf(name, 38, "zswap%x",
+			 atomic_inc_return(&zswap_pools_count));
 
-	pool->zpool = zpool_create_pool(type, name, gfp);
-	if (!pool->zpool) {
-		pr_err("%s zpool not available\n", type);
-		goto error;
+		pool->zpools[i] = zpool_create_pool(type, name, gfp);
+		if (!pool->zpools[i]) {
+			pr_err("%s zpool not available\n", type);
+			goto error;
+		}
 	}
-	pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
+	pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
 
 	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
 
@@ -755,8 +775,8 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 error:
 	if (pool->acomp_ctx)
 		free_percpu(pool->acomp_ctx);
-	if (pool->zpool)
-		zpool_destroy_pool(pool->zpool);
+	while (i--)
+		zpool_destroy_pool(pool->zpools[i]);
 	kfree(pool);
 	return NULL;
 }
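
The error path works because `i` holds the number of zpools created successfully when `goto error` fires, so `while (i--)` unwinds only those. A small hedged sketch of the same create/unwind pattern; create_one() and destroy_one() are hypothetical stand-ins for zpool_create_pool()/zpool_destroy_pool().

/* Illustrative sketch of the partial-unwind pattern used above. */
#include <stdlib.h>

#define NR 32

static void *create_one(void) { return malloc(16); }
static void destroy_one(void *p) { free(p); }

static int create_all(void *objs[NR])
{
	int i;

	for (i = 0; i < NR; i++) {
		objs[i] = create_one();
		if (!objs[i])
			goto error;
	}
	return 0;

error:
	/* i counts the successful creations; destroy only those */
	while (i--)
		destroy_one(objs[i]);
	return -1;
}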
@@ -805,11 +825,14 @@ static struct zswap_pool *__zswap_pool_create_fallback(void)
 
 static void zswap_pool_destroy(struct zswap_pool *pool)
 {
+	int i;
+
 	zswap_pool_debug("destroying", pool);
 
 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 	free_percpu(pool->acomp_ctx);
-	zpool_destroy_pool(pool->zpool);
+	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
+		zpool_destroy_pool(pool->zpools[i]);
 	kfree(pool);
 }
 
@@ -1073,7 +1096,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	struct page *page;
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
-	struct zpool *pool = entry->pool->zpool;
+	struct zpool *pool = zswap_find_zpool(entry);
 
 	u8 *src, *tmp = NULL;
 	unsigned int dlen;
@@ -1214,6 +1237,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 	struct crypto_acomp_ctx *acomp_ctx;
 	struct obj_cgroup *objcg = NULL;
 	struct zswap_pool *pool;
+	struct zpool *zpool;
 	int ret;
 	unsigned int dlen = PAGE_SIZE;
 	unsigned long handle, value;
@@ -1324,10 +1348,11 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 	}
 
 	/* store */
+	zpool = zswap_find_zpool(entry);
 	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
-	if (zpool_malloc_support_movable(entry->pool->zpool))
+	if (zpool_malloc_support_movable(zpool))
 		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
-	ret = zpool_malloc(entry->pool->zpool, dlen, gfp, &handle);
+	ret = zpool_malloc(zpool, dlen, gfp, &handle);
 	if (ret == -ENOSPC) {
 		zswap_reject_compress_poor++;
 		goto put_dstmem;
@@ -1336,9 +1361,9 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 		zswap_reject_alloc_fail++;
 		goto put_dstmem;
 	}
-	buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO);
+	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
 	memcpy(buf, dst, dlen);
-	zpool_unmap_handle(entry->pool->zpool, handle);
+	zpool_unmap_handle(zpool, handle);
 	mutex_unlock(acomp_ctx->mutex);
 
 	/* populate entry */
@@ -1409,6 +1434,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
 	u8 *src, *dst, *tmp;
+	struct zpool *zpool;
 	unsigned int dlen;
 	int ret;
 
@@ -1430,7 +1456,8 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 		goto stats;
 	}
 
-	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
+	zpool = zswap_find_zpool(entry);
+	if (!zpool_can_sleep_mapped(zpool)) {
 		tmp = kmalloc(entry->length, GFP_KERNEL);
 		if (!tmp) {
 			ret = -ENOMEM;
@@ -1440,12 +1467,12 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 
 	/* decompress */
 	dlen = PAGE_SIZE;
-	src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
+	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
 
-	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
+	if (!zpool_can_sleep_mapped(zpool)) {
 		memcpy(tmp, src, entry->length);
 		src = tmp;
-		zpool_unmap_handle(entry->pool->zpool, entry->handle);
+		zpool_unmap_handle(zpool, entry->handle);
 	}
 
 	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
@@ -1457,8 +1484,8 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 	ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
 	mutex_unlock(acomp_ctx->mutex);
 
-	if (zpool_can_sleep_mapped(entry->pool->zpool))
-		zpool_unmap_handle(entry->pool->zpool, entry->handle);
+	if (zpool_can_sleep_mapped(zpool))
+		zpool_unmap_handle(zpool, entry->handle);
 	else
 		kfree(tmp);
 
@@ -1619,7 +1646,7 @@ static int zswap_setup(void)
 	pool = __zswap_pool_create_fallback();
 	if (pool) {
 		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
-			zpool_get_type(pool->zpools[0]));
+			zpool_get_type(pool->zpools[0]));
 		list_add(&pool->list, &zswap_pools);
 		zswap_has_pool = true;
 	} else {