@@ -143,14 +143,14 @@ struct crypt_config {
	 * pool for per bio private data, crypto requests,
	 * encryption requeusts/buffer pages and integrity tags
	 */
-	mempool_t *req_pool;
-	mempool_t *page_pool;
-	mempool_t *tag_pool;
+	mempool_t req_pool;
+	mempool_t page_pool;
+	mempool_t tag_pool;
	unsigned tag_pool_max_sectors;

	struct percpu_counter n_allocated_pages;

-	struct bio_set *bs;
+	struct bio_set bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;
@@ -1245,7 +1245,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->r.req)
-		ctx->r.req = mempool_alloc(cc->req_pool, GFP_NOIO);
+		ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);

	skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);

@@ -1262,7 +1262,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
				 struct convert_context *ctx)
{
	if (!ctx->r.req_aead)
-		ctx->r.req_aead = mempool_alloc(cc->req_pool, GFP_NOIO);
+		ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);

	aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);

@@ -1290,7 +1290,7 @@ static void crypt_free_req_skcipher(struct crypt_config *cc,
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct skcipher_request *)(io + 1) != req)
-		mempool_free(req, cc->req_pool);
+		mempool_free(req, &cc->req_pool);
}

static void crypt_free_req_aead(struct crypt_config *cc,
@@ -1299,7 +1299,7 @@ static void crypt_free_req_aead(struct crypt_config *cc,
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct aead_request *)(io + 1) != req)
-		mempool_free(req, cc->req_pool);
+		mempool_free(req, &cc->req_pool);
}

static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
@@ -1409,7 +1409,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_lock(&cc->bio_alloc_lock);

-	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
+	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
	if (!clone)
		goto out;

@@ -1418,7 +1418,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
-		page = mempool_alloc(cc->page_pool, gfp_mask);
+		page = mempool_alloc(&cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
@@ -1453,7 +1453,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
-		mempool_free(bv->bv_page, cc->page_pool);
+		mempool_free(bv->bv_page, &cc->page_pool);
	}
}

@@ -1492,7 +1492,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
	crypt_free_req(cc, io->ctx.r.req, base_bio);

	if (unlikely(io->integrity_metadata_from_pool))
-		mempool_free(io->integrity_metadata, io->cc->tag_pool);
+		mempool_free(io->integrity_metadata, &io->cc->tag_pool);
	else
		kfree(io->integrity_metadata);

@@ -1565,7 +1565,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
	 * biovecs we don't need to worry about the block layer
	 * modifying the biovec array; so leverage bio_clone_fast().
	 */
-	clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
+	clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
	if (!clone)
		return 1;

@@ -2219,17 +2219,16 @@ static void crypt_dtr(struct dm_target *ti)

	crypt_free_tfms(cc);

-	if (cc->bs)
-		bioset_free(cc->bs);
+	bioset_exit(&cc->bs);

-	mempool_destroy(cc->page_pool);
-	mempool_destroy(cc->req_pool);
-	mempool_destroy(cc->tag_pool);
-
-	if (cc->page_pool)
+	if (mempool_initialized(&cc->page_pool))
		WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
	percpu_counter_destroy(&cc->n_allocated_pages);

+	mempool_exit(&cc->page_pool);
+	mempool_exit(&cc->req_pool);
+	mempool_exit(&cc->tag_pool);
+
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

@@ -2743,17 +2742,15 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
		iv_size_padding = align_mask;
	}

-	ret = -ENOMEM;
-
	/* ...| IV + padding | original IV | original sec. number | bio tag offset | */
	additional_req_size = sizeof(struct dm_crypt_request) +
		iv_size_padding + cc->iv_size +
		cc->iv_size +
		sizeof(uint64_t) +
		sizeof(unsigned int);

-	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + additional_req_size);
-	if (!cc->req_pool) {
+	ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
+	if (ret) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}
@@ -2762,14 +2759,14 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
		      ARCH_KMALLOC_MINALIGN);

-	cc->page_pool = mempool_create(BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
-	if (!cc->page_pool) {
+	ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
+	if (ret) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

-	cc->bs = bioset_create(MIN_IOS, 0, BIOSET_NEED_BVECS);
-	if (!cc->bs) {
+	ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
+	if (ret) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}
@@ -2806,11 +2803,10 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
		if (!cc->tag_pool_max_sectors)
			cc->tag_pool_max_sectors = 1;

-		cc->tag_pool = mempool_create_kmalloc_pool(MIN_IOS,
+		ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
-		if (!cc->tag_pool) {
+		if (ret) {
			ti->error = "Cannot allocate integrity tags mempool";
-			ret = -ENOMEM;
			goto bad;
		}

@@ -2903,7 +2899,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
				GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
-			io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
+			io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
			io->integrity_metadata_from_pool = true;
		}
	}
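
The same conversion runs through every hunk above: the mempool_t and struct bio_set members become objects embedded in struct crypt_config, the create/destroy pairs become init/exit calls that take a pointer to the embedded object, allocation sites pass &cc->..., and error handling switches from NULL checks to the errno returned by the init helpers. A minimal sketch of that pattern follows; the names example_config, example_ctr, example_dtr and MY_MIN_IOS are illustrative and not part of dm-crypt, while mempool_init_kmalloc_pool(), bioset_init(), mempool_exit() and bioset_exit() are the kernel APIs the patch uses.

#include <linux/mempool.h>
#include <linux/bio.h>

#define MY_MIN_IOS 64	/* illustrative pool size, not dm-crypt's MIN_IOS */

struct example_config {
	mempool_t req_pool;	/* embedded object, no longer a pointer */
	struct bio_set bs;	/* embedded object, no longer a pointer */
};

/* Constructor: init helpers return 0 or a negative errno. */
static int example_ctr(struct example_config *cfg, size_t req_size)
{
	int ret;

	ret = mempool_init_kmalloc_pool(&cfg->req_pool, MY_MIN_IOS, req_size);
	if (ret)
		return ret;

	ret = bioset_init(&cfg->bs, MY_MIN_IOS, 0, BIOSET_NEED_BVECS);
	if (ret) {
		mempool_exit(&cfg->req_pool);
		return ret;
	}
	return 0;
}

/* Destructor: assuming cfg was zero-allocated, the exit helpers are safe
 * even on pools that were never initialized, so no guards are needed. */
static void example_dtr(struct example_config *cfg)
{
	bioset_exit(&cfg->bs);
	mempool_exit(&cfg->req_pool);
}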