@@ -22,6 +22,7 @@
 #include <crypto/internal/aead.h>
 #include <crypto/cryptd.h>
 #include <crypto/crypto_wq.h>
+#include <linux/atomic.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -31,7 +32,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 
-#define CRYPTD_MAX_CPU_QLEN 100
+#define CRYPTD_MAX_CPU_QLEN 1000
 
 struct cryptd_cpu_queue {
        struct crypto_queue queue;
@@ -58,6 +59,7 @@ struct aead_instance_ctx {
 };
 
 struct cryptd_blkcipher_ctx {
+       atomic_t refcnt;
        struct crypto_blkcipher *child;
 };
 
@@ -66,6 +68,7 @@ struct cryptd_blkcipher_request_ctx {
 };
 
 struct cryptd_hash_ctx {
+       atomic_t refcnt;
        struct crypto_shash *child;
 };
 
@@ -75,6 +78,7 @@ struct cryptd_hash_request_ctx {
 };
 
 struct cryptd_aead_ctx {
+       atomic_t refcnt;
        struct crypto_aead *child;
 };
 
@@ -118,11 +122,29 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 {
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;
+       struct crypto_tfm *tfm;
+       atomic_t *refcnt;
+       bool may_backlog;
 
        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);
+
+       refcnt = crypto_tfm_ctx(request->tfm);
+       may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+       if (err == -EBUSY && !may_backlog)
+               goto out_put_cpu;
+
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+
+       if (!atomic_read(refcnt))
+               goto out_put_cpu;
+
+       tfm = request->tfm;
+       atomic_inc(refcnt);
+
+out_put_cpu:
        put_cpu();
 
        return err;
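
Note: this hunk establishes the reference-counting protocol the rest of the patch builds on. The refcnt added to each context starts at 1 (the user's reference, set in the cryptd_alloc_* hunks below); once that initial reference is live, every successfully queued request takes an extra reference, and each completion path, like cryptd_free_*(), drops one, freeing the tfm on the final drop. Requests that fail to enqueue (-EBUSY without CRYPTO_TFM_REQ_MAY_BACKLOG) never take a reference. Below is a minimal userspace model of that protocol using C11 stdatomic in place of the kernel's atomic_t; it is purely illustrative and not part of the patch.

```c
/* Userspace model of the cryptd refcount lifecycle (illustrative only). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct tfm {
        atomic_int refcnt;              /* stands in for the atomic_t in the ctx */
};

static struct tfm *tfm_alloc(void)
{
        struct tfm *t = malloc(sizeof(*t));

        atomic_init(&t->refcnt, 1);     /* cryptd_alloc_*(): user holds one ref */
        return t;
}

/* Mirrors the atomic_dec_and_test() pattern: free on the final drop. */
static void tfm_put(struct tfm *t, const char *who)
{
        if (atomic_fetch_sub(&t->refcnt, 1) == 1) {
                printf("%s dropped the last reference; tfm freed\n", who);
                free(t);
        }
}

int main(void)
{
        struct tfm *t = tfm_alloc();

        atomic_fetch_add(&t->refcnt, 1); /* enqueue: +1 per queued request */
        tfm_put(t, "cryptd_free_*()");   /* user frees while a request is queued */
        tfm_put(t, "completion path");   /* last completion actually frees it */
        return 0;
}
```
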
@@ -206,7 +228,10 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
                                   unsigned int len))
 {
        struct cryptd_blkcipher_request_ctx *rctx;
+       struct cryptd_blkcipher_ctx *ctx;
+       struct crypto_ablkcipher *tfm;
        struct blkcipher_desc desc;
+       int refcnt;
 
        rctx = ablkcipher_request_ctx(req);
 
@@ -222,9 +247,16 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
        req->base.complete = rctx->complete;
 
 out:
+       tfm = crypto_ablkcipher_reqtfm(req);
+       ctx = crypto_ablkcipher_ctx(tfm);
+       refcnt = atomic_read(&ctx->refcnt);
+
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
+
+       if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+               crypto_free_ablkcipher(tfm);
 }
 
 static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
@@ -456,6 +488,21 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
        return cryptd_enqueue_request(queue, &req->base);
 }
 
+static void cryptd_hash_complete(struct ahash_request *req, int err)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+       int refcnt = atomic_read(&ctx->refcnt);
+
+       local_bh_disable();
+       rctx->complete(&req->base, err);
+       local_bh_enable();
+
+       if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+               crypto_free_ahash(tfm);
+}
+
 static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
 {
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
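
Note: cryptd_hash_complete() folds the completion boilerplate that was previously copy-pasted five times (the local_bh_disable()/complete/local_bh_enable() sequence) together with the new reference drop into a single helper; the five hunks that follow convert cryptd_hash_init, _update, _final, _finup and _digest to call it.
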
@@ -475,9 +522,7 @@ static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
        req->base.complete = rctx->complete;
 
 out:
-       local_bh_disable();
-       rctx->complete(&req->base, err);
-       local_bh_enable();
+       cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_init_enqueue(struct ahash_request *req)
@@ -500,9 +545,7 @@ static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
        req->base.complete = rctx->complete;
 
 out:
-       local_bh_disable();
-       rctx->complete(&req->base, err);
-       local_bh_enable();
+       cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_update_enqueue(struct ahash_request *req)
@@ -523,9 +566,7 @@ static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
        req->base.complete = rctx->complete;
 
 out:
-       local_bh_disable();
-       rctx->complete(&req->base, err);
-       local_bh_enable();
+       cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_final_enqueue(struct ahash_request *req)
@@ -546,9 +587,7 @@ static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
        req->base.complete = rctx->complete;
 
 out:
-       local_bh_disable();
-       rctx->complete(&req->base, err);
-       local_bh_enable();
+       cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_finup_enqueue(struct ahash_request *req)
@@ -575,9 +614,7 @@ static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
        req->base.complete = rctx->complete;
 
 out:
-       local_bh_disable();
-       rctx->complete(&req->base, err);
-       local_bh_enable();
+       cryptd_hash_complete(req, err);
 }
 
 static int cryptd_hash_digest_enqueue(struct ahash_request *req)
@@ -688,7 +725,10 @@ static void cryptd_aead_crypt(struct aead_request *req,
                              int (*crypt)(struct aead_request *req))
 {
        struct cryptd_aead_request_ctx *rctx;
+       struct cryptd_aead_ctx *ctx;
        crypto_completion_t compl;
+       struct crypto_aead *tfm;
+       int refcnt;
 
        rctx = aead_request_ctx(req);
        compl = rctx->complete;
@@ -697,10 +737,18 @@ static void cryptd_aead_crypt(struct aead_request *req,
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt(req);
+
 out:
+       tfm = crypto_aead_reqtfm(req);
+       ctx = crypto_aead_ctx(tfm);
+       refcnt = atomic_read(&ctx->refcnt);
+
        local_bh_disable();
        compl(&req->base, err);
        local_bh_enable();
+
+       if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+               crypto_free_aead(tfm);
 }
 
 static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
@@ -883,6 +931,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
 {
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+       struct cryptd_blkcipher_ctx *ctx;
        struct crypto_tfm *tfm;
 
        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -899,6 +948,9 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
                return ERR_PTR(-EINVAL);
        }
 
+       ctx = crypto_tfm_ctx(tfm);
+       atomic_set(&ctx->refcnt, 1);
+
        return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
 }
 EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
@@ -910,16 +962,28 @@ struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
 }
 EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
 
+bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
+{
+       struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+
+       return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
+
 void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
 {
-       crypto_free_ablkcipher(&tfm->base);
+       struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
+
+       if (atomic_dec_and_test(&ctx->refcnt))
+               crypto_free_ablkcipher(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
 
 struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
 {
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+       struct cryptd_hash_ctx *ctx;
        struct crypto_ahash *tfm;
 
        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
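
Note: the cryptd_*_queued() helpers added in this and the following hunks report whether any queued requests still hold a reference (the count minus the user's own reference). Below is a sketch of the kind of caller this enables, modeled loosely on an ablk_helper-style wrapper; ablk_encrypt_sketch, async_helper_ctx, enqueue_to_cryptd and do_sync_encrypt are hypothetical names, not part of this patch.

```c
/*
 * Hypothetical wrapper: once requests are in flight through cryptd,
 * later requests issued from atomic context must also be routed
 * through cryptd so completions are not reordered.  Only
 * cryptd_ablkcipher_queued() below comes from this patch.
 */
static int ablk_encrypt_sketch(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (!irq_fpu_usable() ||
            (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm)))
                return enqueue_to_cryptd(req);  /* defer to the cryptd queue */

        return do_sync_encrypt(req);            /* fast synchronous path */
}
```
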
@@ -933,6 +997,9 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                return ERR_PTR(-EINVAL);
        }
 
+       ctx = crypto_ahash_ctx(tfm);
+       atomic_set(&ctx->refcnt, 1);
+
        return __cryptd_ahash_cast(tfm);
 }
 EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
@@ -952,16 +1019,28 @@ struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
 }
 EXPORT_SYMBOL_GPL(cryptd_shash_desc);
 
+bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
+{
+       struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
+
+       return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
+
 void cryptd_free_ahash(struct cryptd_ahash *tfm)
 {
-       crypto_free_ahash(&tfm->base);
+       struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
+
+       if (atomic_dec_and_test(&ctx->refcnt))
+               crypto_free_ahash(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ahash);
 
 struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                      u32 type, u32 mask)
 {
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+       struct cryptd_aead_ctx *ctx;
        struct crypto_aead *tfm;
 
        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -974,6 +1053,10 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }
+
+       ctx = crypto_aead_ctx(tfm);
+       atomic_set(&ctx->refcnt, 1);
+
        return __cryptd_aead_cast(tfm);
 }
 EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
@@ -986,9 +1069,20 @@ struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
 }
 EXPORT_SYMBOL_GPL(cryptd_aead_child);
 
+bool cryptd_aead_queued(struct cryptd_aead *tfm)
+{
+       struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
+
+       return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_aead_queued);
+
 void cryptd_free_aead(struct cryptd_aead *tfm)
 {
-       crypto_free_aead(&tfm->base);
+       struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
+
+       if (atomic_dec_and_test(&ctx->refcnt))
+               crypto_free_aead(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_aead);
 
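
Note: with these changes, cryptd_free_ablkcipher(), cryptd_free_ahash() and cryptd_free_aead() no longer free the tfm unconditionally; they only drop the user's reference. If requests are still queued when the handle is freed, the tfm stays alive until the last completion drops the final reference, so teardown while work is still pending is safe.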