@@ -82,6 +82,7 @@
 struct atmel_aes_caps {
 	bool			has_dualbuff;
 	bool			has_cfb64;
+	bool			has_ctr32;
 	u32			max_burst_size;
 };
 
@@ -103,6 +104,15 @@ struct atmel_aes_ctx {
 	struct atmel_aes_base_ctx	base;
 };
 
+struct atmel_aes_ctr_ctx {
+	struct atmel_aes_base_ctx	base;
+
+	u32			iv[AES_BLOCK_SIZE / sizeof(u32)];
+	size_t			offset;
+	struct scatterlist	src[2];
+	struct scatterlist	dst[2];
+};
+
 struct atmel_aes_reqctx {
 	unsigned long		mode;
 };
@@ -762,6 +772,96 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
 					   atmel_aes_transfer_complete);
 }
 
+static inline struct atmel_aes_ctr_ctx *
+atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+	return container_of(ctx, struct atmel_aes_ctr_ctx, base);
+}
+
+static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	struct scatterlist *src, *dst;
+	u32 ctr, blocks;
+	size_t datalen;
+	bool use_dma, fragmented = false;
+
+	/* Check for transfer completion. */
+	ctx->offset += dd->total;
+	if (ctx->offset >= req->nbytes)
+		return atmel_aes_transfer_complete(dd);
+
+	/* Compute data length. */
+	datalen = req->nbytes - ctx->offset;
+	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
+	ctr = be32_to_cpu(ctx->iv[3]);
+	if (dd->caps.has_ctr32) {
+		/* Check 32bit counter overflow. */
+		u32 start = ctr;
+		u32 end = start + blocks - 1;
+
+		if (end < start) {
+			ctr |= 0xffffffff;
+			datalen = AES_BLOCK_SIZE * -start;
+			fragmented = true;
+		}
+	} else {
+		/* Check 16bit counter overflow. */
+		u16 start = ctr & 0xffff;
+		u16 end = start + (u16)blocks - 1;
+
+		if (blocks >> 16 || end < start) {
+			ctr |= 0xffff;
+			datalen = AES_BLOCK_SIZE * (0x10000 - start);
+			fragmented = true;
+		}
+	}
+	use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
+
+	/* Jump to offset. */
+	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
+	dst = ((req->src == req->dst) ? src :
+	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));
+
+	/* Configure hardware. */
+	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
+	if (unlikely(fragmented)) {
+		/*
+		 * Increment the counter manually to cope with the hardware
+		 * counter overflow.
+		 */
+		ctx->iv[3] = cpu_to_be32(ctr);
+		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
+	}
+
+	if (use_dma)
+		return atmel_aes_dma_start(dd, src, dst, datalen,
+					   atmel_aes_ctr_transfer);
+
+	return atmel_aes_cpu_start(dd, src, dst, datalen,
+				   atmel_aes_ctr_transfer);
+}
+
+static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	int err;
+
+	atmel_aes_set_mode(dd, rctx);
+
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
+	ctx->offset = 0;
+	dd->total = 0;
+	return atmel_aes_ctr_transfer(dd);
+}
+
 static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
 	struct atmel_aes_base_ctx *ctx;
@@ -919,6 +1019,16 @@ static int atmel_aes_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }
 
+static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
+{
+	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+	ctx->base.start = atmel_aes_ctr_start;
+
+	return 0;
+}
+
 static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
 {
 }
@@ -1076,11 +1186,11 @@ static struct crypto_alg aes_algs[] = {
 	.cra_priority		= ATMEL_AES_PRIORITY,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
+	.cra_ctxsize		= sizeof(struct atmel_aes_ctr_ctx),
 	.cra_alignmask		= 0xf,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
-	.cra_init		= atmel_aes_cra_init,
+	.cra_init		= atmel_aes_ctr_cra_init,
 	.cra_exit		= atmel_aes_cra_exit,
 	.cra_u.ablkcipher = {
 		.min_keysize	= AES_MIN_KEY_SIZE,
@@ -1262,18 +1372,21 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
 {
 	dd->caps.has_dualbuff = 0;
 	dd->caps.has_cfb64 = 0;
+	dd->caps.has_ctr32 = 0;
 	dd->caps.max_burst_size = 1;
 
 	/* keep only major version number */
 	switch (dd->hw_version & 0xff0) {
 	case 0x500:
 		dd->caps.has_dualbuff = 1;
 		dd->caps.has_cfb64 = 1;
+		dd->caps.has_ctr32 = 1;
 		dd->caps.max_burst_size = 4;
 		break;
 	case 0x200:
 		dd->caps.has_dualbuff = 1;
 		dd->caps.has_cfb64 = 1;
+		dd->caps.has_ctr32 = 1;
 		dd->caps.max_burst_size = 4;
 		break;
 	case 0x130:
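
The subtlest part of the patch is the fragmentation arithmetic in atmel_aes_ctr_transfer(): when the hardware counter would wrap inside a request, datalen is clamped to the bytes that fit before the wrap, and the IV is then bumped manually with crypto_inc(). Below is a minimal user-space sketch of that length computation (standalone C with a hypothetical helper name, not driver code):

#include <stdint.h>
#include <stdio.h>

#define AES_BLOCK_SIZE	16

/*
 * Sketch of the overflow check in atmel_aes_ctr_transfer(): return how
 * many bytes of `datalen` can be processed before the hardware counter
 * (16-bit or 32-bit, per the has_ctr32 cap) wraps around.
 */
static size_t ctr_fragment_len(uint32_t ctr, size_t datalen, int has_ctr32)
{
	uint32_t blocks = (datalen + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE;

	if (has_ctr32) {
		uint32_t start = ctr, end = start + blocks - 1;

		if (end < start)	/* wrapped past 2^32 */
			return ((size_t)UINT32_MAX - start + 1) * AES_BLOCK_SIZE;
	} else {
		uint16_t start = ctr & 0xffff;
		uint16_t end = start + (uint16_t)blocks - 1;

		if (blocks >> 16 || end < start)	/* wrapped past 2^16 */
			return (size_t)(0x10000 - start) * AES_BLOCK_SIZE;
	}
	return datalen;	/* no wrap: the whole request fits in one shot */
}

int main(void)
{
	/* 16-bit counter at 0xfffe, 4 blocks requested: only 2 blocks fit. */
	printf("%zu\n", ctr_fragment_len(0xfffe, 64, 0));	/* prints 32 */
	/* 32-bit counter at 0xffffffff, 2 blocks requested: 1 block fits. */
	printf("%zu\n", ctr_fragment_len(0xffffffff, 32, 1));	/* prints 16 */
	return 0;
}

With a 16-bit counter at 0xfffe, a 4-block request is split into a 2-block fragment before the counter wraps, matching the kernel's datalen = AES_BLOCK_SIZE * (0x10000 - start); the driver then resumes from ctx->offset on the next atmel_aes_ctr_transfer() pass.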
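For completeness, atmel_aes_ctr_ctx_cast() is the standard container_of() idiom: the crypto core hands back a pointer to the embedded atmel_aes_base_ctx, and the helper recovers the enclosing CTR context from it. A user-space rendition of the same pattern, with hypothetical stand-in types:

#include <stddef.h>
#include <stdio.h>

/* User-space equivalent of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_ctx { int keylen; };	/* stand-in for atmel_aes_base_ctx */

struct ctr_ctx {
	struct base_ctx	base;		/* first member, as in atmel_aes_ctr_ctx */
	unsigned int	offset;
};

int main(void)
{
	struct ctr_ctx ctr = { .base = { .keylen = 16 }, .offset = 42 };
	struct base_ctx *b = &ctr.base;

	/* Map the embedded base pointer back to its enclosing struct. */
	struct ctr_ctx *back = container_of(b, struct ctr_ctx, base);
	printf("%u\n", back->offset);	/* prints 42 */
	return 0;
}

Keeping base as the first member means the context the crypto core allocates (sized by cra_ctxsize, here enlarged to sizeof(struct atmel_aes_ctr_ctx)) is usable as either type; container_of() itself works for a member at any offset.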