@@ -87,6 +87,8 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
 {
         unsigned int nents = 0;
 
+        *lbytes = 0;
+
         while (nbytes && sg_list) {
                 nents++;
                 /* get the number of bytes in the last entry */
@@ -95,6 +97,7 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
                                 nbytes : sg_list->length;
                 sg_list = sg_next(sg_list);
         }
+
         dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
         return nents;
 }
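Note: initializing *lbytes before the walk matters because the while loop never executes for an empty list or a zero nbytes, and after this patch the unified cc_map_sg() path relies on cc_get_sgl_nents() as the only place the last-entry byte count is written. A minimal sketch of the edge case, assuming a caller-side u32 local as in the driver:

        u32 lbytes;                                    /* callers pass an uninitialized local */
        nents = cc_get_sgl_nents(dev, sg, 0, &lbytes); /* loop body never runs */
        /* nents == 0, and with this change lbytes == 0 instead of stack garbage */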
@@ -290,37 +293,25 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
                      unsigned int nbytes, int direction, u32 *nents,
                      u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-        if (sg_is_last(sg)) {
-                /* One entry only case -set to DLLI */
-                if (dma_map_sg(dev, sg, 1, direction) != 1) {
-                        dev_err(dev, "dma_map_sg() single buffer failed\n");
-                        return -ENOMEM;
-                }
-                dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
-                        &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
-                        sg->offset, sg->length);
-                *lbytes = nbytes;
-                *nents = 1;
-                *mapped_nents = 1;
-        } else {  /*sg_is_last*/
-                *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
-                if (*nents > max_sg_nents) {
-                        *nents = 0;
-                        dev_err(dev, "Too many fragments. current %d max %d\n",
-                                *nents, max_sg_nents);
-                        return -ENOMEM;
-                }
-                /* In case of mmu the number of mapped nents might
-                 * be changed from the original sgl nents
-                 */
-                *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-                if (*mapped_nents == 0) {
-                        *nents = 0;
-                        dev_err(dev, "dma_map_sg() sg buffer failed\n");
-                        return -ENOMEM;
-                }
+        int ret = 0;
+
+        *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
+        if (*nents > max_sg_nents) {
+                *nents = 0;
+                dev_err(dev, "Too many fragments. current %d max %d\n",
+                        *nents, max_sg_nents);
+                return -ENOMEM;
         }
 
+        ret = dma_map_sg(dev, sg, *nents, direction);
+        if (dma_mapping_error(dev, ret)) {
+                *nents = 0;
+                dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
+                return -ENOMEM;
+        }
+
+        *mapped_nents = ret;
+
         return 0;
 }
 
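Two details of the unified path are worth noting. First, dma_map_sg() may return fewer entries than it was given when an IOMMU coalesces adjacent segments, which is why the scatterlist entry count stays in *nents while the coalesced count goes to *mapped_nents. Second, dma_map_sg() reports failure by returning 0, whereas dma_mapping_error() is defined for dma_addr_t values from dma_map_single()/dma_map_page() and compares against DMA_MAPPING_ERROR, so feeding it the int entry count is unconventional. A sketch of the usual idiom, assuming a plain zero test is what is intended here:

        ret = dma_map_sg(dev, sg, *nents, direction);
        if (ret == 0) {        /* dma_map_sg() returns 0 on failure */
                *nents = 0;
                dev_err(dev, "dma_map_sg() sg buffer failed\n");
                return -ENOMEM;
        }
        *mapped_nents = ret;   /* coalesced count, may be < *nents */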
@@ -555,11 +546,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
                 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
                 areq_ctx->assoclen, req->cryptlen);
 
-        dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
+        dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
+                     DMA_BIDIRECTIONAL);
         if (req->src != req->dst) {
                 dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
                         sg_virt(req->dst));
-                dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+                dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
                              DMA_BIDIRECTIONAL);
         }
         if (drvdata->coherent &&
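Unmapping with the recorded mapped_nents follows the DMA-API rule (Documentation/core-api/dma-api.rst) that dma_unmap_sg() must be given the same nents that was passed to dma_map_sg(): not the possibly smaller value dma_map_sg() returned, and not sg_nents() of the whole chain, which can exceed what was actually mapped when only part of the list was handed to dma_map_sg(). A sketch of the required pairing; count_mappable_ents() is a hypothetical stand-in for cc_get_sgl_nents():

        int nents = count_mappable_ents(sg, nbytes);  /* hypothetical helper */
        int mapped = dma_map_sg(dev, sg, nents, dir); /* may be < nents */
        if (mapped) {
                /* build HW descriptors from the 'mapped' entries */
                dma_unmap_sg(dev, sg, nents, dir);    /* same nents as the map call */
        }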
@@ -881,7 +873,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
                                             &src_last_bytes);
         sg_index = areq_ctx->src_sgl->length;
         //check where the data starts
-        while (sg_index <= size_to_skip) {
+        while (src_mapped_nents && (sg_index <= size_to_skip)) {
                 src_mapped_nents--;
                 offset -= areq_ctx->src_sgl->length;
                 sgl = sg_next(areq_ctx->src_sgl);
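The added src_mapped_nents condition bounds the skip-ahead walk: the counter is decremented on every pass, so when the data skip covers all mapped entries (for example, a request with no payload past the skip point), an unguarded loop would step it below zero. A minimal model of the failure being guarded against, assuming unsigned counters as in this driver:

        u32 mapped_nents = 1;
        mapped_nents--;   /* 0: every mapped entry consumed by the skip */
        mapped_nents--;   /* without the guard: wraps to 0xffffffff */

The dst-side walk two hunks below receives the same guard.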
@@ -908,7 +900,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
                 size_for_map += crypto_aead_ivsize(tfm);
 
         rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
-                       &areq_ctx->dst.nents,
+                       &areq_ctx->dst.mapped_nents,
                        LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
                        &dst_mapped_nents);
         if (rc)
@@ -921,7 +913,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
         offset = size_to_skip;
 
         //check where the data starts
-        while (sg_index <= size_to_skip) {
+        while (dst_mapped_nents && sg_index <= size_to_skip) {
                 dst_mapped_nents--;
                 offset -= areq_ctx->dst_sgl->length;
                 sgl = sg_next(areq_ctx->dst_sgl);
@@ -1123,7 +1115,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
         if (is_gcm4543)
                 size_to_map += crypto_aead_ivsize(tfm);
         rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
-                       &areq_ctx->src.nents,
+                       &areq_ctx->src.mapped_nents,
                        (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
                         LLI_MAX_NUM_OF_DATA_ENTRIES),
                        &dummy, &mapped_nents);
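With the src and dst cc_map_sg() call sites now storing the *nents output in areq_ctx->{src,dst}.mapped_nents, cc_unmap_aead_request() above can hand dma_unmap_sg() exactly the entry count that was mapped. A sketch of the resulting map/unmap pairing across the request lifetime, with field names as in the driver (max_ents abbreviates the LLI_MAX_* sum from the real call):

        /* map: cc_map_sg() records the count it passed to dma_map_sg() */
        rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
                       &areq_ctx->src.mapped_nents, max_ents, &dummy,
                       &mapped_nents);

        /* unmap: reuse the recorded count, never sg_nents() */
        dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
                     DMA_BIDIRECTIONAL);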