Skip to content

Commit e37a7e5

Browse files
Cyrille Pitchen authored and Herbert Xu committed
crypto: atmel-aes - create sections to regroup functions by usage
This patch only creates sections to regroup functions by usage. This will help to integrate the GCM support patch later by making the difference between shared/common and specific code. Hence current sections are: - Shared functions: common code which will be reused by the GCM support. - CPU transfer: handles transfers monitored by the CPU (PIO accesses). - DMA transfer: handles transfers monitored by the DMA controller. - AES async block ciphers: dedicated to the already supported block ciphers - Probe functions: used to register all crypto algorithms. Signed-off-by: Cyrille Pitchen <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
1 parent afbac17 commit e37a7e5

File tree

1 file changed

+108
-102
lines changed

1 file changed

+108
-102
lines changed

drivers/crypto/atmel-aes.c

Lines changed: 108 additions & 102 deletions
Original file line numberDiff line numberDiff line change
@@ -166,6 +166,7 @@ static struct atmel_aes_drv atmel_aes = {
166166
.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
167167
};
168168

169+
/* Shared functions */
169170

170171
static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
171172
{
@@ -302,6 +303,38 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
302303
return err;
303304
}
304305

306+
static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
307+
const u32 *iv)
308+
{
309+
u32 valmr = 0;
310+
311+
/* MR register must be set before IV registers */
312+
if (dd->ctx->keylen == AES_KEYSIZE_128)
313+
valmr |= AES_MR_KEYSIZE_128;
314+
else if (dd->ctx->keylen == AES_KEYSIZE_192)
315+
valmr |= AES_MR_KEYSIZE_192;
316+
else
317+
valmr |= AES_MR_KEYSIZE_256;
318+
319+
valmr |= dd->flags & AES_FLAGS_MODE_MASK;
320+
321+
if (use_dma) {
322+
valmr |= AES_MR_SMOD_IDATAR0;
323+
if (dd->caps.has_dualbuff)
324+
valmr |= AES_MR_DUALBUFF;
325+
} else {
326+
valmr |= AES_MR_SMOD_AUTO;
327+
}
328+
329+
atmel_aes_write(dd, AES_MR, valmr);
330+
331+
atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
332+
SIZE_IN_WORDS(dd->ctx->keylen));
333+
334+
if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
335+
atmel_aes_write_block(dd, AES_IVR(0), iv);
336+
}
337+
305338

306339
/* CPU transfer */
307340

@@ -661,38 +694,6 @@ static void atmel_aes_dma_callback(void *data)
661694
(void)dd->resume(dd);
662695
}
663696

664-
static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
665-
const u32 *iv)
666-
{
667-
u32 valmr = 0;
668-
669-
/* MR register must be set before IV registers */
670-
if (dd->ctx->keylen == AES_KEYSIZE_128)
671-
valmr |= AES_MR_KEYSIZE_128;
672-
else if (dd->ctx->keylen == AES_KEYSIZE_192)
673-
valmr |= AES_MR_KEYSIZE_192;
674-
else
675-
valmr |= AES_MR_KEYSIZE_256;
676-
677-
valmr |= dd->flags & AES_FLAGS_MODE_MASK;
678-
679-
if (use_dma) {
680-
valmr |= AES_MR_SMOD_IDATAR0;
681-
if (dd->caps.has_dualbuff)
682-
valmr |= AES_MR_DUALBUFF;
683-
} else {
684-
valmr |= AES_MR_SMOD_AUTO;
685-
}
686-
687-
atmel_aes_write(dd, AES_MR, valmr);
688-
689-
atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
690-
SIZE_IN_WORDS(dd->ctx->keylen));
691-
692-
if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
693-
atmel_aes_write_block(dd, AES_IVR(0), iv);
694-
}
695-
696697
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
697698
struct crypto_async_request *new_areq)
698699
{
@@ -730,6 +731,9 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
730731
return (dd->is_async) ? ret : err;
731732
}
732733

734+
735+
/* AES async block ciphers */
736+
733737
static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
734738
{
735739
return atmel_aes_complete(dd, 0);
@@ -758,26 +762,6 @@ static int atmel_aes_start(struct atmel_aes_dev *dd)
758762
atmel_aes_transfer_complete);
759763
}
760764

761-
762-
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
763-
{
764-
dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
765-
dd->buflen = ATMEL_AES_BUFFER_SIZE;
766-
dd->buflen &= ~(AES_BLOCK_SIZE - 1);
767-
768-
if (!dd->buf) {
769-
dev_err(dd->dev, "unable to alloc pages.\n");
770-
return -ENOMEM;
771-
}
772-
773-
return 0;
774-
}
775-
776-
static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
777-
{
778-
free_page((unsigned long)dd->buf);
779-
}
780-
781765
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
782766
{
783767
struct atmel_aes_base_ctx *ctx;
@@ -817,56 +801,6 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
817801
return atmel_aes_handle_queue(dd, &req->base);
818802
}
819803

820-
static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
821-
{
822-
struct at_dma_slave *sl = slave;
823-
824-
if (sl && sl->dma_dev == chan->device->dev) {
825-
chan->private = sl;
826-
return true;
827-
} else {
828-
return false;
829-
}
830-
}
831-
832-
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
833-
struct crypto_platform_data *pdata)
834-
{
835-
struct at_dma_slave *slave;
836-
int err = -ENOMEM;
837-
dma_cap_mask_t mask;
838-
839-
dma_cap_zero(mask);
840-
dma_cap_set(DMA_SLAVE, mask);
841-
842-
/* Try to grab 2 DMA channels */
843-
slave = &pdata->dma_slave->rxdata;
844-
dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
845-
slave, dd->dev, "tx");
846-
if (!dd->src.chan)
847-
goto err_dma_in;
848-
849-
slave = &pdata->dma_slave->txdata;
850-
dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
851-
slave, dd->dev, "rx");
852-
if (!dd->dst.chan)
853-
goto err_dma_out;
854-
855-
return 0;
856-
857-
err_dma_out:
858-
dma_release_channel(dd->src.chan);
859-
err_dma_in:
860-
dev_warn(dd->dev, "no DMA channel available\n");
861-
return err;
862-
}
863-
864-
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
865-
{
866-
dma_release_channel(dd->dst.chan);
867-
dma_release_channel(dd->src.chan);
868-
}
869-
870804
static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
871805
unsigned int keylen)
872806
{
@@ -1181,6 +1115,78 @@ static struct crypto_alg aes_cfb64_alg = {
11811115
}
11821116
};
11831117

1118+
1119+
/* Probe functions */
1120+
1121+
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
1122+
{
1123+
dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
1124+
dd->buflen = ATMEL_AES_BUFFER_SIZE;
1125+
dd->buflen &= ~(AES_BLOCK_SIZE - 1);
1126+
1127+
if (!dd->buf) {
1128+
dev_err(dd->dev, "unable to alloc pages.\n");
1129+
return -ENOMEM;
1130+
}
1131+
1132+
return 0;
1133+
}
1134+
1135+
static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
1136+
{
1137+
free_page((unsigned long)dd->buf);
1138+
}
1139+
1140+
static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
1141+
{
1142+
struct at_dma_slave *sl = slave;
1143+
1144+
if (sl && sl->dma_dev == chan->device->dev) {
1145+
chan->private = sl;
1146+
return true;
1147+
} else {
1148+
return false;
1149+
}
1150+
}
1151+
1152+
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
1153+
struct crypto_platform_data *pdata)
1154+
{
1155+
struct at_dma_slave *slave;
1156+
int err = -ENOMEM;
1157+
dma_cap_mask_t mask;
1158+
1159+
dma_cap_zero(mask);
1160+
dma_cap_set(DMA_SLAVE, mask);
1161+
1162+
/* Try to grab 2 DMA channels */
1163+
slave = &pdata->dma_slave->rxdata;
1164+
dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
1165+
slave, dd->dev, "tx");
1166+
if (!dd->src.chan)
1167+
goto err_dma_in;
1168+
1169+
slave = &pdata->dma_slave->txdata;
1170+
dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
1171+
slave, dd->dev, "rx");
1172+
if (!dd->dst.chan)
1173+
goto err_dma_out;
1174+
1175+
return 0;
1176+
1177+
err_dma_out:
1178+
dma_release_channel(dd->src.chan);
1179+
err_dma_in:
1180+
dev_warn(dd->dev, "no DMA channel available\n");
1181+
return err;
1182+
}
1183+
1184+
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
1185+
{
1186+
dma_release_channel(dd->dst.chan);
1187+
dma_release_channel(dd->src.chan);
1188+
}
1189+
11841190
static void atmel_aes_queue_task(unsigned long data)
11851191
{
11861192
struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

0 commit comments

Comments
 (0)