Skip to content

Commit 199354d

Browse files
committed
crypto: caam - Remove GFP_DMA and add DMA alignment padding
GFP_DMA does not guarantee that the returned memory is aligned for DMA. It should be removed where it is superfluous. However, kmalloc may start returning DMA-unaligned memory in the future, so fix this by adding the alignment by hand.

Signed-off-by: Herbert Xu <[email protected]>
1 parent c27b2d2 commit 199354d

File tree

13 files changed

+111
-71
lines changed

13 files changed

+111
-71
lines changed

drivers/crypto/caam/blob_gen.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
8383
output_len = info->input_len - CAAM_BLOB_OVERHEAD;
8484
}
8585

86-
desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL | GFP_DMA);
86+
desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL);
8787
if (!desc)
8888
return -ENOMEM;
8989

drivers/crypto/caam/caamalg.c

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,8 @@
5959
#include <crypto/engine.h>
6060
#include <crypto/xts.h>
6161
#include <asm/unaligned.h>
62+
#include <linux/dma-mapping.h>
63+
#include <linux/kernel.h>
6264

6365
/*
6466
* crypto alg
@@ -1379,8 +1381,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
13791381
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
13801382

13811383
/* allocate space for base edesc and hw desc commands, link tables */
1382-
edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
1383-
GFP_DMA | flags);
1384+
edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags);
13841385
if (!edesc) {
13851386
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
13861387
0, 0, 0);
@@ -1608,6 +1609,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
16081609
u8 *iv;
16091610
int ivsize = crypto_skcipher_ivsize(skcipher);
16101611
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
1612+
unsigned int aligned_size;
16111613

16121614
src_nents = sg_nents_for_len(req->src, req->cryptlen);
16131615
if (unlikely(src_nents < 0)) {
@@ -1681,15 +1683,18 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
16811683
/*
16821684
* allocate space for base edesc and hw desc commands, link tables, IV
16831685
*/
1684-
edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
1685-
GFP_DMA | flags);
1686-
if (!edesc) {
1686+
aligned_size = ALIGN(ivsize, __alignof__(*edesc));
1687+
aligned_size += sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
1688+
aligned_size = ALIGN(aligned_size, dma_get_cache_alignment());
1689+
iv = kzalloc(aligned_size, flags);
1690+
if (!iv) {
16871691
dev_err(jrdev, "could not allocate extended descriptor\n");
16881692
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
16891693
0, 0, 0);
16901694
return ERR_PTR(-ENOMEM);
16911695
}
16921696

1697+
edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc)));
16931698
edesc->src_nents = src_nents;
16941699
edesc->dst_nents = dst_nents;
16951700
edesc->mapped_src_nents = mapped_src_nents;
@@ -1701,7 +1706,6 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
17011706

17021707
/* Make sure IV is located in a DMAable area */
17031708
if (ivsize) {
1704-
iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
17051709
memcpy(iv, req->iv, ivsize);
17061710

17071711
iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);

drivers/crypto/caam/caamalg_qi.c

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,8 @@
2020
#include "caamalg_desc.h"
2121
#include <crypto/xts.h>
2222
#include <asm/unaligned.h>
23+
#include <linux/dma-mapping.h>
24+
#include <linux/kernel.h>
2325

2426
/*
2527
* crypto alg
@@ -959,7 +961,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
959961
return (struct aead_edesc *)drv_ctx;
960962

961963
/* allocate space for base edesc and hw desc commands, link tables */
962-
edesc = qi_cache_alloc(GFP_DMA | flags);
964+
edesc = qi_cache_alloc(flags);
963965
if (unlikely(!edesc)) {
964966
dev_err(qidev, "could not allocate extended descriptor\n");
965967
return ERR_PTR(-ENOMEM);
@@ -1317,8 +1319,9 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
13171319
qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
13181320

13191321
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
1320-
if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1321-
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1322+
if (unlikely(ALIGN(ivsize, __alignof__(*edesc)) +
1323+
offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes >
1324+
CAAM_QI_MEMCACHE_SIZE)) {
13221325
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
13231326
qm_sg_ents, ivsize);
13241327
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -1327,17 +1330,18 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
13271330
}
13281331

13291332
/* allocate space for base edesc, link tables and IV */
1330-
edesc = qi_cache_alloc(GFP_DMA | flags);
1331-
if (unlikely(!edesc)) {
1333+
iv = qi_cache_alloc(flags);
1334+
if (unlikely(!iv)) {
13321335
dev_err(qidev, "could not allocate extended descriptor\n");
13331336
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13341337
0, DMA_NONE, 0, 0);
13351338
return ERR_PTR(-ENOMEM);
13361339
}
13371340

1341+
edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc)));
1342+
13381343
/* Make sure IV is located in a DMAable area */
13391344
sg_table = &edesc->sgt[0];
1340-
iv = (u8 *)(sg_table + qm_sg_ents);
13411345
memcpy(iv, req->iv, ivsize);
13421346

13431347
iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);

drivers/crypto/caam/caamalg_qi2.c

Lines changed: 29 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,9 @@
1616
#include "caamalg_desc.h"
1717
#include "caamhash_desc.h"
1818
#include "dpseci-debugfs.h"
19+
#include <linux/dma-mapping.h>
1920
#include <linux/fsl/mc.h>
21+
#include <linux/kernel.h>
2022
#include <soc/fsl/dpaa2-io.h>
2123
#include <soc/fsl/dpaa2-fd.h>
2224
#include <crypto/xts.h>
@@ -370,7 +372,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
370372
struct dpaa2_sg_entry *sg_table;
371373

372374
/* allocate space for base edesc, link tables and IV */
373-
edesc = qi_cache_zalloc(GFP_DMA | flags);
375+
edesc = qi_cache_zalloc(flags);
374376
if (unlikely(!edesc)) {
375377
dev_err(dev, "could not allocate extended descriptor\n");
376378
return ERR_PTR(-ENOMEM);
@@ -1189,7 +1191,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
11891191
}
11901192

11911193
/* allocate space for base edesc, link tables and IV */
1192-
edesc = qi_cache_zalloc(GFP_DMA | flags);
1194+
edesc = qi_cache_zalloc(flags);
11931195
if (unlikely(!edesc)) {
11941196
dev_err(dev, "could not allocate extended descriptor\n");
11951197
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -3220,14 +3222,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
32203222
int ret = -ENOMEM;
32213223
struct dpaa2_fl_entry *in_fle, *out_fle;
32223224

3223-
req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3225+
req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL);
32243226
if (!req_ctx)
32253227
return -ENOMEM;
32263228

32273229
in_fle = &req_ctx->fd_flt[1];
32283230
out_fle = &req_ctx->fd_flt[0];
32293231

3230-
flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3232+
flc = kzalloc(sizeof(*flc), GFP_KERNEL);
32313233
if (!flc)
32323234
goto err_flc;
32333235

@@ -3316,7 +3318,13 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
33163318
dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
33173319

33183320
if (keylen > blocksize) {
3319-
hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3321+
unsigned int aligned_len =
3322+
ALIGN(keylen, dma_get_cache_alignment());
3323+
3324+
if (aligned_len < keylen)
3325+
return -EOVERFLOW;
3326+
3327+
hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
33203328
if (!hashed_key)
33213329
return -ENOMEM;
33223330
ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
@@ -3560,7 +3568,7 @@ static int ahash_update_ctx(struct ahash_request *req)
35603568
}
35613569

35623570
/* allocate space for base edesc and link tables */
3563-
edesc = qi_cache_zalloc(GFP_DMA | flags);
3571+
edesc = qi_cache_zalloc(flags);
35643572
if (!edesc) {
35653573
dma_unmap_sg(ctx->dev, req->src, src_nents,
35663574
DMA_TO_DEVICE);
@@ -3654,7 +3662,7 @@ static int ahash_final_ctx(struct ahash_request *req)
36543662
int ret;
36553663

36563664
/* allocate space for base edesc and link tables */
3657-
edesc = qi_cache_zalloc(GFP_DMA | flags);
3665+
edesc = qi_cache_zalloc(flags);
36583666
if (!edesc)
36593667
return -ENOMEM;
36603668

@@ -3743,7 +3751,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
37433751
}
37443752

37453753
/* allocate space for base edesc and link tables */
3746-
edesc = qi_cache_zalloc(GFP_DMA | flags);
3754+
edesc = qi_cache_zalloc(flags);
37473755
if (!edesc) {
37483756
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
37493757
return -ENOMEM;
@@ -3836,7 +3844,7 @@ static int ahash_digest(struct ahash_request *req)
38363844
}
38373845

38383846
/* allocate space for base edesc and link tables */
3839-
edesc = qi_cache_zalloc(GFP_DMA | flags);
3847+
edesc = qi_cache_zalloc(flags);
38403848
if (!edesc) {
38413849
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
38423850
return ret;
@@ -3913,7 +3921,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
39133921
int ret = -ENOMEM;
39143922

39153923
/* allocate space for base edesc and link tables */
3916-
edesc = qi_cache_zalloc(GFP_DMA | flags);
3924+
edesc = qi_cache_zalloc(flags);
39173925
if (!edesc)
39183926
return ret;
39193927

@@ -4012,7 +4020,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
40124020
}
40134021

40144022
/* allocate space for base edesc and link tables */
4015-
edesc = qi_cache_zalloc(GFP_DMA | flags);
4023+
edesc = qi_cache_zalloc(flags);
40164024
if (!edesc) {
40174025
dma_unmap_sg(ctx->dev, req->src, src_nents,
40184026
DMA_TO_DEVICE);
@@ -4125,7 +4133,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
41254133
}
41264134

41274135
/* allocate space for base edesc and link tables */
4128-
edesc = qi_cache_zalloc(GFP_DMA | flags);
4136+
edesc = qi_cache_zalloc(flags);
41294137
if (!edesc) {
41304138
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
41314139
return ret;
@@ -4230,7 +4238,7 @@ static int ahash_update_first(struct ahash_request *req)
42304238
}
42314239

42324240
/* allocate space for base edesc and link tables */
4233-
edesc = qi_cache_zalloc(GFP_DMA | flags);
4241+
edesc = qi_cache_zalloc(flags);
42344242
if (!edesc) {
42354243
dma_unmap_sg(ctx->dev, req->src, src_nents,
42364244
DMA_TO_DEVICE);
@@ -4926,6 +4934,7 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
49264934
{
49274935
struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
49284936
struct device *dev = priv->dev;
4937+
unsigned int alignmask;
49294938
int err;
49304939

49314940
/*
@@ -4936,13 +4945,14 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
49364945
!(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
49374946
return 0;
49384947

4939-
priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4940-
GFP_KERNEL | GFP_DMA);
4948+
alignmask = DPAA2_CSCN_ALIGN - 1;
4949+
alignmask |= dma_get_cache_alignment() - 1;
4950+
priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1),
4951+
GFP_KERNEL);
49414952
if (!priv->cscn_mem)
49424953
return -ENOMEM;
49434954

4944-
priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4945-
priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4955+
priv->cscn_dma = dma_map_single(dev, priv->cscn_mem,
49464956
DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
49474957
if (dma_mapping_error(dev, priv->cscn_dma)) {
49484958
dev_err(dev, "Error mapping CSCN memory area\n");
@@ -5174,7 +5184,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
51745184
priv->domain = iommu_get_domain_for_dev(dev);
51755185

51765186
qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5177-
0, SLAB_CACHE_DMA, NULL);
5187+
0, 0, NULL);
51785188
if (!qi_cache) {
51795189
dev_err(dev, "Can't allocate SEC cache\n");
51805190
return -ENOMEM;
@@ -5451,7 +5461,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
54515461
dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
54525462
DPAA2_CSCN_SIZE,
54535463
DMA_FROM_DEVICE);
5454-
if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5464+
if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
54555465
dev_dbg_ratelimited(dev, "Dropping request\n");
54565466
return -EBUSY;
54575467
}

drivers/crypto/caam/caamalg_qi2.h

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,13 +7,14 @@
77
#ifndef _CAAMALG_QI2_H_
88
#define _CAAMALG_QI2_H_
99

10+
#include <crypto/internal/skcipher.h>
11+
#include <linux/compiler_attributes.h>
1012
#include <soc/fsl/dpaa2-io.h>
1113
#include <soc/fsl/dpaa2-fd.h>
1214
#include <linux/threads.h>
1315
#include <linux/netdevice.h>
1416
#include "dpseci.h"
1517
#include "desc_constr.h"
16-
#include <crypto/skcipher.h>
1718

1819
#define DPAA2_CAAM_STORE_SIZE 16
1920
/* NAPI weight *must* be a multiple of the store size. */
@@ -36,8 +37,6 @@
3637
* @tx_queue_attr: array of Tx queue attributes
3738
* @cscn_mem: pointer to memory region containing the congestion SCN
3839
* it's size is larger than to accommodate alignment
39-
* @cscn_mem_aligned: pointer to congestion SCN; it is computed as
40-
* PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
4140
* @cscn_dma: dma address used by the QMAN to write CSCN messages
4241
* @dev: device associated with the DPSECI object
4342
* @mc_io: pointer to MC portal's I/O object
@@ -58,7 +57,6 @@ struct dpaa2_caam_priv {
5857

5958
/* congestion */
6059
void *cscn_mem;
61-
void *cscn_mem_aligned;
6260
dma_addr_t cscn_dma;
6361

6462
struct device *dev;
@@ -158,7 +156,7 @@ struct ahash_edesc {
158156
struct caam_flc {
159157
u32 flc[16];
160158
u32 sh_desc[MAX_SDLEN];
161-
} ____cacheline_aligned;
159+
} __aligned(CRYPTO_DMA_ALIGN);
162160

163161
enum optype {
164162
ENCRYPT = 0,
@@ -180,7 +178,7 @@ enum optype {
180178
* @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
181179
*/
182180
struct caam_request {
183-
struct dpaa2_fl_entry fd_flt[2];
181+
struct dpaa2_fl_entry fd_flt[2] __aligned(CRYPTO_DMA_ALIGN);
184182
dma_addr_t fd_flt_dma;
185183
struct caam_flc *flc;
186184
dma_addr_t flc_dma;

drivers/crypto/caam/caamhash.c

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,8 @@
6666
#include "key_gen.h"
6767
#include "caamhash_desc.h"
6868
#include <crypto/engine.h>
69+
#include <linux/dma-mapping.h>
70+
#include <linux/kernel.h>
6971

7072
#define CAAM_CRA_PRIORITY 3000
7173

@@ -365,7 +367,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
365367
dma_addr_t key_dma;
366368
int ret;
367369

368-
desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
370+
desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
369371
if (!desc) {
370372
dev_err(jrdev, "unable to allocate key input memory\n");
371373
return -ENOMEM;
@@ -432,7 +434,13 @@ static int ahash_setkey(struct crypto_ahash *ahash,
432434
dev_dbg(jrdev, "keylen %d\n", keylen);
433435

434436
if (keylen > blocksize) {
435-
hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
437+
unsigned int aligned_len =
438+
ALIGN(keylen, dma_get_cache_alignment());
439+
440+
if (aligned_len < keylen)
441+
return -EOVERFLOW;
442+
443+
hashed_key = kmemdup(key, keylen, GFP_KERNEL);
436444
if (!hashed_key)
437445
return -ENOMEM;
438446
ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
@@ -702,7 +710,7 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
702710
struct ahash_edesc *edesc;
703711
unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
704712

705-
edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
713+
edesc = kzalloc(sizeof(*edesc) + sg_size, flags);
706714
if (!edesc) {
707715
dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
708716
return NULL;

0 commit comments

Comments (0)