Commit 53a5d5d
crypto: echainiv - Replace chaining with multiplication
The current implementation uses a global per-cpu array to store
data which are used to derive the next IV. This is insecure as
the attacker may change the stored data.

This patch removes all traces of chaining and replaces it with
multiplication of the salt and the sequence number.

Fixes: a10f554 ("crypto: echainiv - Add encrypted chain IV...")
Cc: [email protected]
Reported-by: Mathias Krause <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
1 parent 0bd2223 commit 53a5d5d
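
The derivation the patch installs is small enough to read on its own. Below is a minimal userspace sketch of it; the function name echainiv_derive_iv and the stdint types are illustrative, not part of the kernel patch. Each 64-bit word of the IV becomes (salt_word | 1) * seqno, with the multiplication wrapping modulo 2^64.

#include <stdint.h>
#include <string.h>

/* Illustrative stand-alone version of the derivation loop added by
 * this patch.  ivsize must be a nonzero multiple of 8, which the
 * template enforces at instantiation time (see the last hunk below). */
static void echainiv_derive_iv(uint8_t *iv, const uint8_t *salt,
			       unsigned int ivsize, uint64_t seqno)
{
	do {
		uint64_t a;

		memcpy(&a, salt + ivsize - 8, 8);
		a |= 1;		/* force the multiplier odd */
		a *= seqno;	/* wraps modulo 2^64 */
		memcpy(iv + ivsize - 8, &a, 8);
	} while ((ivsize -= 8));
}

Because each word depends only on the per-key salt and the per-request sequence number, no mutable global state is left for an attacker to tamper with.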

1 file changed: +24 -91 lines
crypto/echainiv.c

@@ -1,8 +1,8 @@
 /*
  * echainiv: Encrypted Chain IV Generator
  *
- * This generator generates an IV based on a sequence number by xoring it
- * with a salt and then encrypting it with the same key as used to encrypt
+ * This generator generates an IV based on a sequence number by multiplying
+ * it with a salt and then encrypting it with the same key as used to encrypt
  * the plain text.  This algorithm requires that the block size be equal
  * to the IV size.  It is mainly useful for CBC.
  *
@@ -24,81 +24,17 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 
-#define MAX_IV_SIZE 16
-
-static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
-
-/* We don't care if we get preempted and read/write IVs from the next CPU. */
-static void echainiv_read_iv(u8 *dst, unsigned size)
-{
-	u32 *a = (u32 *)dst;
-	u32 __percpu *b = echainiv_iv;
-
-	for (; size >= 4; size -= 4) {
-		*a++ = this_cpu_read(*b);
-		b++;
-	}
-}
-
-static void echainiv_write_iv(const u8 *src, unsigned size)
-{
-	const u32 *a = (const u32 *)src;
-	u32 __percpu *b = echainiv_iv;
-
-	for (; size >= 4; size -= 4) {
-		this_cpu_write(*b, *a);
-		a++;
-		b++;
-	}
-}
-
-static void echainiv_encrypt_complete2(struct aead_request *req, int err)
-{
-	struct aead_request *subreq = aead_request_ctx(req);
-	struct crypto_aead *geniv;
-	unsigned int ivsize;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	if (err)
-		goto out;
-
-	geniv = crypto_aead_reqtfm(req);
-	ivsize = crypto_aead_ivsize(geniv);
-
-	echainiv_write_iv(subreq->iv, ivsize);
-
-	if (req->iv != subreq->iv)
-		memcpy(req->iv, subreq->iv, ivsize);
-
-out:
-	if (req->iv != subreq->iv)
-		kzfree(subreq->iv);
-}
-
-static void echainiv_encrypt_complete(struct crypto_async_request *base,
-				      int err)
-{
-	struct aead_request *req = base->data;
-
-	echainiv_encrypt_complete2(req, err);
-	aead_request_complete(req, err);
-}
-
 static int echainiv_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
 	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *subreq = aead_request_ctx(req);
-	crypto_completion_t compl;
-	void *data;
+	__be64 nseqno;
+	u64 seqno;
 	u8 *info;
 	unsigned int ivsize = crypto_aead_ivsize(geniv);
 	int err;
@@ -108,8 +44,6 @@ static int echainiv_encrypt(struct aead_request *req)
 
 	aead_request_set_tfm(subreq, ctx->child);
 
-	compl = echainiv_encrypt_complete;
-	data = req;
 	info = req->iv;
 
 	if (req->src != req->dst) {
@@ -127,29 +61,30 @@ static int echainiv_encrypt(struct aead_request *req)
 			return err;
 	}
 
-	if (unlikely(!IS_ALIGNED((unsigned long)info,
-				 crypto_aead_alignmask(geniv) + 1))) {
-		info = kmalloc(ivsize, req->base.flags &
-				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-								  GFP_ATOMIC);
-		if (!info)
-			return -ENOMEM;
-
-		memcpy(info, req->iv, ivsize);
-	}
-
-	aead_request_set_callback(subreq, req->base.flags, compl, data);
+	aead_request_set_callback(subreq, req->base.flags,
+				  req->base.complete, req->base.data);
 	aead_request_set_crypt(subreq, req->dst, req->dst,
 			       req->cryptlen, info);
 	aead_request_set_ad(subreq, req->assoclen);
 
-	crypto_xor(info, ctx->salt, ivsize);
+	memcpy(&nseqno, info + ivsize - 8, 8);
+	seqno = be64_to_cpu(nseqno);
+	memset(info, 0, ivsize);
+
 	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
-	echainiv_read_iv(info, ivsize);
 
-	err = crypto_aead_encrypt(subreq);
-	echainiv_encrypt_complete2(req, err);
-	return err;
+	do {
+		u64 a;
+
+		memcpy(&a, ctx->salt + ivsize - 8, 8);
+
+		a |= 1;
+		a *= seqno;
+
+		memcpy(info + ivsize - 8, &a, 8);
+	} while ((ivsize -= 8));
+
+	return crypto_aead_encrypt(subreq);
 }
 
 static int echainiv_decrypt(struct aead_request *req)
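
A note on the "a |= 1" step above: forcing the multiplier odd makes it a unit modulo 2^64, so for a fixed salt word the map seqno -> a * seqno is a bijection, and two distinct sequence numbers can never collide in the derived word. A self-contained check of that fact (illustrative, not part of the patch) computes the inverse of an odd 64-bit value by Newton iteration and verifies the round trip:

#include <assert.h>
#include <stdint.h>

/* Inverse of an odd a modulo 2^64.  x = a is correct to 3 bits since
 * a * a == 1 (mod 8) for any odd a, and each Newton step doubles the
 * number of correct low-order bits, so 5 steps cover 64 bits. */
static uint64_t inv_mod_2_64(uint64_t a)
{
	uint64_t x = a;

	for (int i = 0; i < 5; i++)
		x *= 2 - a * x;
	return x;
}

int main(void)
{
	uint64_t a = 0x123456789abcdef0ULL | 1;
	uint64_t seqno = 42;
	uint64_t word = a * seqno;	/* wraps modulo 2^64 */

	assert(inv_mod_2_64(a) * word == seqno);
	return 0;
}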
@@ -196,8 +131,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
 	alg = crypto_spawn_aead_alg(spawn);
 
 	err = -EINVAL;
-	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
-	    inst->alg.ivsize > MAX_IV_SIZE)
+	if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
 		goto free_inst;
 
 	inst->alg.encrypt = echainiv_encrypt;
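
The tightened check admits exactly the IV sizes the new derivation loop can consume: a nonzero multiple of sizeof(u64). A throwaway illustration of the mask test (again not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* ivsize & (sizeof(uint64_t) - 1) is nonzero exactly when ivsize
	 * is not a multiple of 8; a zero ivsize is rejected separately. */
	for (unsigned int ivsize = 0; ivsize <= 16; ivsize++) {
		int ok = !(ivsize & (sizeof(uint64_t) - 1)) && ivsize;

		assert(ok == (ivsize == 8 || ivsize == 16));
	}
	return 0;
}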
@@ -206,7 +140,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
 	inst->alg.init = aead_init_geniv;
 	inst->alg.exit = aead_exit_geniv;
 
-	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
 	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
 	inst->alg.base.cra_ctxsize += inst->alg.ivsize;
 