Skip to content

Commit dadf5e5

Browse files
ebiggers authored and herbertx committed
crypto: adiantum - add fast path for single-page messages
When the source scatterlist is a single page, optimize the first hash step of adiantum to use crypto_shash_digest() instead of init/update/final, and use the same local kmap for both hashing the bulk part and loading the narrow part of the source data. Likewise, when the destination scatterlist is a single page, optimize the second hash step of adiantum to use crypto_shash_digest() instead of init/update/final, and use the same local kmap for both hashing the bulk part and storing the narrow part of the destination data. In some cases these optimizations improve performance significantly. Note: ideally, for optimal performance each architecture should implement the full "adiantum(xchacha12,aes)" algorithm and fully optimize the contiguous buffer case to use no indirect calls. That's not something I've gotten around to doing, though. This commit just makes a relatively small change that provides some benefit with the existing template-based approach. Signed-off-by: Eric Biggers <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
1 parent 01aed66 commit dadf5e5

File tree

1 file changed

+47
-18
lines changed

1 file changed

+47
-18
lines changed

crypto/adiantum.c

Lines changed: 47 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -245,25 +245,21 @@ static void adiantum_hash_header(struct skcipher_request *req)
245245

246246
/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
247247
static int adiantum_hash_message(struct skcipher_request *req,
248-
struct scatterlist *sgl, le128 *digest)
248+
struct scatterlist *sgl, unsigned int nents,
249+
le128 *digest)
249250
{
250-
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
251-
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
252251
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
253252
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
254253
struct shash_desc *hash_desc = &rctx->u.hash_desc;
255254
struct sg_mapping_iter miter;
256255
unsigned int i, n;
257256
int err;
258257

259-
hash_desc->tfm = tctx->hash;
260-
261258
err = crypto_shash_init(hash_desc);
262259
if (err)
263260
return err;
264261

265-
sg_miter_start(&miter, sgl, sg_nents(sgl),
266-
SG_MITER_FROM_SG | SG_MITER_ATOMIC);
262+
sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
267263
for (i = 0; i < bulk_len; i += n) {
268264
sg_miter_next(&miter);
269265
n = min_t(unsigned int, miter.length, bulk_len - i);
@@ -285,6 +281,8 @@ static int adiantum_finish(struct skcipher_request *req)
285281
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
286282
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
287283
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
284+
struct scatterlist *dst = req->dst;
285+
const unsigned int dst_nents = sg_nents(dst);
288286
le128 digest;
289287
int err;
290288

@@ -298,13 +296,30 @@ static int adiantum_finish(struct skcipher_request *req)
298296
* enc: C_R = C_M - H_{K_H}(T, C_L)
299297
* dec: P_R = P_M - H_{K_H}(T, P_L)
300298
*/
301-
err = adiantum_hash_message(req, req->dst, &digest);
302-
if (err)
303-
return err;
304-
le128_add(&digest, &digest, &rctx->header_hash);
305-
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
306-
scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst,
307-
bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1);
299+
rctx->u.hash_desc.tfm = tctx->hash;
300+
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
301+
if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
302+
/* Fast path for single-page destination */
303+
void *virt = kmap_local_page(sg_page(dst)) + dst->offset;
304+
305+
err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
306+
(u8 *)&digest);
307+
if (err) {
308+
kunmap_local(virt);
309+
return err;
310+
}
311+
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
312+
memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
313+
kunmap_local(virt);
314+
} else {
315+
/* Slow path that works for any destination scatterlist */
316+
err = adiantum_hash_message(req, dst, dst_nents, &digest);
317+
if (err)
318+
return err;
319+
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
320+
scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst,
321+
bulk_len, sizeof(le128), 1);
322+
}
308323
return 0;
309324
}
310325

@@ -324,6 +339,8 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
324339
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
325340
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
326341
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
342+
struct scatterlist *src = req->src;
343+
const unsigned int src_nents = sg_nents(src);
327344
unsigned int stream_len;
328345
le128 digest;
329346
int err;
@@ -339,12 +356,24 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
339356
* dec: C_M = C_R + H_{K_H}(T, C_L)
340357
*/
341358
adiantum_hash_header(req);
342-
err = adiantum_hash_message(req, req->src, &digest);
359+
rctx->u.hash_desc.tfm = tctx->hash;
360+
if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) {
361+
/* Fast path for single-page source */
362+
void *virt = kmap_local_page(sg_page(src)) + src->offset;
363+
364+
err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
365+
(u8 *)&digest);
366+
memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
367+
kunmap_local(virt);
368+
} else {
369+
/* Slow path that works for any source scatterlist */
370+
err = adiantum_hash_message(req, src, src_nents, &digest);
371+
scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
372+
bulk_len, sizeof(le128), 0);
373+
}
343374
if (err)
344375
return err;
345-
le128_add(&digest, &digest, &rctx->header_hash);
346-
scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src,
347-
bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0);
376+
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
348377
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
349378

350379
/* If encrypting, encrypt P_M with the block cipher to get C_M */

0 commit comments

Comments
 (0)