
Commit b20209c

crypto: aesni - Fix out-of-bounds access of the data buffer in generic-gcm-aesni

Junaid Shahid authored and herbertx committed
The aesni_gcm_enc/dec functions can access memory before the start of the data buffer if the length of the data buffer is less than 16 bytes. This is because they perform the read via a single 16-byte load. This can potentially result in accessing a page that is not mapped and thus causing the machine to crash. This patch fixes that by reading the partial block byte-by-byte and, optionally, via an 8-byte load if the block was at least 8 bytes.

Fixes: 0487cca ("crypto: aesni - make non-AVX AES-GCM work with any aadlen")
Cc: <[email protected]>
Signed-off-by: Junaid Shahid <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
1 parent 02d9e32 commit b20209c
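
The hazard is easiest to see in a simplified C model of the old read path (illustrative only; the real code is x86-64 assembly and the helper name here is invented):

#include <stdint.h>
#include <string.h>

/*
 * Model of the old partial-block read: one 16-byte load positioned so
 * that it ends exactly at the end of the data, i.e. the effect of
 *	sub $16, %r11
 *	add %r13, %r11
 *	movdqu (%arg3,%r11,1), %xmm1
 */
static void old_read_last_block(const uint8_t *buf, size_t len,
				uint8_t block[16])
{
	/* Reads buf[len - 16 .. len - 1]. If the whole buffer is
	 * shorter than 16 bytes, this starts before buf and can hit
	 * an unmapped page, crashing the machine. */
	memcpy(block, buf + len - 16, 16);
}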

File tree

1 file changed

arch/x86/crypto/aesni-intel_asm.S

Lines changed: 45 additions & 42 deletions
@@ -256,6 +256,37 @@ aad_shift_arr:
 	pxor	\TMP1, \GH		# result is in TMP1
 .endm
 
+# Reads DLEN bytes starting at DPTR and stores in XMMDst
+# where 0 < DLEN < 16
+# Clobbers %rax, DLEN and XMM1
+.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst
+	cmp $8, \DLEN
+	jl _read_lt8_\@
+	mov (\DPTR), %rax
+	MOVQ_R64_XMM %rax, \XMMDst
+	sub $8, \DLEN
+	jz _done_read_partial_block_\@
+	xor %eax, %eax
+_read_next_byte_\@:
+	shl $8, %rax
+	mov 7(\DPTR, \DLEN, 1), %al
+	dec \DLEN
+	jnz _read_next_byte_\@
+	MOVQ_R64_XMM %rax, \XMM1
+	pslldq $8, \XMM1
+	por \XMM1, \XMMDst
+	jmp _done_read_partial_block_\@
+_read_lt8_\@:
+	xor %eax, %eax
+_read_next_byte_lt8_\@:
+	shl $8, %rax
+	mov -1(\DPTR, \DLEN, 1), %al
+	dec \DLEN
+	jnz _read_next_byte_lt8_\@
+	MOVQ_R64_XMM %rax, \XMMDst
+_done_read_partial_block_\@:
+.endm
+
 /*
 * if a = number of total plaintext bytes
 * b = floor(a/16)
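
For readers cross-checking the macro, a rough C equivalent follows (a sketch only, assuming a little-endian host as on x86; the helper name is invented and the kernel's implementation is the assembly above). It reads the DLEN valid bytes with at most one 8-byte load plus byte loads, never touching memory outside [dptr, dptr + dlen):

#include <stdint.h>
#include <string.h>

static void read_partial_block(const uint8_t *dptr, size_t dlen,
			       uint8_t dst[16])
{
	uint64_t lo = 0, hi = 0;
	size_t i;

	if (dlen >= 8) {
		memcpy(&lo, dptr, 8);		/* mov (\DPTR), %rax */
		/* Remaining 1..7 bytes, accumulated high byte first,
		 * mirroring the shl $8 / mov 7(\DPTR,\DLEN,1) loop. */
		for (i = dlen; i > 8; i--)
			hi = (hi << 8) | dptr[i - 1];
	} else {
		/* Fewer than 8 bytes: byte loads only, mirroring the
		 * shl $8 / mov -1(\DPTR,\DLEN,1) loop. */
		for (i = dlen; i > 0; i--)
			lo = (lo << 8) | dptr[i - 1];
	}
	memcpy(dst, &lo, 8);		/* MOVQ_R64_XMM: low half */
	memcpy(dst + 8, &hi, 8);	/* pslldq $8 + por: high half */
}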
@@ -1385,14 +1416,6 @@ _esb_loop_\@:
 *
 * AAD Format with 64-bit Extended Sequence Number
 *
-* aadLen:
-*	from the definition of the spec, aadLen can only be 8 or 12 bytes.
-*	The code supports 16 too but for other sizes, the code will fail.
-*
-* TLen:
-*	from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
-*	For other sizes, the code will fail.
-*
 * poly = x^128 + x^127 + x^126 + x^121 + 1
 *
 *****************************************************************************/
@@ -1486,19 +1509,16 @@ _zero_cipher_left_decrypt:
 	PSHUFB_XMM %xmm10, %xmm0
 
 	ENCRYPT_SINGLE_BLOCK	%xmm0, %xmm1	# E(K, Yn)
-	sub $16, %r11
-	add %r13, %r11
-	movdqu (%arg3,%r11,1), %xmm1	# receive the last <16 byte block
-	lea SHIFT_MASK+16(%rip), %r12
-	sub %r13, %r12
-# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
-# (%r13 is the number of bytes in plaintext mod 16)
-	movdqu (%r12), %xmm2		# get the appropriate shuffle mask
-	PSHUFB_XMM %xmm2, %xmm1		# right shift 16-%r13 butes
 
+	lea (%arg3,%r11,1), %r10
+	mov %r13, %r12
+	READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
+
+	lea ALL_F+16(%rip), %r12
+	sub %r13, %r12
 	movdqa	%xmm1, %xmm2
 	pxor	%xmm1, %xmm0		# Ciphertext XOR E(K, Yn)
-	movdqu	ALL_F-SHIFT_MASK(%r12), %xmm1
+	movdqu	(%r12), %xmm1
 	# get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
 	pand	%xmm1, %xmm0		# mask out top 16-%r13 bytes of %xmm0
 	pand	%xmm1, %xmm2
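
The mask load above works because of the layout of the ALL_F table in this file: 16 bytes of 0xff followed by 16 bytes of 0x00, so a 16-byte load at ALL_F + 16 - %r13 yields %r13 bytes of 0xff in the low positions and zeros above them. A small C sketch of the idea (helper names are invented; the kernel does this inline with lea/sub/movdqu/pand):

#include <stdint.h>
#include <string.h>

/* Mirrors the ALL_F table: 16 bytes of 0xff, then 16 bytes of 0x00. */
static const uint8_t all_f[32] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	/* the remaining 16 bytes default to 0x00 */
};

static void mask_partial_block(uint8_t block[16], size_t len)
{
	uint8_t mask[16];
	size_t i;

	memcpy(mask, all_f + 16 - len, 16);	/* movdqu (%r12), %xmm1 */
	for (i = 0; i < 16; i++)
		block[i] &= mask[i];		/* pand %xmm1, %xmm0 */
}

In the decrypt path the same mask is also applied to the saved ciphertext copy in %xmm2, so that only the valid bytes of the partial block enter the GHASH computation below.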
@@ -1507,9 +1527,6 @@ _zero_cipher_left_decrypt:
 
 	pxor %xmm2, %xmm8
 	GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
-	# GHASH computation for the last <16 byte block
-	sub %r13, %r11
-	add $16, %r11
 
 	# output %r13 bytes
 	MOVQ_R64_XMM %xmm0, %rax
@@ -1663,14 +1680,6 @@ ENDPROC(aesni_gcm_dec)
 *
 * AAD Format with 64-bit Extended Sequence Number
 *
-* aadLen:
-*	from the definition of the spec, aadLen can only be 8 or 12 bytes.
-*	The code supports 16 too but for other sizes, the code will fail.
-*
-* TLen:
-*	from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
-*	For other sizes, the code will fail.
-*
 * poly = x^128 + x^127 + x^126 + x^121 + 1
 ***************************************************************************/
 ENTRY(aesni_gcm_enc)
@@ -1763,19 +1772,16 @@ _zero_cipher_left_encrypt:
 	movdqa SHUF_MASK(%rip), %xmm10
 	PSHUFB_XMM %xmm10, %xmm0
 
-
 	ENCRYPT_SINGLE_BLOCK	%xmm0, %xmm1	# Encrypt(K, Yn)
-	sub $16, %r11
-	add %r13, %r11
-	movdqu (%arg3,%r11,1), %xmm1	# receive the last <16 byte blocks
-	lea SHIFT_MASK+16(%rip), %r12
+
+	lea (%arg3,%r11,1), %r10
+	mov %r13, %r12
+	READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
+
+	lea ALL_F+16(%rip), %r12
 	sub %r13, %r12
-	# adjust the shuffle mask pointer to be able to shift 16-r13 bytes
-	# (%r13 is the number of bytes in plaintext mod 16)
-	movdqu	(%r12), %xmm2		# get the appropriate shuffle mask
-	PSHUFB_XMM %xmm2, %xmm1		# shift right 16-r13 byte
 	pxor	%xmm1, %xmm0		# Plaintext XOR Encrypt(K, Yn)
-	movdqu	ALL_F-SHIFT_MASK(%r12), %xmm1
+	movdqu	(%r12), %xmm1
 	# get the appropriate mask to mask out top 16-r13 bytes of xmm0
 	pand	%xmm1, %xmm0		# mask out top 16-r13 bytes of xmm0
 	movdqa SHUF_MASK(%rip), %xmm10
@@ -1784,9 +1790,6 @@ _zero_cipher_left_encrypt:
 	pxor	%xmm0, %xmm8
 	GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
 	# GHASH computation for the last <16 byte block
-	sub	%r13, %r11
-	add	$16, %r11
-
 	movdqa SHUF_MASK(%rip), %xmm10
 	PSHUFB_XMM %xmm10, %xmm0
