author    Jussi Kivilinna <jussi.kivilinna@iki.fi>  2018-01-31 20:02:48 +0200
committer Jussi Kivilinna <jussi.kivilinna@iki.fi>  2018-01-31 20:05:08 +0200
commit    ffdc6f3623a0bcb41324d562340b2cd1c288e387 (patch)
tree      53e9f8b1af3ff34f01e58b55ff4f75179fceee3e /cipher/cipher-gcm.c
parent    0b55f349a8b8f4b0ac9ed724c2d5b8dcc9f5401c (diff)
download  libgcrypt-ffdc6f3623a0bcb41324d562340b2cd1c288e387.tar.gz
Fix incorrect counter overflow handling for GCM
* cipher/cipher-gcm.c (gcm_ctr_encrypt): New function to handle 32-bit
CTR increment for GCM.
(_gcry_cipher_gcm_encrypt, _gcry_cipher_gcm_decrypt): Do not use generic
CTR implementation directly, use gcm_ctr_encrypt instead.
* tests/basic.c (_check_gcm_cipher): Add test vectors for 32-bit CTR
overflow.
(check_gcm_cipher): Add 'split input to 15 bytes and 17 bytes' test-runs.
--

Reported-by: Clemens Lang <Clemens.Lang@bmw.de>

> I believe we have found what seems to be a bug in counter overflow
> handling in AES-GCM in libgcrypt's implementation. This leads to
> incorrect results when using a non-12-byte IV and decrypting payloads
> encrypted with other AES-GCM implementations, such as OpenSSL.
>
> According to the NIST Special Publication 800-38D "Recommendation for
> Block Cipher Modes of Operation: Galois/Counter Mode (GCM) and GMAC",
> section 7.1, algorithm 4, step 3 [NIST38D], the counter increment is
> defined as inc_32. Section 6.2 of the same document defines the
> incrementing function inc_s for positive integers s as follows:
>
> | the function increments the right-most s bits of the string, regarded
> | as the binary representation of an integer, modulo 2^s; the remaining,
> | left-most len(X) - s bits remain unchanged
>
> (X is the complete counter value in this case.)
>
> This problem does not occur when using a 12-byte IV, because AES-GCM
> has a special case for the initial counter value with 12-byte IVs:
>
> | If len(IV)=96, then J_0 = IV || 0^31 || 1
>
> i.e., one would have to encrypt (UINT_MAX - 1) * blocksize of data to
> hit an overflow. However, for non-12-byte IVs, the initial counter
> value is the output of a hash function, which makes hitting an
> overflow much more likely.
>
> In practice, we have found that using
>
>   iv  = 9e 79 18 8c ff 09 56 1e c9 90 99 cc 6d 5d f6 d3
>   key = 26 56 e5 73 76 03 c6 95 0d 22 07 31 5d 32 5c 6b
>         a5 54 5f 40 23 98 60 f6 f7 06 6f 7a 4f c2 ca 40
>
> will reliably trigger an overflow when encrypting 10 MiB of data. It
> seems that this is caused by re-using the AES-CTR implementation for
> incrementing the counter.

The bug was introduced by commit bd4bd23a2511a4bce63c3217cca0d4ecf0c79532
"GCM: Use counter mode code for speed-up".

GnuPG-bug-id: 3764
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
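For readers unfamiliar with inc_32, the following is a minimal C sketch of the
increment the report describes, assuming a 16-byte big-endian counter block as
GCM uses. It is an illustration of the NIST SP 800-38D, section 6.2 definition,
not libgcrypt code; the helper name inc32_be128 is hypothetical:

#include <stdint.h>

/* Increment only the rightmost 32 bits of a 16-byte big-endian counter
 * block, modulo 2^32; the leftmost 96 bits stay unchanged.  This is the
 * inc_32 operation from NIST SP 800-38D, section 6.2 (illustrative
 * sketch only, not libgcrypt's internal code). */
static void
inc32_be128 (uint8_t ctr[16])
{
  uint32_t low = ((uint32_t)ctr[12] << 24) | ((uint32_t)ctr[13] << 16)
               | ((uint32_t)ctr[14] << 8)  |  (uint32_t)ctr[15];

  low += 1;  /* Wraps modulo 2^32; no carry into bytes 0..11. */

  ctr[12] = (uint8_t)(low >> 24);
  ctr[13] = (uint8_t)(low >> 16);
  ctr[14] = (uint8_t)(low >> 8);
  ctr[15] = (uint8_t)low;
}

A generic 128-bit counter increment would instead carry into byte 11 when the
low word wraps, which is exactly the mismatch the patch below isolates and
undoes. And because a 12-byte IV yields J_0 = IV || 0^31 || 1, the low word
starts at 1 in that case, making the wrap practically unreachable; with other
IV lengths J_0 is a GHASH output, so the low word can start arbitrarily close
to the wrap.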
Diffstat (limited to 'cipher/cipher-gcm.c')
-rw-r--r--  cipher/cipher-gcm.c | 77
1 file changed, 74 insertions(+), 3 deletions(-)
diff --git a/cipher/cipher-gcm.c b/cipher/cipher-gcm.c
index 2b8b454b..6169d142 100644
--- a/cipher/cipher-gcm.c
+++ b/cipher/cipher-gcm.c
@@ -1,6 +1,6 @@
/* cipher-gcm.c - Generic Galois Counter Mode implementation
* Copyright (C) 2013 Dmitry Eremin-Solenikov
- * Copyright (C) 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (C) 2013, 2018 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
@@ -556,6 +556,77 @@ do_ghash_buf(gcry_cipher_hd_t c, byte *hash, const byte *buf,
}
+static gcry_err_code_t
+gcm_ctr_encrypt (gcry_cipher_hd_t c, byte *outbuf, size_t outbuflen,
+                 const byte *inbuf, size_t inbuflen)
+{
+  gcry_err_code_t err = 0;
+
+  while (inbuflen)
+    {
+      u32 nblocks_to_overflow;
+      u32 num_ctr_increments;
+      u32 curr_ctr_low;
+      size_t currlen = inbuflen;
+      byte ctr_copy[GCRY_GCM_BLOCK_LEN];
+      int fix_ctr = 0;
+
+      /* GCM CTR increments only the least significant 32 bits, without
+       * carry into the upper 96 bits of the counter.  Using the generic
+       * CTR implementation directly would carry a 32-bit overflow into
+       * the upper 96 bits.  Detect whether the input length is long
+       * enough to cause an overflow, and limit the input length so that
+       * the CTR overflow happens but the updated CTR value is not used
+       * to encrypt further input.  After the overflow, the upper 96 bits
+       * of the CTR are restored to cancel out the modification done by
+       * the generic CTR encryption. */
+
+      if (inbuflen > c->unused)
+        {
+          curr_ctr_low = gcm_add32_be128 (c->u_ctr.ctr, 0);
+
+          /* Number of CTR increments this inbuflen would cause. */
+          num_ctr_increments = (inbuflen - c->unused) / GCRY_GCM_BLOCK_LEN +
+                               !!((inbuflen - c->unused) % GCRY_GCM_BLOCK_LEN);
+
+          if ((u32)(num_ctr_increments + curr_ctr_low) < curr_ctr_low)
+            {
+              nblocks_to_overflow = 0xffffffffU - curr_ctr_low + 1;
+              currlen = nblocks_to_overflow * GCRY_GCM_BLOCK_LEN + c->unused;
+              if (currlen > inbuflen)
+                {
+                  currlen = inbuflen;
+                }
+
+              fix_ctr = 1;
+              buf_cpy(ctr_copy, c->u_ctr.ctr, GCRY_GCM_BLOCK_LEN);
+            }
+        }
+
+      err = _gcry_cipher_ctr_encrypt(c, outbuf, outbuflen, inbuf, currlen);
+      if (err != 0)
+        return err;
+
+      if (fix_ctr)
+        {
+          /* The lower 32 bits of the CTR should now be zero. */
+          gcry_assert(gcm_add32_be128 (c->u_ctr.ctr, 0) == 0);
+
+          /* Restore the upper part of the CTR. */
+          buf_cpy(c->u_ctr.ctr, ctr_copy, GCRY_GCM_BLOCK_LEN - sizeof(u32));
+
+          wipememory(ctr_copy, sizeof(ctr_copy));
+        }
+
+      inbuflen -= currlen;
+      inbuf += currlen;
+      outbuflen -= currlen;
+      outbuf += currlen;
+    }
+
+  return err;
+}
+
+
gcry_err_code_t
_gcry_cipher_gcm_encrypt (gcry_cipher_hd_t c,
                          byte *outbuf, size_t outbuflen,
@@ -595,7 +666,7 @@ _gcry_cipher_gcm_encrypt (gcry_cipher_hd_t c,
return GPG_ERR_INV_LENGTH;
}
-  err = _gcry_cipher_ctr_encrypt(c, outbuf, outbuflen, inbuf, inbuflen);
+  err = gcm_ctr_encrypt(c, outbuf, outbuflen, inbuf, inbuflen);
if (err != 0)
return err;
@@ -642,7 +713,7 @@ _gcry_cipher_gcm_decrypt (gcry_cipher_hd_t c,
  do_ghash_buf(c, c->u_mode.gcm.u_tag.tag, inbuf, inbuflen, 0);
-  return _gcry_cipher_ctr_encrypt(c, outbuf, outbuflen, inbuf, inbuflen);
+  return gcm_ctr_encrypt(c, outbuf, outbuflen, inbuf, inbuflen);
}
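As a companion to gcm_ctr_encrypt above, here is a self-contained sketch of
just its chunk-capping arithmetic, under the simplifying assumption that the
caller tracks the pending byte count, the buffered keystream bytes (c->unused
in the patch), and the current low 32 bits of the counter. The function and
parameter names (cap_before_ctr_wrap, pending, ctr_low, unused) are
hypothetical stand-ins, not libgcrypt identifiers:

#include <stddef.h>
#include <stdint.h>

#define BLOCK_LEN 16  /* GCRY_GCM_BLOCK_LEN in the patch. */

/* Bound the number of bytes handed to a generic CTR implementation so
 * that a 32-bit counter wrap, if any, lands exactly at the end of the
 * chunk; the caller can then restore the upper 96 bits of the counter
 * before processing the rest.  Mirrors the logic of gcm_ctr_encrypt. */
static size_t
cap_before_ctr_wrap (size_t pending, uint32_t ctr_low, size_t unused)
{
  size_t len = pending;

  if (pending > unused)
    {
      /* Whole-block counter increments this chunk would cause
       * (rounded up, as in the patch's div-plus-remainder form). */
      uint32_t nincr = (uint32_t)((pending - unused + BLOCK_LEN - 1)
                                  / BLOCK_LEN);

      /* Would that many increments wrap the low 32-bit word? */
      if ((uint32_t)(nincr + ctr_low) < ctr_low)
        {
          /* Blocks remaining until the wrap; stop right after it. */
          uint32_t to_wrap = 0xffffffffU - ctr_low + 1;
          size_t capped = (size_t)to_wrap * BLOCK_LEN + unused;

          if (capped < len)
            len = capped;
        }
    }

  return len;
}

For example, with ctr_low = 0xfffffffe, unused = 0 and 10 MiB pending, the cap
comes out to 2 blocks (32 bytes): the wrap happens inside that short chunk,
after which the patch restores the upper 96 bits from ctr_copy and loops on
the remaining input.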