summaryrefslogtreecommitdiff
path: root/cipher/cipher-gcm-siv.c
diff options
context:
space:
mode:
author: Jussi Kivilinna <jussi.kivilinna@iki.fi> 2021-10-27 21:04:01 +0300
committer: Jussi Kivilinna <jussi.kivilinna@iki.fi> 2021-11-15 19:24:21 +0200
commit: 5e0187d84fc16d9ff0fbb0ccd4348657fea90d36 (patch)
tree: 377bda09db1dacb352f4927e57cd0ad3fac296ad /cipher/cipher-gcm-siv.c
parent: ec671cfa239888b67fcafda40b19006b61d9bbf2 (diff)
download: libgcrypt-5e0187d84fc16d9ff0fbb0ccd4348657fea90d36.tar.gz
Add intel-pclmul accelerated POLYVAL for GCM-SIV
* cipher/cipher-gcm-intel-pclmul.c (gfmul_pclmul_aggr4) (gfmul_pclmul_aggr8): Move assembly to new GFMUL_AGGRx_ASM* macros. (GFMUL_AGGR4_ASM_1, GFMUL_AGGR4_ASM_2, gfmul_pclmul_aggr4_le) (GFMUL_AGGR8_ASM, gfmul_pclmul_aggr8_le) (_gcry_polyval_intel_pclmul): New. * cipher/cipher-gcm-siv.c (do_polyval_buf): Use polyval function if available. * cipher/cipher-gcm.c (_gcry_polyval_intel_pclmul): New. (setupM): Setup 'c->u_mode.gcm.polyval_fn' with accelerated polyval function if available. * cipher/cipher-internal.h (gcry_cipher_handle): Add member 'u_mode.gcm.polyval_fn'. -- Benchmark on AMD Ryzen 7 5800X: Before: AES | nanosecs/byte mebibytes/sec cycles/byte auto Mhz GCM-SIV enc | 0.150 ns/B 6337 MiB/s 0.730 c/B 4849 GCM-SIV dec | 0.163 ns/B 5862 MiB/s 0.789 c/B 4850 GCM-SIV auth | 0.119 ns/B 8022 MiB/s 0.577 c/B 4850 After (enc/dec ~26% faster, auth ~43% faster): AES | nanosecs/byte mebibytes/sec cycles/byte auto Mhz GCM-SIV enc | 0.117 ns/B 8138 MiB/s 0.568 c/B 4850 GCM-SIV dec | 0.128 ns/B 7429 MiB/s 0.623 c/B 4850 GCM-SIV auth | 0.083 ns/B 11507 MiB/s 0.402 c/B 4851 Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
Diffstat (limited to 'cipher/cipher-gcm-siv.c')
-rw-r--r--  cipher/cipher-gcm-siv.c | 35
1 file changed, 27 insertions(+), 8 deletions(-)
diff --git a/cipher/cipher-gcm-siv.c b/cipher/cipher-gcm-siv.c
index 813cf579..9ebc0036 100644
--- a/cipher/cipher-gcm-siv.c
+++ b/cipher/cipher-gcm-siv.c
@@ -96,6 +96,7 @@ do_polyval_buf(gcry_cipher_hd_t c, byte *hash, const byte *buf,
unsigned int blocksize = GCRY_SIV_BLOCK_LEN;
unsigned int unused = c->u_mode.gcm.mac_unused;
ghash_fn_t ghash_fn = c->u_mode.gcm.ghash_fn;
+ ghash_fn_t polyval_fn = c->u_mode.gcm.polyval_fn;
byte tmp_blocks[16][GCRY_SIV_BLOCK_LEN];
size_t nblocks, n;
unsigned int burn = 0, nburn;
@@ -137,9 +138,17 @@ do_polyval_buf(gcry_cipher_hd_t c, byte *hash, const byte *buf,
gcry_assert (unused == blocksize);
/* Process one block from macbuf. */
- cipher_block_bswap (c->u_mode.gcm.macbuf, c->u_mode.gcm.macbuf,
- blocksize);
- nburn = ghash_fn (c, hash, c->u_mode.gcm.macbuf, 1);
+ if (polyval_fn)
+ {
+ nburn = polyval_fn (c, hash, c->u_mode.gcm.macbuf, 1);
+ }
+ else
+ {
+ cipher_block_bswap (c->u_mode.gcm.macbuf, c->u_mode.gcm.macbuf,
+ blocksize);
+ nburn = ghash_fn (c, hash, c->u_mode.gcm.macbuf, 1);
+ }
+
burn = nburn > burn ? nburn : burn;
unused = 0;
}
@@ -148,12 +157,22 @@ do_polyval_buf(gcry_cipher_hd_t c, byte *hash, const byte *buf,
while (nblocks)
{
- for (n = 0; n < (nblocks > 16 ? 16 : nblocks); n++)
- cipher_block_bswap (tmp_blocks[n], buf + n * blocksize, blocksize);
-
- num_blks_used = n > num_blks_used ? n : num_blks_used;
+ if (polyval_fn)
+ {
+ n = nblocks;
+ nburn = polyval_fn (c, hash, buf, n);
+ }
+ else
+ {
+ for (n = 0; n < (nblocks > 16 ? 16 : nblocks); n++)
+ cipher_block_bswap (tmp_blocks[n], buf + n * blocksize,
+ blocksize);
+
+ num_blks_used = n > num_blks_used ? n : num_blks_used;
+
+ nburn = ghash_fn (c, hash, tmp_blocks[0], n);
+ }
- nburn = ghash_fn (c, hash, tmp_blocks[0], n);
burn = nburn > burn ? nburn : burn;
buf += n * blocksize;
buflen -= n * blocksize;