author     Jussi Kivilinna <jussi.kivilinna@iki.fi>   2022-06-28 17:50:13 +0300
committer  Jussi Kivilinna <jussi.kivilinna@iki.fi>   2022-07-06 13:04:08 +0300
commit     99b7375bd6162c7c3f481ab6d0d106bfcb5b2b07 (patch)
tree       ff5bbf7c5df5312fa10817e754ea32b001f4c9af /cipher/camellia-gfni-avx512-amd64.S
parent     ac14d9ee7a094a2b9a26b1e3f4d36f59dbf68b40 (diff)
download   libgcrypt-99b7375bd6162c7c3f481ab6d0d106bfcb5b2b07.tar.gz
camellia-gfni-avx512: remove copy-paste / leftover extra instructions
* cipher/camellia-gfni-avx512-amd64.S: Remove extraneous copy-pasted
instructions after the `.Lbswap128_mask` constant; remove left-over
plaintext XORing in the OCB encryption input loading macro.
--
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
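The stray vbroadcasti64x2 lines sat in the middle of the constant pool that
follows .Lbswap128_mask. As a minimal sketch (hypothetical labels, not the
libgcrypt source, assuming GNU as syntax): constants emitted with .byte are
plain data, so instructions pasted between them only add unreachable bytes:

	.text
    .Lbswap_mask_demo:	/* hypothetical stand-in for .Lbswap128_mask */
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
	/* A copy-pasted instruction here, e.g.
	 *   vbroadcasti64x2 .Lbswap_mask_demo(%rip), %zmm22
	 * would be assembled into machine-code bytes that are never
	 * executed; both labels still resolve to the right data, so the
	 * only effect is wasted space in the constant pool. */
    .Lbyte_ones_demo:	/* hypothetical stand-in for .Lbyte_ones */
	.byte 1, 1, 1, 1, 1, 1, 1, 1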
Diffstat (limited to 'cipher/camellia-gfni-avx512-amd64.S')
-rw-r--r--  cipher/camellia-gfni-avx512-amd64.S  8
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/cipher/camellia-gfni-avx512-amd64.S b/cipher/camellia-gfni-avx512-amd64.S
index eb9c01f2..15b2dc90 100644
--- a/cipher/camellia-gfni-avx512-amd64.S
+++ b/cipher/camellia-gfni-avx512-amd64.S
@@ -1,4 +1,4 @@
-/* camellia-gfni-avx512-amd64.h - GFNI/AVX512 implementation of Camellia
+/* camellia-gfni-avx512-amd64.S - GFNI/AVX512 implementation of Camellia
*
* Copyright (C) 2022 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
@@ -622,11 +622,6 @@ ELF(.type _gcry_camellia_gfni_avx512__constants,@object;)
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
- vbroadcasti64x2 .Lcounter4444_lo rRIP, %zmm22;
- vbroadcasti64x2 .Lcounter8888_lo rRIP, %zmm23;
- vbroadcasti64x2 .Lcounter16161616_lo rRIP, %zmm24;
- vbroadcasti64x2 .Lcounter1111_hi rRIP, %zmm25;
-
.Lbyte_ones:
.byte 1, 1, 1, 1, 1, 1, 1, 1
@@ -1155,7 +1150,6 @@ _gcry_camellia_gfni_avx512_ocb_enc:
vinserti64x2 $2, %xmm30, %zmm16, %zmm16; \
vpxorq (l3reg), %xmm30, %xmm30; \
vinserti64x2 $3, %xmm30, %zmm16, %zmm16; \
- vpxorq zplain, %zmm31, %zmm31; \
vpxorq zplain, %zmm16, zreg; \
vmovdqu64 %zmm16, (n * 64)(%rsi);