author     Jussi Kivilinna <jussi.kivilinna@iki.fi>    2022-07-21 11:05:38 +0300
committer  Jussi Kivilinna <jussi.kivilinna@iki.fi>    2022-07-21 11:05:38 +0300
commit     eaed633c1662d8a98042ac146c981113f2807b22 (patch)
tree       5d0977724cbf429c34f2bc52dfe6f2f32406a2c6
parent     2dc2654006746a25f9cb6b24786867f1725ac244 (diff)
download   libgcrypt-eaed633c1662d8a98042ac146c981113f2807b22.tar.gz
sm4: add amd64 GFNI/AVX512 implementation
* cipher/Makefile.am: Add 'sm4-gfni-avx512-amd64.S'.
* cipher/sm4-gfni-avx512-amd64.S: New.
* cipher/sm4-gfni.c (USE_GFNI_AVX512): New.
(SM4_context): Add 'use_gfni_avx512' and 'crypt_blk1_16'.
(_gcry_sm4_gfni_avx512_expand_key, _gcry_sm4_gfni_avx512_ctr_enc)
(_gcry_sm4_gfni_avx512_cbc_dec, _gcry_sm4_gfni_avx512_cfb_dec)
(_gcry_sm4_gfni_avx512_ocb_enc, _gcry_sm4_gfni_avx512_ocb_dec)
(_gcry_sm4_gfni_avx512_ocb_auth, _gcry_sm4_gfni_avx512_ctr_enc_blk32)
(_gcry_sm4_gfni_avx512_cbc_dec_blk32)
(_gcry_sm4_gfni_avx512_cfb_dec_blk32)
(_gcry_sm4_gfni_avx512_ocb_enc_blk32)
(_gcry_sm4_gfni_avx512_ocb_dec_blk32)
(_gcry_sm4_gfni_avx512_crypt_blk1_16)
(_gcry_sm4_gfni_avx512_crypt_blk32, sm4_gfni_avx512_crypt_blk1_16)
(sm4_crypt_blk1_32, sm4_encrypt_blk1_32, sm4_decrypt_blk1_32): New.
(sm4_expand_key): Add GFNI/AVX512 code path.
(sm4_setkey): Use GFNI/AVX512 if supported by CPU; set up
`ctx->crypt_blk1_16`.
(sm4_encrypt, sm4_decrypt, sm4_get_crypt_blk1_16_fn, _gcry_sm4_ctr_enc)
(_gcry_sm4_cbc_dec, _gcry_sm4_cfb_dec, _gcry_sm4_ocb_crypt)
(_gcry_sm4_ocb_auth) [USE_GFNI_AVX512]: Add GFNI/AVX512 code path.
(_gcry_sm4_xts_crypt): Change parallel block size from 16 to 32.
* configure.ac: Add 'sm4-gfni-avx512-amd64.lo'.
--
Benchmark on Intel i3-1115G4 (tigerlake):
Before:
SM4 | nanosecs/byte mebibytes/sec cycles/byte auto Mhz
CBC enc | 9.45 ns/B 101.0 MiB/s 38.63 c/B 4089
CBC dec | 0.647 ns/B 1475 MiB/s 2.64 c/B 4089
CFB enc | 9.43 ns/B 101.1 MiB/s 38.57 c/B 4089
CFB dec | 0.648 ns/B 1472 MiB/s 2.65 c/B 4089
CTR enc | 0.661 ns/B 1443 MiB/s 2.70 c/B 4089
CTR dec | 0.661 ns/B 1444 MiB/s 2.70 c/B 4089
XTS enc | 0.767 ns/B 1243 MiB/s 3.14 c/B 4089
XTS dec | 0.772 ns/B 1235 MiB/s 3.16 c/B 4089
OCB enc | 0.671 ns/B 1421 MiB/s 2.74 c/B 4089
OCB dec | 0.676 ns/B 1410 MiB/s 2.77 c/B 4089
OCB auth | 0.668 ns/B 1428 MiB/s 2.73 c/B 4090
After:
SM4 | nanosecs/byte mebibytes/sec cycles/byte auto Mhz
CBC enc | 7.80 ns/B 122.2 MiB/s 31.91 c/B 4090
CBC dec | 0.293 ns/B 3258 MiB/s 1.20 c/B 4095±3
CFB enc | 7.80 ns/B 122.2 MiB/s 31.90 c/B 4089
CFB dec | 0.294 ns/B 3247 MiB/s 1.20 c/B 4096±3
CTR enc | 0.306 ns/B 3120 MiB/s 1.25 c/B 4098±4
CTR dec | 0.300 ns/B 3182 MiB/s 1.23 c/B 4103±6
XTS enc | 0.431 ns/B 2211 MiB/s 1.77 c/B 4107±9
XTS dec | 0.431 ns/B 2213 MiB/s 1.77 c/B 4102±6
OCB enc | 0.324 ns/B 2946 MiB/s 1.33 c/B 4096±3
OCB dec | 0.326 ns/B 2923 MiB/s 1.34 c/B 4093±2
OCB auth | 0.536 ns/B 1779 MiB/s 2.19 c/B 4089
CBC/CFB enc: 1.20x faster
CBC/CFB dec: 2.20x faster
CTR: 2.18x faster
XTS: 1.78x faster
OCB enc/dec: 2.07x faster
OCB auth: 1.24x faster
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
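[Editorial note: the speedup figures above appear to be the before/after per-byte timings divided out, e.g. CBC dec 0.647 ns/B / 0.293 ns/B ≈ 2.2x and CTR enc 2.70 c/B / 1.25 c/B ≈ 2.2x, consistent with the quoted 2.20x and 2.18x.]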
-rw-r--r--  cipher/Makefile.am               |    6
-rw-r--r--  cipher/sm4-gfni-avx512-amd64.S   | 1750
-rw-r--r--  cipher/sm4.c                     |  336
-rw-r--r--  configure.ac                     |    1
4 files changed, 2076 insertions, 17 deletions
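For orientation when reading the assembly below: the ROUND macros implement the standard SM4 round, in which three state words and the round key are mixed, pushed through the byte-wise S-box, and folded back into the fourth word with four rotations. The patch computes the S-box without table lookups by mapping each byte from the SM4 field into the AES field (`.Lpre_affine_s`, `vgf2p8affineqb`), taking the GF(2^8) inverse (`vgf2p8affineinvqb`), and mapping back (`.Lpost_affine_s`). Below is a minimal scalar C sketch of the round function being vectorized, assuming a plain `sbox[]` lookup table instead of the GFNI affine constants; it mirrors the `s1 ^ s2 ^ s3 ^ rk` and `s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24)` comments in the macros.

```c
#include <stdint.h>

/* Standard SM4 S-box table, assumed to be provided elsewhere. */
extern const uint8_t sbox[256];

static uint32_t rol32(uint32_t x, unsigned int n)
{
  return (x << n) | (x >> (32 - n));
}

/* tau: apply the S-box to each byte of a 32-bit word. */
static uint32_t sm4_tau(uint32_t x)
{
  return ((uint32_t)sbox[(x >> 24) & 0xff] << 24)
       | ((uint32_t)sbox[(x >> 16) & 0xff] << 16)
       | ((uint32_t)sbox[(x >>  8) & 0xff] <<  8)
       |  (uint32_t)sbox[x & 0xff];
}

/* One encryption round: s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24)
 * with x = tau(s1 ^ s2 ^ s3 ^ rk), as in the ROUND macros below. */
static uint32_t sm4_round(uint32_t s0, uint32_t s1, uint32_t s2,
                          uint32_t s3, uint32_t rk)
{
  uint32_t x = sm4_tau(s1 ^ s2 ^ s3 ^ rk);
  return s0 ^ x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
}

/* The key-schedule round uses the same structure with a different linear
 * layer: x ^ rol(x,13) ^ rol(x,23), as in the expand_key ROUND macro. */
static uint32_t sm4_key_round(uint32_t k0, uint32_t k1, uint32_t k2,
                              uint32_t k3, uint32_t ck_i)
{
  uint32_t x = sm4_tau(k1 ^ k2 ^ k3 ^ ck_i);
  return k0 ^ x ^ rol32(x, 13) ^ rol32(x, 23);
}
```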
diff --git a/cipher/Makefile.am b/cipher/Makefile.am index 97823cb4..3d95a794 100644 --- a/cipher/Makefile.am +++ b/cipher/Makefile.am @@ -118,9 +118,9 @@ EXTRA_libcipher_la_SOURCES = \ scrypt.c \ seed.c \ serpent.c serpent-sse2-amd64.S \ - sm4.c sm4-aesni-avx-amd64.S sm4-aesni-avx2-amd64.S sm4-aarch64.S \ - sm4-armv8-aarch64-ce.S sm4-gfni-avx2-amd64.S \ - sm4-armv9-aarch64-sve-ce.S \ + sm4.c sm4-aesni-avx-amd64.S sm4-aesni-avx2-amd64.S \ + sm4-gfni-avx2-amd64.S sm4-gfni-avx512-amd64.S \ + sm4-aarch64.S sm4-armv8-aarch64-ce.S sm4-armv9-aarch64-sve-ce.S \ serpent-avx2-amd64.S serpent-armv7-neon.S \ sha1.c sha1-ssse3-amd64.S sha1-avx-amd64.S sha1-avx-bmi2-amd64.S \ sha1-avx2-bmi2-amd64.S sha1-armv7-neon.S sha1-armv8-aarch32-ce.S \ diff --git a/cipher/sm4-gfni-avx512-amd64.S b/cipher/sm4-gfni-avx512-amd64.S new file mode 100644 index 00000000..1d5e9a48 --- /dev/null +++ b/cipher/sm4-gfni-avx512-amd64.S @@ -0,0 +1,1750 @@ +/* sm4-gfni-avx512-amd64.S - GFNI/AVX512 implementation of SM4 cipher + * + * Copyright (C) 2022 Jussi Kivilinna <jussi.kivilinna@iki.fi> + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include <config.h> + +#ifdef __x86_64 +#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ + defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ + defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX512_SUPPORT) + +#include "asm-common-amd64.h" + +/********************************************************************** + helper macros + **********************************************************************/ + +/* Transpose four 32-bit words between 128-bit vectors. 
*/ +#define transpose_4x4(x0, x1, x2, x3, t1, t2) \ + vpunpckhdq x1, x0, t2; \ + vpunpckldq x1, x0, x0; \ + \ + vpunpckldq x3, x2, t1; \ + vpunpckhdq x3, x2, x2; \ + \ + vpunpckhqdq t1, x0, x1; \ + vpunpcklqdq t1, x0, x0; \ + \ + vpunpckhqdq x2, t2, x3; \ + vpunpcklqdq x2, t2, x2; + +/********************************************************************** + 4-way && 8-way SM4 with GFNI and AVX512 (128-bit vectors) + **********************************************************************/ + +/* vector registers */ +#define RX0 %ymm0 +#define RX1 %ymm1 +#define RX0x %xmm0 +#define RX1x %xmm1 +#define RX0z %zmm0 +#define RX1z %zmm1 + +#define RTMP0 %ymm2 +#define RTMP1 %ymm3 +#define RTMP2 %ymm4 +#define RTMP3 %ymm5 +#define RTMP4 %ymm6 +#define RTMP0x %xmm2 +#define RTMP1x %xmm3 +#define RTMP2x %xmm4 +#define RTMP3x %xmm5 +#define RTMP4x %xmm6 +#define RTMP0z %zmm2 +#define RTMP1z %zmm3 +#define RTMP2z %zmm4 +#define RTMP3z %zmm5 +#define RTMP4z %zmm6 + +#define RNOT %ymm7 +#define RNOTx %xmm7 +#define RNOTz %zmm7 + +#define RA0 %ymm8 +#define RA1 %ymm9 +#define RA2 %ymm10 +#define RA3 %ymm11 +#define RA0x %xmm8 +#define RA1x %xmm9 +#define RA2x %xmm10 +#define RA3x %xmm11 +#define RA0z %zmm8 +#define RA1z %zmm9 +#define RA2z %zmm10 +#define RA3z %zmm11 + +#define RB0 %ymm12 +#define RB1 %ymm13 +#define RB2 %ymm14 +#define RB3 %ymm15 +#define RB0x %xmm12 +#define RB1x %xmm13 +#define RB2x %xmm14 +#define RB3x %xmm15 +#define RB0z %zmm12 +#define RB1z %zmm13 +#define RB2z %zmm14 +#define RB3z %zmm15 + +.text +.align 32 + +/* Affine transform, SM4 field to AES field */ +.Lpre_affine_s: + .byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34 + .byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34 + .byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34 + .byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34 + +/* Affine transform, AES field to SM4 field */ +.Lpost_affine_s: + .byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7 + .byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7 + .byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7 + .byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7 + +/* For CTR-mode IV byteswap */ +.Lbswap128_mask: + .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 + +/* For input word byte-swap */ +.Lbswap32_mask: + .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 + +.Lcounter2222_lo: + .quad 2, 0 +.Lcounter4444_lo: + .quad 4, 0 +.Lcounter8888_lo: + .quad 8, 0 +.Lcounter16161616_lo: + .quad 16, 0 +.Lcounter1111_hi: + .quad 0, 1 + +.align 64 +.Lcounter0123_lo: + .quad 0, 0 + .quad 1, 0 + .quad 2, 0 + .quad 3, 0 + +.align 16 +.globl _gcry_sm4_gfni_avx512_expand_key +ELF(.type _gcry_sm4_gfni_avx512_expand_key,@function;) +_gcry_sm4_gfni_avx512_expand_key: + /* input: + * %rdi: 128-bit key + * %rsi: rkey_enc + * %rdx: rkey_dec + * %rcx: fk array + * %r8: ck array + */ + CFI_STARTPROC(); + + vmovd 0*4(%rdi), RA0x; + vmovd 1*4(%rdi), RA1x; + vmovd 2*4(%rdi), RA2x; + vmovd 3*4(%rdi), RA3x; + + vmovdqa .Lbswap32_mask rRIP, RTMP2x; + vpshufb RTMP2x, RA0x, RA0x; + vpshufb RTMP2x, RA1x, RA1x; + vpshufb RTMP2x, RA2x, RA2x; + vpshufb RTMP2x, RA3x, RA3x; + + vmovd 0*4(%rcx), RB0x; + vmovd 1*4(%rcx), RB1x; + vmovd 2*4(%rcx), RB2x; + vmovd 3*4(%rcx), RB3x; + vpxor RB0x, RA0x, RA0x; + vpxor RB1x, RA1x, RA1x; + vpxor RB2x, RA2x, RA2x; + vpxor RB3x, RA3x, RA3x; + +#define ROUND(round, s0, s1, s2, s3) \ + vpxord (4*(round))(%r8) {1to4}, s1, RX0x; \ + vpternlogd $0x96, s2, s3, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \ + \ + /* sbox, non-linear part */ \ + vgf2p8affineqb $0x65, 
.Lpre_affine_s rRIP, RX0x, RX0x; \ + vgf2p8affineinvqb $0xd3, .Lpost_affine_s rRIP, RX0x, RX0x; \ + \ + /* linear part */ \ + vpxor RX0x, s0, s0; /* s0 ^ x */ \ + vprold $13, RX0x, RTMP1x; \ + vprold $23, RX0x, RTMP3x; \ + vpternlogd $0x96, RTMP1x, RTMP3x, s0; /* s0 ^ x ^ rol(x,13) ^ rol(x,23) */ + + leaq (32*4)(%r8), %rax; + leaq (32*4)(%rdx), %rdx; +.align 16 +.Lroundloop_expand_key: + leaq (-4*4)(%rdx), %rdx; + ROUND(0, RA0x, RA1x, RA2x, RA3x); + ROUND(1, RA1x, RA2x, RA3x, RA0x); + ROUND(2, RA2x, RA3x, RA0x, RA1x); + ROUND(3, RA3x, RA0x, RA1x, RA2x); + leaq (4*4)(%r8), %r8; + vmovd RA0x, (0*4)(%rsi); + vmovd RA1x, (1*4)(%rsi); + vmovd RA2x, (2*4)(%rsi); + vmovd RA3x, (3*4)(%rsi); + vmovd RA0x, (3*4)(%rdx); + vmovd RA1x, (2*4)(%rdx); + vmovd RA2x, (1*4)(%rdx); + vmovd RA3x, (0*4)(%rdx); + leaq (4*4)(%rsi), %rsi; + cmpq %rax, %r8; + jne .Lroundloop_expand_key; + +#undef ROUND + + vzeroall; + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_expand_key,.-_gcry_sm4_gfni_avx512_expand_key;) + +.align 16 +ELF(.type sm4_gfni_avx512_crypt_blk1_4,@function;) +sm4_gfni_avx512_crypt_blk1_4: + /* input: + * %rdi: round key array, CTX + * %rsi: dst (1..4 blocks) + * %rdx: src (1..4 blocks) + * %rcx: num blocks (1..4) + */ + CFI_STARTPROC(); + + vmovdqu 0*16(%rdx), RA0x; + vmovdqa RA0x, RA1x; + vmovdqa RA0x, RA2x; + vmovdqa RA0x, RA3x; + cmpq $2, %rcx; + jb .Lblk4_load_input_done; + vmovdqu 1*16(%rdx), RA1x; + je .Lblk4_load_input_done; + vmovdqu 2*16(%rdx), RA2x; + cmpq $3, %rcx; + je .Lblk4_load_input_done; + vmovdqu 3*16(%rdx), RA3x; + +.Lblk4_load_input_done: + + vmovdqa .Lbswap32_mask rRIP, RTMP2x; + vpshufb RTMP2x, RA0x, RA0x; + vpshufb RTMP2x, RA1x, RA1x; + vpshufb RTMP2x, RA2x, RA2x; + vpshufb RTMP2x, RA3x, RA3x; + + transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x); + +#define ROUND(round, s0, s1, s2, s3) \ + vpxord (4*(round))(%rdi) {1to4}, s1, RX0x; \ + vpternlogd $0x96, s2, s3, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \ + \ + /* sbox, non-linear part */ \ + vgf2p8affineqb $0x65, .Lpre_affine_s rRIP, RX0x, RX0x; \ + vgf2p8affineinvqb $0xd3, .Lpost_affine_s rRIP, RX0x, RX0x; \ + \ + /* linear part */ \ + vprold $2, RX0x, RTMP0x; \ + vprold $10, RX0x, RTMP1x; \ + vprold $18, RX0x, RTMP2x; \ + vpternlogd $0x96, RTMP0x, RX0x, s0; /* s0 ^ x ^ rol(x,2) */ \ + vprold $24, RX0x, RX0x; \ + vpternlogd $0x96, RTMP1x, RTMP2x, RX0x; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ + vpxor RX0x, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ + + leaq (32*4)(%rdi), %rax; +.align 16 +.Lroundloop_blk4: + ROUND(0, RA0x, RA1x, RA2x, RA3x); + ROUND(1, RA1x, RA2x, RA3x, RA0x); + ROUND(2, RA2x, RA3x, RA0x, RA1x); + ROUND(3, RA3x, RA0x, RA1x, RA2x); + leaq (4*4)(%rdi), %rdi; + cmpq %rax, %rdi; + jne .Lroundloop_blk4; + +#undef ROUND + + vmovdqa .Lbswap128_mask rRIP, RTMP2x; + + transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x); + vpshufb RTMP2x, RA0x, RA0x; + vpshufb RTMP2x, RA1x, RA1x; + vpshufb RTMP2x, RA2x, RA2x; + vpshufb RTMP2x, RA3x, RA3x; + + vmovdqu RA0x, 0*16(%rsi); + cmpq $2, %rcx; + jb .Lblk4_store_output_done; + vmovdqu RA1x, 1*16(%rsi); + je .Lblk4_store_output_done; + vmovdqu RA2x, 2*16(%rsi); + cmpq $3, %rcx; + je .Lblk4_store_output_done; + vmovdqu RA3x, 3*16(%rsi); + +.Lblk4_store_output_done: + vzeroall; + xorl %eax, %eax; + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size sm4_gfni_avx512_crypt_blk1_4,.-sm4_gfni_avx512_crypt_blk1_4;) + +.align 16 +ELF(.type __sm4_gfni_crypt_blk8,@function;) +__sm4_gfni_crypt_blk8: + /* input: + * %rdi: round key array, CTX + * RA0, RA1, RA2, 
RA3, RB0, RB1, RB2, RB3: eight parallel + * ciphertext blocks + * output: + * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel plaintext + * blocks + */ + CFI_STARTPROC(); + + vmovdqa .Lbswap32_mask rRIP, RTMP2x; + vpshufb RTMP2x, RA0x, RA0x; + vpshufb RTMP2x, RA1x, RA1x; + vpshufb RTMP2x, RA2x, RA2x; + vpshufb RTMP2x, RA3x, RA3x; + vpshufb RTMP2x, RB0x, RB0x; + vpshufb RTMP2x, RB1x, RB1x; + vpshufb RTMP2x, RB2x, RB2x; + vpshufb RTMP2x, RB3x, RB3x; + + transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x); + transpose_4x4(RB0x, RB1x, RB2x, RB3x, RTMP0x, RTMP1x); + +#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ + vpbroadcastd (4*(round))(%rdi), RX1x; \ + vmovdqa .Lpre_affine_s rRIP, RTMP2x; \ + vmovdqa .Lpost_affine_s rRIP, RTMP3x; \ + vpxor s1, RX1x, RX0x; \ + vpternlogd $0x96, s2, s3, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \ + vpxor r1, RX1x, RX1x; \ + vpternlogd $0x96, r2, r3, RX1x; /* r1 ^ r2 ^ r3 ^ rk */ \ + \ + /* sbox, non-linear part */ \ + vgf2p8affineqb $0x65, RTMP2x, RX0x, RX0x; \ + vgf2p8affineinvqb $0xd3, RTMP3x, RX0x, RX0x; \ + vgf2p8affineqb $0x65, RTMP2x, RX1x, RX1x; \ + vgf2p8affineinvqb $0xd3, RTMP3x, RX1x, RX1x; \ + \ + /* linear part */ \ + vprold $2, RX0x, RTMP0x; \ + vprold $10, RX0x, RTMP1x; \ + vprold $18, RX0x, RTMP2x; \ + vpternlogd $0x96, RTMP0x, RX0x, s0; /* s0 ^ x ^ rol(x,2) */ \ + vprold $24, RX0x, RX0x; \ + vprold $2, RX1x, RTMP3x; \ + vprold $10, RX1x, RTMP4x; \ + vprold $18, RX1x, RTMP0x; \ + vpternlogd $0x96, RTMP3x, RX1x, r0; /* r0 ^ x ^ rol(x,2) */ \ + vprold $24, RX1x, RX1x; \ + vpternlogd $0x96, RTMP1x, RTMP2x, RX0x; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ + vpternlogd $0x96, RTMP4x, RTMP0x, RX1x; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ + vpxor RX0x, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ + vpxor RX1x, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ + + leaq (32*4)(%rdi), %rax; +.align 16 +.Lroundloop_blk8: + ROUND(0, RA0x, RA1x, RA2x, RA3x, RB0x, RB1x, RB2x, RB3x); + ROUND(1, RA1x, RA2x, RA3x, RA0x, RB1x, RB2x, RB3x, RB0x); + ROUND(2, RA2x, RA3x, RA0x, RA1x, RB2x, RB3x, RB0x, RB1x); + ROUND(3, RA3x, RA0x, RA1x, RA2x, RB3x, RB0x, RB1x, RB2x); + leaq (4*4)(%rdi), %rdi; + cmpq %rax, %rdi; + jne .Lroundloop_blk8; + +#undef ROUND + + vmovdqa .Lbswap128_mask rRIP, RTMP2x; + + transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x); + transpose_4x4(RB0x, RB1x, RB2x, RB3x, RTMP0x, RTMP1x); + vpshufb RTMP2x, RA0x, RA0x; + vpshufb RTMP2x, RA1x, RA1x; + vpshufb RTMP2x, RA2x, RA2x; + vpshufb RTMP2x, RA3x, RA3x; + vpshufb RTMP2x, RB0x, RB0x; + vpshufb RTMP2x, RB1x, RB1x; + vpshufb RTMP2x, RB2x, RB2x; + vpshufb RTMP2x, RB3x, RB3x; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size __sm4_gfni_crypt_blk8,.-__sm4_gfni_crypt_blk8;) + +.align 16 +ELF(.type _gcry_sm4_gfni_avx512_crypt_blk1_8,@function;) +_gcry_sm4_gfni_avx512_crypt_blk1_8: + /* input: + * %rdi: round key array, CTX + * %rsi: dst (1..8 blocks) + * %rdx: src (1..8 blocks) + * %rcx: num blocks (1..8) + */ + CFI_STARTPROC(); + + cmpq $5, %rcx; + jb sm4_gfni_avx512_crypt_blk1_4; + vmovdqu (0 * 16)(%rdx), RA0x; + vmovdqu (1 * 16)(%rdx), RA1x; + vmovdqu (2 * 16)(%rdx), RA2x; + vmovdqu (3 * 16)(%rdx), RA3x; + vmovdqu (4 * 16)(%rdx), RB0x; + vmovdqa RB0x, RB1x; + vmovdqa RB0x, RB2x; + vmovdqa RB0x, RB3x; + je .Lblk8_load_input_done; + vmovdqu (5 * 16)(%rdx), RB1x; + cmpq $7, %rcx; + jb .Lblk8_load_input_done; + vmovdqu (6 * 16)(%rdx), RB2x; + je .Lblk8_load_input_done; + vmovdqu (7 * 16)(%rdx), RB3x; + +.Lblk8_load_input_done: + call __sm4_gfni_crypt_blk8; + + 
cmpq $6, %rcx; + vmovdqu RA0x, (0 * 16)(%rsi); + vmovdqu RA1x, (1 * 16)(%rsi); + vmovdqu RA2x, (2 * 16)(%rsi); + vmovdqu RA3x, (3 * 16)(%rsi); + vmovdqu RB0x, (4 * 16)(%rsi); + jb .Lblk8_store_output_done; + vmovdqu RB1x, (5 * 16)(%rsi); + je .Lblk8_store_output_done; + vmovdqu RB2x, (6 * 16)(%rsi); + cmpq $7, %rcx; + je .Lblk8_store_output_done; + vmovdqu RB3x, (7 * 16)(%rsi); + +.Lblk8_store_output_done: + vzeroall; + xorl %eax, %eax; + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_crypt_blk1_8,.-_gcry_sm4_gfni_avx512_crypt_blk1_8;) + +/********************************************************************** + 16-way SM4 with GFNI and AVX512 (256-bit vectors) + **********************************************************************/ + +.align 16 +ELF(.type __sm4_gfni_crypt_blk16,@function;) +__sm4_gfni_crypt_blk16: + /* input: + * %rdi: ctx, CTX + * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel + * plaintext blocks + * output: + * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel + * ciphertext blocks + */ + CFI_STARTPROC(); + + vbroadcasti128 .Lbswap32_mask rRIP, RTMP2; + vpshufb RTMP2, RA0, RA0; + vpshufb RTMP2, RA1, RA1; + vpshufb RTMP2, RA2, RA2; + vpshufb RTMP2, RA3, RA3; + vpshufb RTMP2, RB0, RB0; + vpshufb RTMP2, RB1, RB1; + vpshufb RTMP2, RB2, RB2; + vpshufb RTMP2, RB3, RB3; + + transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); + transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); + +#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ + vpbroadcastd (4*(round))(%rdi), RX1; \ + vbroadcasti128 .Lpre_affine_s rRIP, RTMP2; \ + vbroadcasti128 .Lpost_affine_s rRIP, RTMP3; \ + vpxor s1, RX1, RX0; \ + vpternlogd $0x96, s2, s3, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \ + vpxor r1, RX1, RX1; \ + vpternlogd $0x96, r2, r3, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \ + \ + /* sbox, non-linear part */ \ + vgf2p8affineqb $0x65, RTMP2, RX0, RX0; \ + vgf2p8affineinvqb $0xd3, RTMP3, RX0, RX0; \ + vgf2p8affineqb $0x65, RTMP2, RX1, RX1; \ + vgf2p8affineinvqb $0xd3, RTMP3, RX1, RX1; \ + \ + /* linear part */ \ + vprold $2, RX0, RTMP0; \ + vprold $10, RX0, RTMP1; \ + vprold $18, RX0, RTMP2; \ + vpternlogd $0x96, RTMP0, RX0, s0; /* s0 ^ x ^ rol(x,2) */ \ + vprold $24, RX0, RX0; \ + vprold $2, RX1, RTMP3; \ + vprold $10, RX1, RTMP4; \ + vprold $18, RX1, RTMP0; \ + vpternlogd $0x96, RTMP3, RX1, r0; /* r0 ^ x ^ rol(x,2) */ \ + vprold $24, RX1, RX1; \ + vpternlogd $0x96, RTMP1, RTMP2, RX0; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ + vpternlogd $0x96, RTMP4, RTMP0, RX1; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ + vpxor RX0, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ + vpxor RX1, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ + + leaq (32*4)(%rdi), %rax; +.align 16 +.Lroundloop_blk16: + ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3); + ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0); + ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1); + ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2); + leaq (4*4)(%rdi), %rdi; + cmpq %rax, %rdi; + jne .Lroundloop_blk16; + +#undef ROUND + + vbroadcasti128 .Lbswap128_mask rRIP, RTMP2; + + transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); + transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); + vpshufb RTMP2, RA0, RA0; + vpshufb RTMP2, RA1, RA1; + vpshufb RTMP2, RA2, RA2; + vpshufb RTMP2, RA3, RA3; + vpshufb RTMP2, RB0, RB0; + vpshufb RTMP2, RB1, RB1; + vpshufb RTMP2, RB2, RB2; + vpshufb RTMP2, RB3, RB3; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size __sm4_gfni_crypt_blk16,.-__sm4_gfni_crypt_blk16;) + +.align 16 +.globl 
_gcry_sm4_gfni_avx512_crypt_blk1_16 +ELF(.type _gcry_sm4_gfni_avx512_crypt_blk1_16,@function;) +_gcry_sm4_gfni_avx512_crypt_blk1_16: + /* input: + * %rdi: round key array, CTX + * %rsi: dst (1..16 blocks) + * %rdx: src (1..16 blocks) + * %rcx: num blocks (1..16) + */ + CFI_STARTPROC(); + +#define LOAD_INPUT(offset, yreg) \ + cmpq $(1 + 2 * (offset)), %rcx; \ + jb .Lblk16_load_input_done; \ + ja 1f; \ + vmovdqu (offset) * 32(%rdx), yreg##x; \ + jmp .Lblk16_load_input_done; \ + 1: \ + vmovdqu (offset) * 32(%rdx), yreg; + + cmpq $8, %rcx; + jbe _gcry_sm4_gfni_avx512_crypt_blk1_8; + vmovdqu (0 * 32)(%rdx), RA0; + vmovdqu (1 * 32)(%rdx), RA1; + vmovdqu (2 * 32)(%rdx), RA2; + vmovdqu (3 * 32)(%rdx), RA3; + LOAD_INPUT(4, RB0); + LOAD_INPUT(5, RB1); + LOAD_INPUT(6, RB2); + LOAD_INPUT(7, RB3); +#undef LOAD_INPUT + +.Lblk16_load_input_done: + call __sm4_gfni_crypt_blk16; + +#define STORE_OUTPUT(yreg, offset) \ + cmpq $(1 + 2 * (offset)), %rcx; \ + jb .Lblk16_store_output_done; \ + ja 1f; \ + vmovdqu yreg##x, (offset) * 32(%rsi); \ + jmp .Lblk16_store_output_done; \ + 1: \ + vmovdqu yreg, (offset) * 32(%rsi); + + vmovdqu RA0, (0 * 32)(%rsi); + vmovdqu RA1, (1 * 32)(%rsi); + vmovdqu RA2, (2 * 32)(%rsi); + vmovdqu RA3, (3 * 32)(%rsi); + STORE_OUTPUT(RB0, 4); + STORE_OUTPUT(RB1, 5); + STORE_OUTPUT(RB2, 6); + STORE_OUTPUT(RB3, 7); +#undef STORE_OUTPUT + +.Lblk16_store_output_done: + vzeroall; + xorl %eax, %eax; + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_crypt_blk1_16,.-_gcry_sm4_gfni_avx512_crypt_blk1_16;) + +#define add_le128(out, in, lo_counter, hi_counter1) \ + vpaddq lo_counter, in, out; \ + vpcmpuq $1, lo_counter, out, %k1; \ + kaddb %k1, %k1, %k1; \ + vpaddq hi_counter1, out, out{%k1}; + +.align 16 +.globl _gcry_sm4_gfni_avx512_ctr_enc +ELF(.type _gcry_sm4_gfni_avx512_ctr_enc,@function;) +_gcry_sm4_gfni_avx512_ctr_enc: + /* input: + * %rdi: ctx, CTX + * %rsi: dst (16 blocks) + * %rdx: src (16 blocks) + * %rcx: iv (big endian, 128bit) + */ + CFI_STARTPROC(); + + vbroadcasti128 .Lbswap128_mask rRIP, RTMP0; + vmovdqa .Lcounter0123_lo rRIP, RTMP1; + vbroadcasti128 .Lcounter2222_lo rRIP, RTMP2; + vbroadcasti128 .Lcounter4444_lo rRIP, RTMP3; + vbroadcasti128 .Lcounter8888_lo rRIP, RTMP4; + + /* load IV and byteswap */ + movq 8(%rcx), %r11; + bswapq %r11; + vbroadcasti128 (%rcx), RB3; + vpshufb RTMP0, RB3, RB3; + + /* check need for handling 64-bit overflow and carry */ + cmpq $(0xffffffffffffffff - 16), %r11; + ja .Lhandle_ctr_carry_blk16; + + /* construct IVs */ + vpaddq RTMP1, RB3, RA0; /* +0:+1 */ + vpaddq RTMP2, RA0, RA1; /* +2:+3 */ + vpaddq RTMP3, RA0, RA2; /* +4:+5 */ + vpaddq RTMP3, RA1, RA3; /* +6:+7 */ + vpaddq RTMP4, RA0, RB0; /* +8... */ + vpaddq RTMP4, RA1, RB1; /* +10... */ + vpaddq RTMP4, RA2, RB2; /* +12... */ + vpaddq RTMP4, RA3, RB3; /* +14... */ + + /* Update counter */ + leaq 16(%r11), %r11; + bswapq %r11; + movq %r11, 8(%rcx); + + jmp .Lctr_carry_done_blk16; + +.Lhandle_ctr_carry_blk16: + vbroadcasti128 .Lcounter1111_hi rRIP, RNOT; + + /* construct IVs */ + add_le128(RA0, RB3, RTMP1, RNOT); /* +0:+1 */ + add_le128(RA1, RA0, RTMP2, RNOT); /* +2:+3 */ + add_le128(RA2, RA0, RTMP3, RNOT); /* +4:+5 */ + add_le128(RA3, RA1, RTMP3, RNOT); /* +6:+7 */ + add_le128(RB0, RA0, RTMP4, RNOT); /* +8... */ + add_le128(RB1, RA1, RTMP4, RNOT); /* +10... */ + add_le128(RB2, RA2, RTMP4, RNOT); /* +12... */ + add_le128(RB3, RA3, RTMP4, RNOT); /* +14... 
*/ + + /* Update counter */ + addq $16, %r11; + movq (%rcx), %r10; + bswapq %r10; + adcq $0, %r10; + bswapq %r11; + bswapq %r10; + movq %r11, 8(%rcx); + movq %r10, (%rcx); + +.align 16 +.Lctr_carry_done_blk16: + /* Byte-swap IVs. */ + vpshufb RTMP0, RA0, RA0; + vpshufb RTMP0, RA1, RA1; + vpshufb RTMP0, RA2, RA2; + vpshufb RTMP0, RA3, RA3; + vpshufb RTMP0, RB0, RB0; + vpshufb RTMP0, RB1, RB1; + vpshufb RTMP0, RB2, RB2; + vpshufb RTMP0, RB3, RB3; + + call __sm4_gfni_crypt_blk16; + + vpxor (0 * 32)(%rdx), RA0, RA0; + vpxor (1 * 32)(%rdx), RA1, RA1; + vpxor (2 * 32)(%rdx), RA2, RA2; + vpxor (3 * 32)(%rdx), RA3, RA3; + vpxor (4 * 32)(%rdx), RB0, RB0; + vpxor (5 * 32)(%rdx), RB1, RB1; + vpxor (6 * 32)(%rdx), RB2, RB2; + vpxor (7 * 32)(%rdx), RB3, RB3; + + vmovdqu RA0, (0 * 32)(%rsi); + vmovdqu RA1, (1 * 32)(%rsi); + vmovdqu RA2, (2 * 32)(%rsi); + vmovdqu RA3, (3 * 32)(%rsi); + vmovdqu RB0, (4 * 32)(%rsi); + vmovdqu RB1, (5 * 32)(%rsi); + vmovdqu RB2, (6 * 32)(%rsi); + vmovdqu RB3, (7 * 32)(%rsi); + + vzeroall; + kxorq %k1, %k1, %k1; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_ctr_enc,.-_gcry_sm4_gfni_avx512_ctr_enc;) + +.align 16 +.globl _gcry_sm4_gfni_avx512_cbc_dec +ELF(.type _gcry_sm4_gfni_avx512_cbc_dec,@function;) +_gcry_sm4_gfni_avx512_cbc_dec: + /* input: + * %rdi: ctx, CTX + * %rsi: dst (16 blocks) + * %rdx: src (16 blocks) + * %rcx: iv + */ + CFI_STARTPROC(); + + vmovdqu (0 * 32)(%rdx), RA0; + vmovdqu (1 * 32)(%rdx), RA1; + vmovdqu (2 * 32)(%rdx), RA2; + vmovdqu (3 * 32)(%rdx), RA3; + vmovdqu (4 * 32)(%rdx), RB0; + vmovdqu (5 * 32)(%rdx), RB1; + vmovdqu (6 * 32)(%rdx), RB2; + vmovdqu (7 * 32)(%rdx), RB3; + + call __sm4_gfni_crypt_blk16; + + vmovdqu (%rcx), RNOTx; + vinserti128 $1, (%rdx), RNOT, RNOT; + vpxor RNOT, RA0, RA0; + vpxor (0 * 32 + 16)(%rdx), RA1, RA1; + vpxor (1 * 32 + 16)(%rdx), RA2, RA2; + vpxor (2 * 32 + 16)(%rdx), RA3, RA3; + vpxor (3 * 32 + 16)(%rdx), RB0, RB0; + vpxor (4 * 32 + 16)(%rdx), RB1, RB1; + vpxor (5 * 32 + 16)(%rdx), RB2, RB2; + vpxor (6 * 32 + 16)(%rdx), RB3, RB3; + vmovdqu (7 * 32 + 16)(%rdx), RNOTx; + vmovdqu RNOTx, (%rcx); /* store new IV */ + + vmovdqu RA0, (0 * 32)(%rsi); + vmovdqu RA1, (1 * 32)(%rsi); + vmovdqu RA2, (2 * 32)(%rsi); + vmovdqu RA3, (3 * 32)(%rsi); + vmovdqu RB0, (4 * 32)(%rsi); + vmovdqu RB1, (5 * 32)(%rsi); + vmovdqu RB2, (6 * 32)(%rsi); + vmovdqu RB3, (7 * 32)(%rsi); + + vzeroall; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_cbc_dec,.-_gcry_sm4_gfni_avx512_cbc_dec;) + +.align 16 +.globl _gcry_sm4_gfni_avx512_cfb_dec +ELF(.type _gcry_sm4_gfni_avx512_cfb_dec,@function;) +_gcry_sm4_gfni_avx512_cfb_dec: + /* input: + * %rdi: ctx, CTX + * %rsi: dst (16 blocks) + * %rdx: src (16 blocks) + * %rcx: iv + */ + CFI_STARTPROC(); + + /* Load input */ + vmovdqu (%rcx), RNOTx; + vinserti128 $1, (%rdx), RNOT, RA0; + vmovdqu (0 * 32 + 16)(%rdx), RA1; + vmovdqu (1 * 32 + 16)(%rdx), RA2; + vmovdqu (2 * 32 + 16)(%rdx), RA3; + vmovdqu (3 * 32 + 16)(%rdx), RB0; + vmovdqu (4 * 32 + 16)(%rdx), RB1; + vmovdqu (5 * 32 + 16)(%rdx), RB2; + vmovdqu (6 * 32 + 16)(%rdx), RB3; + + /* Update IV */ + vmovdqu (7 * 32 + 16)(%rdx), RNOTx; + vmovdqu RNOTx, (%rcx); + + call __sm4_gfni_crypt_blk16; + + vpxor (0 * 32)(%rdx), RA0, RA0; + vpxor (1 * 32)(%rdx), RA1, RA1; + vpxor (2 * 32)(%rdx), RA2, RA2; + vpxor (3 * 32)(%rdx), RA3, RA3; + vpxor (4 * 32)(%rdx), RB0, RB0; + vpxor (5 * 32)(%rdx), RB1, RB1; + vpxor (6 * 32)(%rdx), RB2, RB2; + vpxor (7 * 32)(%rdx), RB3, RB3; + + vmovdqu RA0, (0 * 32)(%rsi); + vmovdqu RA1, (1 * 
32)(%rsi); + vmovdqu RA2, (2 * 32)(%rsi); + vmovdqu RA3, (3 * 32)(%rsi); + vmovdqu RB0, (4 * 32)(%rsi); + vmovdqu RB1, (5 * 32)(%rsi); + vmovdqu RB2, (6 * 32)(%rsi); + vmovdqu RB3, (7 * 32)(%rsi); + + vzeroall; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_cfb_dec,.-_gcry_sm4_gfni_avx512_cfb_dec;) + +.align 16 +.globl _gcry_sm4_gfni_avx512_ocb_enc +ELF(.type _gcry_sm4_gfni_avx512_ocb_enc,@function;) + +_gcry_sm4_gfni_avx512_ocb_enc: + /* input: + * %rdi: ctx, CTX + * %rsi: dst (16 blocks) + * %rdx: src (16 blocks) + * %rcx: offset + * %r8 : checksum + * %r9 : L pointers (void *L[16]) + */ + CFI_STARTPROC(); + + subq $(4 * 8), %rsp; + CFI_ADJUST_CFA_OFFSET(4 * 8); + + movq %r10, (0 * 8)(%rsp); + movq %r11, (1 * 8)(%rsp); + movq %r12, (2 * 8)(%rsp); + movq %r13, (3 * 8)(%rsp); + CFI_REL_OFFSET(%r10, 0 * 8); + CFI_REL_OFFSET(%r11, 1 * 8); + CFI_REL_OFFSET(%r12, 2 * 8); + CFI_REL_OFFSET(%r13, 3 * 8); + + vmovdqu (%rcx), RTMP0x; + vmovdqu (%r8), RTMP1x; + + /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ + /* Checksum_i = Checksum_{i-1} xor P_i */ + /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ + +#define OCB_INPUT(n, l0reg, l1reg, yreg, inreg) \ + vmovdqu (n * 32)(%rdx), inreg; \ + vpxor (l0reg), RTMP0x, RNOTx; \ + vpxor (l1reg), RNOTx, RTMP0x; \ + vinserti128 $1, RTMP0x, RNOT, RNOT; \ + vpxor inreg, RNOT, yreg; \ + vmovdqu RNOT, (n * 32)(%rsi); + + movq (0 * 8)(%r9), %r10; + movq (1 * 8)(%r9), %r11; + movq (2 * 8)(%r9), %r12; + movq (3 * 8)(%r9), %r13; + OCB_INPUT(0, %r10, %r11, RA0, RTMP2); + OCB_INPUT(1, %r12, %r13, RA1, RTMP3); + movq (4 * 8)(%r9), %r10; + movq (5 * 8)(%r9), %r11; + movq (6 * 8)(%r9), %r12; + movq (7 * 8)(%r9), %r13; + OCB_INPUT(2, %r10, %r11, RA2, RTMP4); + vpternlogd $0x96, RTMP2, RTMP3, RTMP4; + OCB_INPUT(3, %r12, %r13, RA3, RX0); + movq (8 * 8)(%r9), %r10; + movq (9 * 8)(%r9), %r11; + movq (10 * 8)(%r9), %r12; + movq (11 * 8)(%r9), %r13; + OCB_INPUT(4, %r10, %r11, RB0, RX1); + OCB_INPUT(5, %r12, %r13, RB1, RTMP2); + vpternlogd $0x96, RX0, RX1, RTMP2; + movq (12 * 8)(%r9), %r10; + movq (13 * 8)(%r9), %r11; + movq (14 * 8)(%r9), %r12; + movq (15 * 8)(%r9), %r13; + OCB_INPUT(6, %r10, %r11, RB2, RTMP3); + OCB_INPUT(7, %r12, %r13, RB3, RX0); + vpternlogd $0x96, RTMP3, RX0, RTMP1; +#undef OCB_INPUT + + vpternlogd $0x96, RTMP4, RTMP2, RTMP1; + vextracti128 $1, RTMP1, RNOTx; + vmovdqu RTMP0x, (%rcx); + vpxor RNOTx, RTMP1x, RTMP1x; + vmovdqu RTMP1x, (%r8); + + movq (0 * 8)(%rsp), %r10; + movq (1 * 8)(%rsp), %r11; + movq (2 * 8)(%rsp), %r12; + movq (3 * 8)(%rsp), %r13; + CFI_RESTORE(%r10); + CFI_RESTORE(%r11); + CFI_RESTORE(%r12); + CFI_RESTORE(%r13); + + call __sm4_gfni_crypt_blk16; + + addq $(4 * 8), %rsp; + CFI_ADJUST_CFA_OFFSET(-4 * 8); + + vpxor (0 * 32)(%rsi), RA0, RA0; + vpxor (1 * 32)(%rsi), RA1, RA1; + vpxor (2 * 32)(%rsi), RA2, RA2; + vpxor (3 * 32)(%rsi), RA3, RA3; + vpxor (4 * 32)(%rsi), RB0, RB0; + vpxor (5 * 32)(%rsi), RB1, RB1; + vpxor (6 * 32)(%rsi), RB2, RB2; + vpxor (7 * 32)(%rsi), RB3, RB3; + + vmovdqu RA0, (0 * 32)(%rsi); + vmovdqu RA1, (1 * 32)(%rsi); + vmovdqu RA2, (2 * 32)(%rsi); + vmovdqu RA3, (3 * 32)(%rsi); + vmovdqu RB0, (4 * 32)(%rsi); + vmovdqu RB1, (5 * 32)(%rsi); + vmovdqu RB2, (6 * 32)(%rsi); + vmovdqu RB3, (7 * 32)(%rsi); + + vzeroall; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_ocb_enc,.-_gcry_sm4_gfni_avx512_ocb_enc;) + +.align 16 +.globl _gcry_sm4_gfni_avx512_ocb_dec +ELF(.type _gcry_sm4_gfni_avx512_ocb_dec,@function;) + +_gcry_sm4_gfni_avx512_ocb_dec: + /* input: + * %rdi: ctx, CTX + * 
%rsi: dst (16 blocks) + * %rdx: src (16 blocks) + * %rcx: offset + * %r8 : checksum + * %r9 : L pointers (void *L[16]) + */ + CFI_STARTPROC(); + + subq $(4 * 8), %rsp; + CFI_ADJUST_CFA_OFFSET(4 * 8); + + movq %r10, (0 * 8)(%rsp); + movq %r11, (1 * 8)(%rsp); + movq %r12, (2 * 8)(%rsp); + movq %r13, (3 * 8)(%rsp); + CFI_REL_OFFSET(%r10, 0 * 8); + CFI_REL_OFFSET(%r11, 1 * 8); + CFI_REL_OFFSET(%r12, 2 * 8); + CFI_REL_OFFSET(%r13, 3 * 8); + + vmovdqu (%rcx), RTMP0x; + + /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ + /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ + +#define OCB_INPUT(n, l0reg, l1reg, yreg) \ + vmovdqu (n * 32)(%rdx), yreg; \ + vpxor (l0reg), RTMP0x, RNOTx; \ + vpxor (l1reg), RNOTx, RTMP0x; \ + vinserti128 $1, RTMP0x, RNOT, RNOT; \ + vpxor yreg, RNOT, yreg; \ + vmovdqu RNOT, (n * 32)(%rsi); + + movq (0 * 8)(%r9), %r10; + movq (1 * 8)(%r9), %r11; + movq (2 * 8)(%r9), %r12; + movq (3 * 8)(%r9), %r13; + OCB_INPUT(0, %r10, %r11, RA0); + OCB_INPUT(1, %r12, %r13, RA1); + movq (4 * 8)(%r9), %r10; + movq (5 * 8)(%r9), %r11; + movq (6 * 8)(%r9), %r12; + movq (7 * 8)(%r9), %r13; + OCB_INPUT(2, %r10, %r11, RA2); + OCB_INPUT(3, %r12, %r13, RA3); + movq (8 * 8)(%r9), %r10; + movq (9 * 8)(%r9), %r11; + movq (10 * 8)(%r9), %r12; + movq (11 * 8)(%r9), %r13; + OCB_INPUT(4, %r10, %r11, RB0); + OCB_INPUT(5, %r12, %r13, RB1); + movq (12 * 8)(%r9), %r10; + movq (13 * 8)(%r9), %r11; + movq (14 * 8)(%r9), %r12; + movq (15 * 8)(%r9), %r13; + OCB_INPUT(6, %r10, %r11, RB2); + OCB_INPUT(7, %r12, %r13, RB3); +#undef OCB_INPUT + + vmovdqu RTMP0x, (%rcx); + + movq (0 * 8)(%rsp), %r10; + movq (1 * 8)(%rsp), %r11; + movq (2 * 8)(%rsp), %r12; + movq (3 * 8)(%rsp), %r13; + CFI_RESTORE(%r10); + CFI_RESTORE(%r11); + CFI_RESTORE(%r12); + CFI_RESTORE(%r13); + + call __sm4_gfni_crypt_blk16; + + addq $(4 * 8), %rsp; + CFI_ADJUST_CFA_OFFSET(-4 * 8); + + vpxor (0 * 32)(%rsi), RA0, RA0; + vpxor (1 * 32)(%rsi), RA1, RA1; + vpxor (2 * 32)(%rsi), RA2, RA2; + vpxor (3 * 32)(%rsi), RA3, RA3; + vpxor (4 * 32)(%rsi), RB0, RB0; + vpxor (5 * 32)(%rsi), RB1, RB1; + vpxor (6 * 32)(%rsi), RB2, RB2; + vpxor (7 * 32)(%rsi), RB3, RB3; + + /* Checksum_i = Checksum_{i-1} xor P_i */ + + vmovdqu RA0, (0 * 32)(%rsi); + vmovdqu RA1, (1 * 32)(%rsi); + vmovdqu RA2, (2 * 32)(%rsi); + vmovdqu RA3, (3 * 32)(%rsi); + vmovdqu RB0, (4 * 32)(%rsi); + vmovdqu RB1, (5 * 32)(%rsi); + vmovdqu RB2, (6 * 32)(%rsi); + vmovdqu RB3, (7 * 32)(%rsi); + + vpternlogd $0x96, RA0, RA1, RA2; + vpternlogd $0x96, RA3, RB0, RB1; + vpternlogd $0x96, RB2, RB3, RA2; + vpxord RA2, RB1, RTMP1; + + vextracti128 $1, RTMP1, RNOTx; + vpternlogd $0x96, (%r8), RNOTx, RTMP1x; + vmovdqu RTMP1x, (%r8); + + vzeroall; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_ocb_dec,.-_gcry_sm4_gfni_avx512_ocb_dec;) + +.align 16 +.globl _gcry_sm4_gfni_avx512_ocb_auth +ELF(.type _gcry_sm4_gfni_avx512_ocb_auth,@function;) + +_gcry_sm4_gfni_avx512_ocb_auth: + /* input: + * %rdi: ctx, CTX + * %rsi: abuf (16 blocks) + * %rdx: offset + * %rcx: checksum + * %r8 : L pointers (void *L[16]) + */ + CFI_STARTPROC(); + + subq $(4 * 8), %rsp; + CFI_ADJUST_CFA_OFFSET(4 * 8); + + movq %r10, (0 * 8)(%rsp); + movq %r11, (1 * 8)(%rsp); + movq %r12, (2 * 8)(%rsp); + movq %r13, (3 * 8)(%rsp); + CFI_REL_OFFSET(%r10, 0 * 8); + CFI_REL_OFFSET(%r11, 1 * 8); + CFI_REL_OFFSET(%r12, 2 * 8); + CFI_REL_OFFSET(%r13, 3 * 8); + + vmovdqu (%rdx), RTMP0x; + + /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ + /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ + +#define OCB_INPUT(n, l0reg, l1reg, 
yreg) \ + vmovdqu (n * 32)(%rsi), yreg; \ + vpxor (l0reg), RTMP0x, RNOTx; \ + vpxor (l1reg), RNOTx, RTMP0x; \ + vinserti128 $1, RTMP0x, RNOT, RNOT; \ + vpxor yreg, RNOT, yreg; + + movq (0 * 8)(%r8), %r10; + movq (1 * 8)(%r8), %r11; + movq (2 * 8)(%r8), %r12; + movq (3 * 8)(%r8), %r13; + OCB_INPUT(0, %r10, %r11, RA0); + OCB_INPUT(1, %r12, %r13, RA1); + movq (4 * 8)(%r8), %r10; + movq (5 * 8)(%r8), %r11; + movq (6 * 8)(%r8), %r12; + movq (7 * 8)(%r8), %r13; + OCB_INPUT(2, %r10, %r11, RA2); + OCB_INPUT(3, %r12, %r13, RA3); + movq (8 * 8)(%r8), %r10; + movq (9 * 8)(%r8), %r11; + movq (10 * 8)(%r8), %r12; + movq (11 * 8)(%r8), %r13; + OCB_INPUT(4, %r10, %r11, RB0); + OCB_INPUT(5, %r12, %r13, RB1); + movq (12 * 8)(%r8), %r10; + movq (13 * 8)(%r8), %r11; + movq (14 * 8)(%r8), %r12; + movq (15 * 8)(%r8), %r13; + OCB_INPUT(6, %r10, %r11, RB2); + OCB_INPUT(7, %r12, %r13, RB3); +#undef OCB_INPUT + + vmovdqu RTMP0x, (%rdx); + + movq (0 * 8)(%rsp), %r10; + movq (1 * 8)(%rsp), %r11; + movq (2 * 8)(%rsp), %r12; + movq (3 * 8)(%rsp), %r13; + CFI_RESTORE(%r10); + CFI_RESTORE(%r11); + CFI_RESTORE(%r12); + CFI_RESTORE(%r13); + + call __sm4_gfni_crypt_blk16; + + addq $(4 * 8), %rsp; + CFI_ADJUST_CFA_OFFSET(-4 * 8); + + vpternlogd $0x96, RA0, RA1, RA2; + vpternlogd $0x96, RA3, RB0, RB1; + vpternlogd $0x96, RB2, RB3, RA2; + vpxor RA2, RB1, RTMP1; + + vextracti128 $1, RTMP1, RNOTx; + vpternlogd $0x96, (%rcx), RNOTx, RTMP1x; + vmovdqu RTMP1x, (%rcx); + + vzeroall; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_ocb_auth,.-_gcry_sm4_gfni_avx512_ocb_auth;) + +/********************************************************************** + 32-way SM4 with GFNI and AVX512 (512-bit vectors) + **********************************************************************/ + +.align 16 +ELF(.type __sm4_gfni_crypt_blk32,@function;) +__sm4_gfni_crypt_blk32: + /* input: + * %rdi: ctx, CTX + * RA0z, RA1z, RA2z, RA3z, RB0z, RB1z, RB2z, RB3z: 32 parallel plaintext blocks + * output: + * RA0z, RA1z, RA2z, RA3z, RB0z, RB1z, RB2z, RB3z: 32 parallel ciphertext blocks + */ + CFI_STARTPROC(); + + vbroadcasti32x4 .Lbswap32_mask rRIP, RTMP2z; + vpshufb RTMP2z, RA0z, RA0z; + vpshufb RTMP2z, RA1z, RA1z; + vpshufb RTMP2z, RA2z, RA2z; + vpshufb RTMP2z, RA3z, RA3z; + vpshufb RTMP2z, RB0z, RB0z; + vpshufb RTMP2z, RB1z, RB1z; + vpshufb RTMP2z, RB2z, RB2z; + vpshufb RTMP2z, RB3z, RB3z; + + vbroadcasti32x4 .Lpre_affine_s rRIP, %zmm16; + vbroadcasti32x4 .Lpost_affine_s rRIP, %zmm17; + + transpose_4x4(RA0z, RA1z, RA2z, RA3z, RTMP0z, RTMP1z); + transpose_4x4(RB0z, RB1z, RB2z, RB3z, RTMP0z, RTMP1z); + +#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ + vpbroadcastd (4*(round))(%rdi), RX1z; \ + vpxord s1, RX1z, RX0z; \ + vpternlogd $0x96, s2, s3, RX0z; /* s1 ^ s2 ^ s3 ^ rk */ \ + vpxord r1, RX1z, RX1z; \ + vpternlogd $0x96, r2, r3, RX1z; /* r1 ^ r2 ^ r3 ^ rk */ \ + \ + /* sbox, non-linear part */ \ + vgf2p8affineqb $0x65, %zmm16, RX0z, RX0z; \ + vgf2p8affineinvqb $0xd3, %zmm17, RX0z, RX0z; \ + vgf2p8affineqb $0x65, %zmm16, RX1z, RX1z; \ + vgf2p8affineinvqb $0xd3, %zmm17, RX1z, RX1z; \ + \ + /* linear part */ \ + vprold $2, RX0z, RTMP0z; \ + vprold $10, RX0z, RTMP1z; \ + vprold $18, RX0z, RTMP2z; \ + vpternlogd $0x96, RTMP0z, RX0z, s0; /* s0 ^ x ^ rol(x,2) */ \ + vprold $24, RX0z, RX0z; \ + vprold $2, RX1z, RTMP3z; \ + vprold $10, RX1z, RTMP4z; \ + vprold $18, RX1z, RTMP0z; \ + vpternlogd $0x96, RTMP3z, RX1z, r0; /* r0 ^ x ^ rol(x,2) */ \ + vprold $24, RX1z, RX1z; \ + vpternlogd $0x96, RTMP1z, RTMP2z, RX0z; /* rol(x,10) ^ rol(x,18) ^ 
rol(x,24) */ \ + vpternlogd $0x96, RTMP4z, RTMP0z, RX1z; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ + vpxord RX0z, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ + vpxord RX1z, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ + + leaq (32*4)(%rdi), %rax; +.align 16 +.Lroundloop_blk32: + ROUND(0, RA0z, RA1z, RA2z, RA3z, RB0z, RB1z, RB2z, RB3z); + ROUND(1, RA1z, RA2z, RA3z, RA0z, RB1z, RB2z, RB3z, RB0z); + ROUND(2, RA2z, RA3z, RA0z, RA1z, RB2z, RB3z, RB0z, RB1z); + ROUND(3, RA3z, RA0z, RA1z, RA2z, RB3z, RB0z, RB1z, RB2z); + leaq (4*4)(%rdi), %rdi; + cmpq %rax, %rdi; + jne .Lroundloop_blk32; + +#undef ROUND + + vbroadcasti32x4 .Lbswap128_mask rRIP, RTMP2z; + + transpose_4x4(RA0z, RA1z, RA2z, RA3z, RTMP0z, RTMP1z); + transpose_4x4(RB0z, RB1z, RB2z, RB3z, RTMP0z, RTMP1z); + vpshufb RTMP2z, RA0z, RA0z; + vpshufb RTMP2z, RA1z, RA1z; + vpshufb RTMP2z, RA2z, RA2z; + vpshufb RTMP2z, RA3z, RA3z; + vpshufb RTMP2z, RB0z, RB0z; + vpshufb RTMP2z, RB1z, RB1z; + vpshufb RTMP2z, RB2z, RB2z; + vpshufb RTMP2z, RB3z, RB3z; + + vpxord %zmm16, %zmm16, %zmm16; + vpxord %zmm17, %zmm17, %zmm17; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size __sm4_gfni_crypt_blk32,.-__sm4_gfni_crypt_blk32;) + +.align 16 +.globl _gcry_sm4_gfni_avx512_crypt_blk32 +ELF(.type _gcry_sm4_gfni_avx512_crypt_blk32,@function;) +_gcry_sm4_gfni_avx512_crypt_blk32: + /* input: + * %rdi: ctx, CTX + * %rsi: dst (32 blocks) + * %rdx: src (32 blocks) + */ + CFI_STARTPROC(); + vpopcntb %zmm16, %zmm16; /* spec stop for old AVX512 CPUs */ + + /* Load input */ + vmovdqu32 (0 * 64)(%rdx), RA0z; + vmovdqu32 (1 * 64)(%rdx), RA1z; + vmovdqu32 (2 * 64)(%rdx), RA2z; + vmovdqu32 (3 * 64)(%rdx), RA3z; + vmovdqu32 (4 * 64)(%rdx), RB0z; + vmovdqu32 (5 * 64)(%rdx), RB1z; + vmovdqu32 (6 * 64)(%rdx), RB2z; + vmovdqu32 (7 * 64)(%rdx), RB3z; + + call __sm4_gfni_crypt_blk32; + + vmovdqu32 RA0z, (0 * 64)(%rsi); + vmovdqu32 RA1z, (1 * 64)(%rsi); + vmovdqu32 RA2z, (2 * 64)(%rsi); + vmovdqu32 RA3z, (3 * 64)(%rsi); + vmovdqu32 RB0z, (4 * 64)(%rsi); + vmovdqu32 RB1z, (5 * 64)(%rsi); + vmovdqu32 RB2z, (6 * 64)(%rsi); + vmovdqu32 RB3z, (7 * 64)(%rsi); + + xorl %eax, %eax; + vzeroall; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_crypt_blk32,.-_gcry_sm4_gfni_avx512_crypt_blk32;) + +.align 16 +.globl _gcry_sm4_gfni_avx512_ctr_enc_blk32 +ELF(.type _gcry_sm4_gfni_avx512_ctr_enc_blk32,@function;) +_gcry_sm4_gfni_avx512_ctr_enc_blk32: + /* input: + * %rdi: ctx, CTX + * %rsi: dst (32 blocks) + * %rdx: src (32 blocks) + * %rcx: iv (big endian, 128bit) + */ + CFI_STARTPROC(); + vpopcntb %zmm16, %zmm16; /* spec stop for old AVX512 CPUs */ + + vbroadcasti64x2 .Lbswap128_mask rRIP, RTMP0z; + vmovdqa32 .Lcounter0123_lo rRIP, RTMP1z; + vbroadcasti64x2 .Lcounter4444_lo rRIP, RTMP2z; + vbroadcasti64x2 .Lcounter8888_lo rRIP, RTMP3z; + vbroadcasti64x2 .Lcounter16161616_lo rRIP, RTMP4z; + + /* load IV and byteswap */ + movq 8(%rcx), %r11; + bswapq %r11; + vbroadcasti64x2 (%rcx), RB3z; + vpshufb RTMP0z, RB3z, RB3z; + + /* check need for handling 64-bit overflow and carry */ + cmpq $(0xffffffffffffffff - 32), %r11; + ja .Lhandle_ctr_carry_blk32; + + /* construct IVs */ + vpaddq RTMP1z, RB3z, RA0z; /* +0:+1:+2:+3 */ + vpaddq RTMP2z, RA0z, RA1z; /* +4:+5:+6:+7 */ + vpaddq RTMP3z, RA0z, RA2z; /* +8:+9:+10:+11 */ + vpaddq RTMP3z, RA1z, RA3z; /* +12:+13:+14:+15 */ + vpaddq RTMP4z, RA0z, RB0z; /* +16... */ + vpaddq RTMP4z, RA1z, RB1z; /* +20... */ + vpaddq RTMP4z, RA2z, RB2z; /* +24... */ + vpaddq RTMP4z, RA3z, RB3z; /* +28... 
*/ + + /* Update counter */ + leaq 32(%r11), %r11; + bswapq %r11; + movq %r11, 8(%rcx); + + jmp .Lctr_carry_done_blk32; + +.Lhandle_ctr_carry_blk32: + vbroadcasti64x2 .Lcounter1111_hi rRIP, RNOTz; + + /* construct IVs */ + add_le128(RA0z, RB3z, RTMP1z, RNOTz); /* +0:+1:+2:+3 */ + add_le128(RA1z, RA0z, RTMP2z, RNOTz); /* +4:+5:+6:+7 */ + add_le128(RA2z, RA0z, RTMP3z, RNOTz); /* +8:+9:+10:+11 */ + add_le128(RA3z, RA1z, RTMP3z, RNOTz); /* +12:+13:+14:+15 */ + add_le128(RB0z, RA0z, RTMP4z, RNOTz); /* +16... */ + add_le128(RB1z, RA1z, RTMP4z, RNOTz); /* +20... */ + add_le128(RB2z, RA2z, RTMP4z, RNOTz); /* +24... */ + add_le128(RB3z, RA3z, RTMP4z, RNOTz); /* +28... */ + + /* Update counter */ + addq $32, %r11; + movq (%rcx), %r10; + bswapq %r10; + adcq $0, %r10; + bswapq %r11; + bswapq %r10; + movq %r11, 8(%rcx); + movq %r10, (%rcx); + +.align 16 +.Lctr_carry_done_blk32: + /* Byte-swap IVs. */ + vpshufb RTMP0z, RA0z, RA0z; + vpshufb RTMP0z, RA1z, RA1z; + vpshufb RTMP0z, RA2z, RA2z; + vpshufb RTMP0z, RA3z, RA3z; + vpshufb RTMP0z, RB0z, RB0z; + vpshufb RTMP0z, RB1z, RB1z; + vpshufb RTMP0z, RB2z, RB2z; + vpshufb RTMP0z, RB3z, RB3z; + + call __sm4_gfni_crypt_blk32; + + vpxord (0 * 64)(%rdx), RA0z, RA0z; + vpxord (1 * 64)(%rdx), RA1z, RA1z; + vpxord (2 * 64)(%rdx), RA2z, RA2z; + vpxord (3 * 64)(%rdx), RA3z, RA3z; + vpxord (4 * 64)(%rdx), RB0z, RB0z; + vpxord (5 * 64)(%rdx), RB1z, RB1z; + vpxord (6 * 64)(%rdx), RB2z, RB2z; + vpxord (7 * 64)(%rdx), RB3z, RB3z; + + vmovdqu32 RA0z, (0 * 64)(%rsi); + vmovdqu32 RA1z, (1 * 64)(%rsi); + vmovdqu32 RA2z, (2 * 64)(%rsi); + vmovdqu32 RA3z, (3 * 64)(%rsi); + vmovdqu32 RB0z, (4 * 64)(%rsi); + vmovdqu32 RB1z, (5 * 64)(%rsi); + vmovdqu32 RB2z, (6 * 64)(%rsi); + vmovdqu32 RB3z, (7 * 64)(%rsi); + + vzeroall; + kxorq %k1, %k1, %k1; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_ctr_enc_blk32,.-_gcry_sm4_gfni_avx512_ctr_enc_blk32;) + +.align 16 +.globl _gcry_sm4_gfni_avx512_cbc_dec_blk32 +ELF(.type _gcry_sm4_gfni_avx512_cbc_dec_blk32,@function;) +_gcry_sm4_gfni_avx512_cbc_dec_blk32: + /* input: + * %rdi: ctx, CTX + * %rsi: dst (32 blocks) + * %rdx: src (32 blocks) + * %rcx: iv + */ + CFI_STARTPROC(); + vpopcntb %zmm16, %zmm16; /* spec stop for old AVX512 CPUs */ + + vmovdqu32 (0 * 64)(%rdx), RA0z; + vmovdqu32 (1 * 64)(%rdx), RA1z; + vmovdqu32 (2 * 64)(%rdx), RA2z; + vmovdqu32 (3 * 64)(%rdx), RA3z; + vmovdqu32 (4 * 64)(%rdx), RB0z; + vmovdqu32 (5 * 64)(%rdx), RB1z; + vmovdqu32 (6 * 64)(%rdx), RB2z; + vmovdqu32 (7 * 64)(%rdx), RB3z; + + call __sm4_gfni_crypt_blk32; + + vmovdqu (%rcx), RNOTx; + vinserti64x2 $1, (0 * 16)(%rdx), RNOT, RNOT; + vinserti64x4 $1, (1 * 16)(%rdx), RNOTz, RNOTz; + vpxord RNOTz, RA0z, RA0z; + vpxord (0 * 64 + 48)(%rdx), RA1z, RA1z; + vpxord (1 * 64 + 48)(%rdx), RA2z, RA2z; + vpxord (2 * 64 + 48)(%rdx), RA3z, RA3z; + vpxord (3 * 64 + 48)(%rdx), RB0z, RB0z; + vpxord (4 * 64 + 48)(%rdx), RB1z, RB1z; + vpxord (5 * 64 + 48)(%rdx), RB2z, RB2z; + vpxord (6 * 64 + 48)(%rdx), RB3z, RB3z; + vmovdqu (7 * 64 + 48)(%rdx), RNOTx; + vmovdqu RNOTx, (%rcx); /* store new IV */ + + vmovdqu32 RA0z, (0 * 64)(%rsi); + vmovdqu32 RA1z, (1 * 64)(%rsi); + vmovdqu32 RA2z, (2 * 64)(%rsi); + vmovdqu32 RA3z, (3 * 64)(%rsi); + vmovdqu32 RB0z, (4 * 64)(%rsi); + vmovdqu32 RB1z, (5 * 64)(%rsi); + vmovdqu32 RB2z, (6 * 64)(%rsi); + vmovdqu32 RB3z, (7 * 64)(%rsi); + + vzeroall; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_cbc_dec_blk32,.-_gcry_sm4_gfni_avx512_cbc_dec_blk32;) + +.align 16 +.globl _gcry_sm4_gfni_avx512_cfb_dec_blk32 
+ELF(.type _gcry_sm4_gfni_avx512_cfb_dec_blk32,@function;) +_gcry_sm4_gfni_avx512_cfb_dec_blk32: + /* input: + * %rdi: ctx, CTX + * %rsi: dst (32 blocks) + * %rdx: src (32 blocks) + * %rcx: iv + */ + CFI_STARTPROC(); + vpopcntb %zmm16, %zmm16; /* spec stop for old AVX512 CPUs */ + + /* Load input */ + vmovdqu (%rcx), RA0x; + vinserti64x2 $1, (%rdx), RA0, RA0; + vinserti64x4 $1, 16(%rdx), RA0z, RA0z; + vmovdqu32 (0 * 64 + 48)(%rdx), RA1z; + vmovdqu32 (1 * 64 + 48)(%rdx), RA2z; + vmovdqu32 (2 * 64 + 48)(%rdx), RA3z; + vmovdqu32 (3 * 64 + 48)(%rdx), RB0z; + vmovdqu32 (4 * 64 + 48)(%rdx), RB1z; + vmovdqu32 (5 * 64 + 48)(%rdx), RB2z; + vmovdqu32 (6 * 64 + 48)(%rdx), RB3z; + + /* Update IV */ + vmovdqu (7 * 64 + 48)(%rdx), RNOTx; + vmovdqu RNOTx, (%rcx); + + call __sm4_gfni_crypt_blk32; + + vpxord (0 * 64)(%rdx), RA0z, RA0z; + vpxord (1 * 64)(%rdx), RA1z, RA1z; + vpxord (2 * 64)(%rdx), RA2z, RA2z; + vpxord (3 * 64)(%rdx), RA3z, RA3z; + vpxord (4 * 64)(%rdx), RB0z, RB0z; + vpxord (5 * 64)(%rdx), RB1z, RB1z; + vpxord (6 * 64)(%rdx), RB2z, RB2z; + vpxord (7 * 64)(%rdx), RB3z, RB3z; + + vmovdqu32 RA0z, (0 * 64)(%rsi); + vmovdqu32 RA1z, (1 * 64)(%rsi); + vmovdqu32 RA2z, (2 * 64)(%rsi); + vmovdqu32 RA3z, (3 * 64)(%rsi); + vmovdqu32 RB0z, (4 * 64)(%rsi); + vmovdqu32 RB1z, (5 * 64)(%rsi); + vmovdqu32 RB2z, (6 * 64)(%rsi); + vmovdqu32 RB3z, (7 * 64)(%rsi); + + vzeroall; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_cfb_dec_blk32,.-_gcry_sm4_gfni_avx512_cfb_dec_blk32;) + +.align 16 +.globl _gcry_sm4_gfni_avx512_ocb_enc_blk32 +ELF(.type _gcry_sm4_gfni_avx512_ocb_enc_blk32,@function;) +_gcry_sm4_gfni_avx512_ocb_enc_blk32: + /* input: + * %rdi: ctx, CTX + * %rsi: dst (32 blocks) + * %rdx: src (32 blocks) + * %rcx: offset + * %r8 : checksum + * %r9 : L pointers (void *L[32]) + */ + CFI_STARTPROC(); + vpopcntb %zmm16, %zmm16; /* spec stop for old AVX512 CPUs */ + + subq $(5 * 8), %rsp; + CFI_ADJUST_CFA_OFFSET(5 * 8); + + movq %r12, (0 * 8)(%rsp); + movq %r13, (1 * 8)(%rsp); + movq %r14, (2 * 8)(%rsp); + movq %r15, (3 * 8)(%rsp); + movq %rbx, (4 * 8)(%rsp); + CFI_REL_OFFSET(%r12, 0 * 8); + CFI_REL_OFFSET(%r13, 1 * 8); + CFI_REL_OFFSET(%r14, 2 * 8); + CFI_REL_OFFSET(%r15, 3 * 8); + CFI_REL_OFFSET(%rbx, 4 * 8); + + vmovdqu (%rcx), RTMP0x; + + /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ + /* Checksum_i = Checksum_{i-1} xor P_i */ + /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ + +#define OCB_INPUT(n, l0reg, l1reg, l2reg, l3reg, zreg, zplain) \ + vmovdqu32 (n * 64)(%rdx), zplain; \ + vpxor (l0reg), RTMP0x, RNOTx; \ + vpxor (l1reg), RNOTx, RTMP0x; \ + vinserti64x2 $1, RTMP0x, RNOT, RNOT; \ + vpxor (l2reg), RTMP0x, RTMP0x; \ + vinserti64x2 $2, RTMP0x, RNOTz, RNOTz; \ + vpxor (l3reg), RTMP0x, RTMP0x; \ + vinserti64x2 $3, RTMP0x, RNOTz, RNOTz; \ + vpxord zplain, RNOTz, zreg; \ + vmovdqu32 RNOTz, (n * 64)(%rsi); + +#define OCB_LOAD_PTRS(n) \ + movq ((n * 4 * 8) + (0 * 8))(%r9), %r10; \ + movq ((n * 4 * 8) + (1 * 8))(%r9), %r11; \ + movq ((n * 4 * 8) + (2 * 8))(%r9), %r12; \ + movq ((n * 4 * 8) + (3 * 8))(%r9), %r13; \ + movq ((n * 4 * 8) + (4 * 8))(%r9), %r14; \ + movq ((n * 4 * 8) + (5 * 8))(%r9), %r15; \ + movq ((n * 4 * 8) + (6 * 8))(%r9), %rax; \ + movq ((n * 4 * 8) + (7 * 8))(%r9), %rbx; + + OCB_LOAD_PTRS(0); + OCB_INPUT(0, %r10, %r11, %r12, %r13, RA0z, RTMP1z); + OCB_INPUT(1, %r14, %r15, %rax, %rbx, RA1z, RTMP2z); + OCB_LOAD_PTRS(2); + OCB_INPUT(2, %r10, %r11, %r12, %r13, RA2z, RTMP3z); + vpternlogd $0x96, RTMP1z, RTMP2z, RTMP3z; + OCB_INPUT(3, %r14, %r15, %rax, %rbx, RA3z, 
RTMP4z); + OCB_LOAD_PTRS(4); + OCB_INPUT(4, %r10, %r11, %r12, %r13, RB0z, RX0z); + OCB_INPUT(5, %r14, %r15, %rax, %rbx, RB1z, RX1z); + vpternlogd $0x96, RTMP4z, RX0z, RX1z; + OCB_LOAD_PTRS(6); + OCB_INPUT(6, %r10, %r11, %r12, %r13, RB2z, RTMP4z); + OCB_INPUT(7, %r14, %r15, %rax, %rbx, RB3z, RX0z); +#undef OCB_LOAD_PTRS +#undef OCB_INPUT + + vpternlogd $0x96, RTMP3z, RTMP4z, RX0z; + vpxord RX1z, RX0z, RNOTz; + vextracti64x4 $1, RNOTz, RTMP1; + vpxor RTMP1, RNOT, RNOT; + vextracti128 $1, RNOT, RTMP1x; + vpternlogd $0x96, (%r8), RTMP1x, RNOTx; + + movq (0 * 8)(%rsp), %r12; + movq (1 * 8)(%rsp), %r13; + movq (2 * 8)(%rsp), %r14; + movq (3 * 8)(%rsp), %r15; + movq (4 * 8)(%rsp), %rbx; + CFI_RESTORE(%r12); + CFI_RESTORE(%r13); + CFI_RESTORE(%r14); + CFI_RESTORE(%r15); + CFI_RESTORE(%rbx); + + vmovdqu RTMP0x, (%rcx); + vmovdqu RNOTx, (%r8); + + call __sm4_gfni_crypt_blk32; + + addq $(5 * 8), %rsp; + CFI_ADJUST_CFA_OFFSET(-5 * 8); + + vpxord (0 * 64)(%rsi), RA0z, RA0z; + vpxord (1 * 64)(%rsi), RA1z, RA1z; + vpxord (2 * 64)(%rsi), RA2z, RA2z; + vpxord (3 * 64)(%rsi), RA3z, RA3z; + vpxord (4 * 64)(%rsi), RB0z, RB0z; + vpxord (5 * 64)(%rsi), RB1z, RB1z; + vpxord (6 * 64)(%rsi), RB2z, RB2z; + vpxord (7 * 64)(%rsi), RB3z, RB3z; + + vmovdqu32 RA0z, (0 * 64)(%rsi); + vmovdqu32 RA1z, (1 * 64)(%rsi); + vmovdqu32 RA2z, (2 * 64)(%rsi); + vmovdqu32 RA3z, (3 * 64)(%rsi); + vmovdqu32 RB0z, (4 * 64)(%rsi); + vmovdqu32 RB1z, (5 * 64)(%rsi); + vmovdqu32 RB2z, (6 * 64)(%rsi); + vmovdqu32 RB3z, (7 * 64)(%rsi); + + vzeroall; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_ocb_enc_blk32,.-_gcry_sm4_gfni_avx512_ocb_enc_blk32;) + +.align 16 +.globl _gcry_sm4_gfni_avx512_ocb_dec_blk32 +ELF(.type _gcry_sm4_gfni_avx512_ocb_dec_blk32,@function;) +_gcry_sm4_gfni_avx512_ocb_dec_blk32: + /* input: + * %rdi: ctx, CTX + * %rsi: dst (32 blocks) + * %rdx: src (32 blocks) + * %rcx: offset + * %r8 : checksum + * %r9 : L pointers (void *L[32]) + */ + CFI_STARTPROC(); + vpopcntb %zmm16, %zmm16; /* spec stop for old AVX512 CPUs */ + + subq $(5 * 8), %rsp; + CFI_ADJUST_CFA_OFFSET(5 * 8); + + movq %r12, (0 * 8)(%rsp); + movq %r13, (1 * 8)(%rsp); + movq %r14, (2 * 8)(%rsp); + movq %r15, (3 * 8)(%rsp); + movq %rbx, (4 * 8)(%rsp); + CFI_REL_OFFSET(%r12, 0 * 8); + CFI_REL_OFFSET(%r13, 1 * 8); + CFI_REL_OFFSET(%r14, 2 * 8); + CFI_REL_OFFSET(%r15, 3 * 8); + CFI_REL_OFFSET(%rbx, 4 * 8); + + vmovdqu (%rcx), RTMP0x; + + /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ + /* C_i = Offset_i xor DECIPHER(K, P_i xor Offset_i) */ + +#define OCB_INPUT(n, l0reg, l1reg, l2reg, l3reg, zreg) \ + vmovdqu32 (n * 64)(%rdx), RTMP1z; \ + vpxor (l0reg), RTMP0x, RNOTx; \ + vpxor (l1reg), RNOTx, RTMP0x; \ + vinserti64x2 $1, RTMP0x, RNOT, RNOT; \ + vpxor (l2reg), RTMP0x, RTMP0x; \ + vinserti64x2 $2, RTMP0x, RNOTz, RNOTz; \ + vpxor (l3reg), RTMP0x, RTMP0x; \ + vinserti64x2 $3, RTMP0x, RNOTz, RNOTz; \ + vpxord RTMP1z, RNOTz, zreg; \ + vmovdqu32 RNOTz, (n * 64)(%rsi); + +#define OCB_LOAD_PTRS(n) \ + movq ((n * 4 * 8) + (0 * 8))(%r9), %r10; \ + movq ((n * 4 * 8) + (1 * 8))(%r9), %r11; \ + movq ((n * 4 * 8) + (2 * 8))(%r9), %r12; \ + movq ((n * 4 * 8) + (3 * 8))(%r9), %r13; \ + movq ((n * 4 * 8) + (4 * 8))(%r9), %r14; \ + movq ((n * 4 * 8) + (5 * 8))(%r9), %r15; \ + movq ((n * 4 * 8) + (6 * 8))(%r9), %rax; \ + movq ((n * 4 * 8) + (7 * 8))(%r9), %rbx; + + OCB_LOAD_PTRS(0); + OCB_INPUT(0, %r10, %r11, %r12, %r13, RA0z); + OCB_INPUT(1, %r14, %r15, %rax, %rbx, RA1z); + OCB_LOAD_PTRS(2); + OCB_INPUT(2, %r10, %r11, %r12, %r13, RA2z); + OCB_INPUT(3, %r14, %r15, 
%rax, %rbx, RA3z); + OCB_LOAD_PTRS(4); + OCB_INPUT(4, %r10, %r11, %r12, %r13, RB0z); + OCB_INPUT(5, %r14, %r15, %rax, %rbx, RB1z); + OCB_LOAD_PTRS(6); + OCB_INPUT(6, %r10, %r11, %r12, %r13, RB2z); + OCB_INPUT(7, %r14, %r15, %rax, %rbx, RB3z); +#undef OCB_LOAD_PTRS +#undef OCB_INPUT + + movq (0 * 8)(%rsp), %r12; + movq (1 * 8)(%rsp), %r13; + movq (2 * 8)(%rsp), %r14; + movq (3 * 8)(%rsp), %r15; + movq (4 * 8)(%rsp), %rbx; + CFI_RESTORE(%r12); + CFI_RESTORE(%r13); + CFI_RESTORE(%r14); + CFI_RESTORE(%r15); + CFI_RESTORE(%rbx); + + vmovdqu RTMP0x, (%rcx); + + call __sm4_gfni_crypt_blk32; + + addq $(5 * 8), %rsp; + CFI_ADJUST_CFA_OFFSET(-5 * 8); + + vpxord (0 * 64)(%rsi), RA0z, RA0z; + vpxord (1 * 64)(%rsi), RA1z, RA1z; + vpxord (2 * 64)(%rsi), RA2z, RA2z; + vpxord (3 * 64)(%rsi), RA3z, RA3z; + vpxord (4 * 64)(%rsi), RB0z, RB0z; + vpxord (5 * 64)(%rsi), RB1z, RB1z; + vpxord (6 * 64)(%rsi), RB2z, RB2z; + vpxord (7 * 64)(%rsi), RB3z, RB3z; + + vmovdqu32 RA0z, (0 * 64)(%rsi); + vmovdqu32 RA1z, (1 * 64)(%rsi); + vmovdqu32 RA2z, (2 * 64)(%rsi); + vmovdqu32 RA3z, (3 * 64)(%rsi); + vmovdqu32 RB0z, (4 * 64)(%rsi); + vmovdqu32 RB1z, (5 * 64)(%rsi); + vmovdqu32 RB2z, (6 * 64)(%rsi); + vmovdqu32 RB3z, (7 * 64)(%rsi); + + /* Checksum_i = Checksum_{i-1} xor C_i */ + vpternlogd $0x96, RA0z, RA1z, RA2z; + vpternlogd $0x96, RA3z, RB0z, RB1z; + vpternlogd $0x96, RB2z, RB3z, RA2z; + vpxord RA2z, RB1z, RTMP1z; + + vextracti64x4 $1, RTMP1z, RNOT; + vpxor RNOT, RTMP1, RTMP1; + vextracti128 $1, RTMP1, RNOTx; + vpternlogd $0x96, (%r8), RNOTx, RTMP1x; + vmovdqu RTMP1x, (%r8); + + vzeroall; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_sm4_gfni_avx512_ocb_dec_blk32,.-_gcry_sm4_gfni_avx512_ocb_dec_blk32;) + +#endif /*defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX512_SUPPORT)*/ +#endif /*__x86_64*/ diff --git a/cipher/sm4.c b/cipher/sm4.c index 062a14f4..02c399a9 100644 --- a/cipher/sm4.c +++ b/cipher/sm4.c @@ -65,10 +65,20 @@ # endif #endif +/* USE_GFNI_AVX512 inidicates whether to compile with Intel GFNI/AVX512 code. */ +#undef USE_GFNI_AVX512 +#if defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX512_SUPPORT) +# if defined(__x86_64__) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ + defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) +# define USE_GFNI_AVX512 1 +# endif +#endif + /* Assembly implementations use SystemV ABI, ABI conversion and additional * stack to store XMM6-XMM15 needed on Win64. 
*/ #undef ASM_FUNC_ABI -#if defined(USE_AESNI_AVX) || defined(USE_AESNI_AVX2) || defined(USE_GFNI_AVX2) +#if defined(USE_AESNI_AVX) || defined(USE_AESNI_AVX2) || \ + defined(USE_GFNI_AVX2) || defined(USE_GFNI_AVX512) # ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS # define ASM_FUNC_ABI __attribute__((sysv_abi)) # else @@ -125,10 +135,15 @@ static size_t _gcry_sm4_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, static size_t _gcry_sm4_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks); +typedef unsigned int (*crypt_blk1_16_fn_t) (const void *ctx, byte *out, + const byte *in, + unsigned int num_blks); + typedef struct { u32 rkey_enc[32]; u32 rkey_dec[32]; + crypt_blk1_16_fn_t crypt_blk1_16; #ifdef USE_AESNI_AVX unsigned int use_aesni_avx:1; #endif @@ -138,6 +153,9 @@ typedef struct #ifdef USE_GFNI_AVX2 unsigned int use_gfni_avx2:1; #endif +#ifdef USE_GFNI_AVX512 + unsigned int use_gfni_avx512:1; +#endif #ifdef USE_AARCH64_SIMD unsigned int use_aarch64_simd:1; #endif @@ -149,10 +167,6 @@ typedef struct #endif } SM4_context; -typedef unsigned int (*crypt_blk1_16_fn_t) (const void *ctx, byte *out, - const byte *in, - unsigned int num_blks); - static const u32 fk[4] = { 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc @@ -217,6 +231,8 @@ static const u32 ck[] = 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279 }; +static inline crypt_blk1_16_fn_t sm4_get_crypt_blk1_16_fn(SM4_context *ctx); + #ifdef USE_AESNI_AVX extern void _gcry_sm4_aesni_avx_expand_key(const byte *key, u32 *rk_enc, u32 *rk_dec, const u32 *fk, @@ -374,6 +390,86 @@ sm4_gfni_avx2_crypt_blk1_16(const void *rk, byte *out, const byte *in, #endif /* USE_GFNI_AVX2 */ +#ifdef USE_GFNI_AVX512 +extern void _gcry_sm4_gfni_avx512_expand_key(const byte *key, u32 *rk_enc, + u32 *rk_dec, const u32 *fk, + const u32 *ck) ASM_FUNC_ABI; + +extern void _gcry_sm4_gfni_avx512_ctr_enc(const u32 *rk_enc, byte *out, + const byte *in, + byte *ctr) ASM_FUNC_ABI; + +extern void _gcry_sm4_gfni_avx512_cbc_dec(const u32 *rk_dec, byte *out, + const byte *in, + byte *iv) ASM_FUNC_ABI; + +extern void _gcry_sm4_gfni_avx512_cfb_dec(const u32 *rk_enc, byte *out, + const byte *in, + byte *iv) ASM_FUNC_ABI; + +extern void _gcry_sm4_gfni_avx512_ocb_enc(const u32 *rk_enc, + unsigned char *out, + const unsigned char *in, + unsigned char *offset, + unsigned char *checksum, + const u64 Ls[16]) ASM_FUNC_ABI; + +extern void _gcry_sm4_gfni_avx512_ocb_dec(const u32 *rk_dec, + unsigned char *out, + const unsigned char *in, + unsigned char *offset, + unsigned char *checksum, + const u64 Ls[16]) ASM_FUNC_ABI; + +extern void _gcry_sm4_gfni_avx512_ocb_auth(const u32 *rk_enc, + const unsigned char *abuf, + unsigned char *offset, + unsigned char *checksum, + const u64 Ls[16]) ASM_FUNC_ABI; + +extern void _gcry_sm4_gfni_avx512_ctr_enc_blk32(const u32 *rk_enc, byte *out, + const byte *in, + byte *ctr) ASM_FUNC_ABI; + +extern void _gcry_sm4_gfni_avx512_cbc_dec_blk32(const u32 *rk_enc, byte *out, + const byte *in, + byte *iv) ASM_FUNC_ABI; + +extern void _gcry_sm4_gfni_avx512_cfb_dec_blk32(const u32 *rk_enc, byte *out, + const byte *in, + byte *iv) ASM_FUNC_ABI; + +extern void _gcry_sm4_gfni_avx512_ocb_enc_blk32(const u32 *rk_enc, + unsigned char *out, + const unsigned char *in, + unsigned char *offset, + unsigned char *checksum, + const u64 Ls[32]) ASM_FUNC_ABI; + +extern void _gcry_sm4_gfni_avx512_ocb_dec_blk32(const u32 *rk_dec, + unsigned char *out, + const unsigned char *in, + unsigned char *offset, + unsigned char *checksum, + const u64 Ls[32]) ASM_FUNC_ABI; + +extern 
unsigned int
+_gcry_sm4_gfni_avx512_crypt_blk1_16(const u32 *rk, byte *out, const byte *in,
+                                    unsigned int num_blks) ASM_FUNC_ABI;
+
+extern unsigned int
+_gcry_sm4_gfni_avx512_crypt_blk32(const u32 *rk, byte *out,
+                                  const byte *in) ASM_FUNC_ABI;
+
+static inline unsigned int
+sm4_gfni_avx512_crypt_blk1_16(const void *rk, byte *out, const byte *in,
+                              unsigned int num_blks)
+{
+  return _gcry_sm4_gfni_avx512_crypt_blk1_16(rk, out, in, num_blks);
+}
+
+#endif /* USE_GFNI_AVX512 */
+
#ifdef USE_AARCH64_SIMD
extern void _gcry_sm4_aarch64_crypt(const u32 *rk, byte *out,
                                    const byte *in,
@@ -561,6 +657,15 @@ sm4_expand_key (SM4_context *ctx, const byte *key)
  u32 rk[4];
  int i;

+#ifdef USE_GFNI_AVX512
+  if (ctx->use_gfni_avx512)
+    {
+      _gcry_sm4_gfni_avx512_expand_key (key, ctx->rkey_enc, ctx->rkey_dec,
+                                        fk, ck);
+      return;
+    }
+#endif
+
#ifdef USE_GFNI_AVX2
  if (ctx->use_gfni_avx2)
    {
@@ -645,6 +750,9 @@ sm4_setkey (void *context, const byte *key, const unsigned keylen,
#ifdef USE_GFNI_AVX2
  ctx->use_gfni_avx2 = (hwf & HWF_INTEL_GFNI) && (hwf & HWF_INTEL_AVX2);
#endif
+#ifdef USE_GFNI_AVX512
+  ctx->use_gfni_avx512 = (hwf & HWF_INTEL_GFNI) && (hwf & HWF_INTEL_AVX512);
+#endif
#ifdef USE_AARCH64_SIMD
  ctx->use_aarch64_simd = !!(hwf & HWF_ARM_NEON);
#endif
@@ -670,6 +778,8 @@ sm4_setkey (void *context, const byte *key, const unsigned keylen,
    }
#endif

+  ctx->crypt_blk1_16 = sm4_get_crypt_blk1_16_fn(ctx);
+
  /* Setup bulk encryption routines.  */
  memset (bulk_ops, 0, sizeof(*bulk_ops));
  bulk_ops->cbc_dec = _gcry_sm4_cbc_dec;
@@ -715,6 +825,11 @@ sm4_encrypt (void *context, byte *outbuf, const byte *inbuf)
{
  SM4_context *ctx = context;

+#ifdef USE_GFNI_AVX512
+  if (ctx->use_gfni_avx512)
+    return sm4_gfni_avx512_crypt_blk1_16(ctx->rkey_enc, outbuf, inbuf, 1);
+#endif
+
#ifdef USE_GFNI_AVX2
  if (ctx->use_gfni_avx2)
    return sm4_gfni_avx2_crypt_blk1_16(ctx->rkey_enc, outbuf, inbuf, 1);
@@ -735,6 +850,11 @@ sm4_decrypt (void *context, byte *outbuf, const byte *inbuf)
{
  SM4_context *ctx = context;

+#ifdef USE_GFNI_AVX512
+  if (ctx->use_gfni_avx512)
+    return sm4_gfni_avx512_crypt_blk1_16(ctx->rkey_dec, outbuf, inbuf, 1);
+#endif
+
#ifdef USE_GFNI_AVX2
  if (ctx->use_gfni_avx2)
    return sm4_gfni_avx2_crypt_blk1_16(ctx->rkey_dec, outbuf, inbuf, 1);
@@ -834,6 +954,12 @@ sm4_get_crypt_blk1_16_fn(SM4_context *ctx)
{
  if (0)
    ;
+#ifdef USE_GFNI_AVX512
+  else if (ctx->use_gfni_avx512)
+    {
+      return &sm4_gfni_avx512_crypt_blk1_16;
+    }
+#endif
#ifdef USE_GFNI_AVX2
  else if (ctx->use_gfni_avx2)
    {
@@ -890,6 +1016,32 @@ _gcry_sm4_ctr_enc(void *context, unsigned char *ctr,
  const byte *inbuf = inbuf_arg;
  int burn_stack_depth = 0;

+#ifdef USE_GFNI_AVX512
+  if (ctx->use_gfni_avx512)
+    {
+      /* Process data in 32 block chunks. */
+      while (nblocks >= 32)
+        {
+          _gcry_sm4_gfni_avx512_ctr_enc_blk32(ctx->rkey_enc,
+                                              outbuf, inbuf, ctr);
+
+          nblocks -= 32;
+          outbuf += 32 * 16;
+          inbuf += 32 * 16;
+        }
+
+      /* Process data in 16 block chunks. */
+      if (nblocks >= 16)
+        {
+          _gcry_sm4_gfni_avx512_ctr_enc(ctx->rkey_enc, outbuf, inbuf, ctr);
+
+          nblocks -= 16;
+          outbuf += 16 * 16;
+          inbuf += 16 * 16;
+        }
+    }
+#endif
+
#ifdef USE_GFNI_AVX2
  if (ctx->use_gfni_avx2)
    {
@@ -982,7 +1134,7 @@ _gcry_sm4_ctr_enc(void *context, unsigned char *ctr,
  /* Process remaining blocks.
*/ if (nblocks) { - crypt_blk1_16_fn_t crypt_blk1_16 = sm4_get_crypt_blk1_16_fn(ctx); + crypt_blk1_16_fn_t crypt_blk1_16 = ctx->crypt_blk1_16; byte tmpbuf[16 * 16]; unsigned int tmp_used = 16; size_t nburn; @@ -1011,6 +1163,31 @@ _gcry_sm4_cbc_dec(void *context, unsigned char *iv, const unsigned char *inbuf = inbuf_arg; int burn_stack_depth = 0; +#ifdef USE_GFNI_AVX512 + if (ctx->use_gfni_avx512) + { + /* Process data in 32 block chunks. */ + while (nblocks >= 32) + { + _gcry_sm4_gfni_avx512_cbc_dec_blk32(ctx->rkey_dec, outbuf, inbuf, iv); + + nblocks -= 32; + outbuf += 32 * 16; + inbuf += 32 * 16; + } + + /* Process data in 16 block chunks. */ + if (nblocks >= 16) + { + _gcry_sm4_gfni_avx512_cbc_dec(ctx->rkey_dec, outbuf, inbuf, iv); + + nblocks -= 16; + outbuf += 16 * 16; + inbuf += 16 * 16; + } + } +#endif + #ifdef USE_GFNI_AVX2 if (ctx->use_gfni_avx2) { @@ -1103,7 +1280,7 @@ _gcry_sm4_cbc_dec(void *context, unsigned char *iv, /* Process remaining blocks. */ if (nblocks) { - crypt_blk1_16_fn_t crypt_blk1_16 = sm4_get_crypt_blk1_16_fn(ctx); + crypt_blk1_16_fn_t crypt_blk1_16 = ctx->crypt_blk1_16; unsigned char tmpbuf[16 * 16]; unsigned int tmp_used = 16; size_t nburn; @@ -1132,6 +1309,31 @@ _gcry_sm4_cfb_dec(void *context, unsigned char *iv, const unsigned char *inbuf = inbuf_arg; int burn_stack_depth = 0; +#ifdef USE_GFNI_AVX512 + if (ctx->use_gfni_avx512) + { + /* Process data in 32 block chunks. */ + while (nblocks >= 32) + { + _gcry_sm4_gfni_avx512_cfb_dec_blk32(ctx->rkey_enc, outbuf, inbuf, iv); + + nblocks -= 32; + outbuf += 32 * 16; + inbuf += 32 * 16; + } + + /* Process data in 16 block chunks. */ + if (nblocks >= 16) + { + _gcry_sm4_gfni_avx512_cfb_dec(ctx->rkey_enc, outbuf, inbuf, iv); + + nblocks -= 16; + outbuf += 16 * 16; + inbuf += 16 * 16; + } + } +#endif + #ifdef USE_GFNI_AVX2 if (ctx->use_gfni_avx2) { @@ -1224,7 +1426,7 @@ _gcry_sm4_cfb_dec(void *context, unsigned char *iv, /* Process remaining blocks. */ if (nblocks) { - crypt_blk1_16_fn_t crypt_blk1_16 = sm4_get_crypt_blk1_16_fn(ctx); + crypt_blk1_16_fn_t crypt_blk1_16 = ctx->crypt_blk1_16; unsigned char tmpbuf[16 * 16]; unsigned int tmp_used = 16; size_t nburn; @@ -1241,6 +1443,52 @@ _gcry_sm4_cfb_dec(void *context, unsigned char *iv, _gcry_burn_stack(burn_stack_depth); } +static unsigned int +sm4_crypt_blk1_32 (const SM4_context *ctx, byte *outbuf, const byte *inbuf, + unsigned int num_blks, const u32 *rk) +{ + unsigned int stack_burn_size = 0; + unsigned int nburn; + + gcry_assert (num_blks <= 32); + +#ifdef USE_GFNI_AVX512 + if (num_blks == 32 && ctx->use_gfni_avx512) + { + return _gcry_sm4_gfni_avx512_crypt_blk32 (rk, outbuf, inbuf); + } +#endif + + do + { + unsigned int curr_blks = num_blks > 16 ? 16 : num_blks; + nburn = ctx->crypt_blk1_16 (rk, outbuf, inbuf, curr_blks); + stack_burn_size = nburn > stack_burn_size ? nburn : stack_burn_size; + outbuf += curr_blks * 16; + inbuf += curr_blks * 16; + num_blks -= curr_blks; + } + while (num_blks > 0); + + return stack_burn_size; +} + +static unsigned int +sm4_encrypt_blk1_32 (const void *context, byte *out, const byte *in, + unsigned int num_blks) +{ + const SM4_context *ctx = context; + return sm4_crypt_blk1_32 (ctx, out, in, num_blks, ctx->rkey_enc); +} + +static unsigned int +sm4_decrypt_blk1_32 (const void *context, byte *out, const byte *in, + unsigned int num_blks) +{ + const SM4_context *ctx = context; + return sm4_crypt_blk1_32 (ctx, out, in, num_blks, ctx->rkey_dec); +} + /* Bulk encryption/decryption of complete blocks in XTS mode. 
*/ static void _gcry_sm4_xts_crypt (void *context, unsigned char *tweak, void *outbuf_arg, @@ -1254,13 +1502,13 @@ _gcry_sm4_xts_crypt (void *context, unsigned char *tweak, void *outbuf_arg, /* Process remaining blocks. */ if (nblocks) { - crypt_blk1_16_fn_t crypt_blk1_16 = sm4_get_crypt_blk1_16_fn(ctx); - u32 *rk = encrypt ? ctx->rkey_enc : ctx->rkey_dec; - unsigned char tmpbuf[16 * 16]; + unsigned char tmpbuf[32 * 16]; unsigned int tmp_used = 16; size_t nburn; - nburn = bulk_xts_crypt_128(rk, crypt_blk1_16, outbuf, inbuf, nblocks, + nburn = bulk_xts_crypt_128(ctx, encrypt ? sm4_encrypt_blk1_32 + : sm4_decrypt_blk1_32, + outbuf, inbuf, nblocks, tweak, tmpbuf, sizeof(tmpbuf) / 16, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; @@ -1283,6 +1531,39 @@ _gcry_sm4_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, u64 blkn = c->u_mode.ocb.data_nblocks; int burn_stack_depth = 0; +#ifdef USE_GFNI_AVX512 + if (ctx->use_gfni_avx512) + { + u64 Ls[32]; + u64 *l; + + if (nblocks >= 32) + { + l = bulk_ocb_prepare_L_pointers_array_blk32 (c, Ls, blkn); + + /* Process data in 32 block chunks. */ + while (nblocks >= 32) + { + blkn += 32; + *l = (uintptr_t)(void *)ocb_get_l (c, blkn - blkn % 32); + + if (encrypt) + _gcry_sm4_gfni_avx512_ocb_enc_blk32 (ctx->rkey_enc, outbuf, + inbuf, c->u_iv.iv, + c->u_ctr.ctr, Ls); + else + _gcry_sm4_gfni_avx512_ocb_dec_blk32 (ctx->rkey_dec, outbuf, + inbuf, c->u_iv.iv, + c->u_ctr.ctr, Ls); + + nblocks -= 32; + outbuf += 32 * 16; + inbuf += 32 * 16; + } + } + } +#endif + #ifdef USE_GFNI_AVX2 if (ctx->use_gfni_avx2) { @@ -1379,7 +1660,7 @@ _gcry_sm4_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, /* Process remaining blocks. */ if (nblocks) { - crypt_blk1_16_fn_t crypt_blk1_16 = sm4_get_crypt_blk1_16_fn(ctx); + crypt_blk1_16_fn_t crypt_blk1_16 = ctx->crypt_blk1_16; u32 *rk = encrypt ? ctx->rkey_enc : ctx->rkey_dec; unsigned char tmpbuf[16 * 16]; unsigned int tmp_used = 16; @@ -1410,6 +1691,33 @@ _gcry_sm4_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks) u64 blkn = c->u_mode.ocb.aad_nblocks; int burn_stack_depth = 0; +#ifdef USE_GFNI_AVX512 + if (ctx->use_gfni_avx512) + { + u64 Ls[16]; + u64 *l; + + if (nblocks >= 16) + { + l = bulk_ocb_prepare_L_pointers_array_blk16 (c, Ls, blkn); + + /* Process data in 16 block chunks. */ + while (nblocks >= 16) + { + blkn += 16; + *l = (uintptr_t)(void *)ocb_get_l (c, blkn - blkn % 16); + + _gcry_sm4_gfni_avx512_ocb_auth (ctx->rkey_enc, abuf, + c->u_mode.ocb.aad_offset, + c->u_mode.ocb.aad_sum, Ls); + + nblocks -= 16; + abuf += 16 * 16; + } + } + } +#endif + #ifdef USE_GFNI_AVX2 if (ctx->use_gfni_avx2) { @@ -1494,7 +1802,7 @@ _gcry_sm4_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks) /* Process remaining blocks. */ if (nblocks) { - crypt_blk1_16_fn_t crypt_blk1_16 = sm4_get_crypt_blk1_16_fn(ctx); + crypt_blk1_16_fn_t crypt_blk1_16 = ctx->crypt_blk1_16; unsigned char tmpbuf[16 * 16]; unsigned int tmp_used = 16; size_t nburn; diff --git a/configure.ac b/configure.ac index b55510d8..34ec058e 100644 --- a/configure.ac +++ b/configure.ac @@ -2952,6 +2952,7 @@ if test "$found" = "1" ; then GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-aesni-avx-amd64.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-aesni-avx2-amd64.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-gfni-avx2-amd64.lo" + GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-gfni-avx512-amd64.lo" ;; aarch64-*-*) # Build with the assembly implementation |
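
The new _blk32 entry points are not called directly by applications; they are reached through libgcrypt's regular cipher API once sm4_setkey() has detected GFNI and AVX512 in the hardware features and the bulk helpers receive at least 32 blocks (512 bytes) per call. As a minimal sketch (not part of this patch; key, counter value and buffer size are placeholders), an SM4-CTR caller that lets the 32-block path do most of the work could look like this:

/* Minimal sketch: drive SM4-CTR through the public libgcrypt API with a
 * buffer that is a multiple of 512 bytes, so _gcry_sm4_ctr_enc can hand
 * 32 blocks at a time to the GFNI/AVX512 routine when the CPU supports it.
 * Key, counter and buffer size below are placeholder values. */
#include <stdio.h>
#include <string.h>
#include <gcrypt.h>

int
main (void)
{
  gcry_cipher_hd_t hd;
  gcry_error_t err;
  unsigned char key[16] = { 0 };   /* placeholder key */
  unsigned char ctr[16] = { 0 };   /* placeholder counter block */
  unsigned char buf[4096];         /* 256 blocks: eight full 32-block chunks */

  memset (buf, 0xa5, sizeof(buf));

  if (!gcry_check_version (GCRYPT_VERSION))
    return 1;
  gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);

  err = gcry_cipher_open (&hd, GCRY_CIPHER_SM4, GCRY_CIPHER_MODE_CTR, 0);
  if (!err)
    err = gcry_cipher_setkey (hd, key, sizeof(key));
  if (!err)
    err = gcry_cipher_setctr (hd, ctr, sizeof(ctr));
  if (!err)
    /* In-place encryption; the bulk CTR helper consumes the buffer in
       32-block chunks before falling back to the smaller code paths. */
    err = gcry_cipher_encrypt (hd, buf, sizeof(buf), NULL, 0);

  gcry_cipher_close (hd);

  if (err)
    {
      fprintf (stderr, "SM4-CTR failed: %s\n", gcry_strerror (err));
      return 1;
    }
  return 0;
}

Any buffer length works; sizes that are not a multiple of 512 bytes simply fall through to the 16-block routines and then to the crypt_blk1_16 tail handling shown in the patch above.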