summaryrefslogtreecommitdiff
path: root/lib/liboqs/src/sig/falcon
diff options
context:
space:
mode:
Diffstat (limited to 'lib/liboqs/src/sig/falcon')
-rw-r--r--lib/liboqs/src/sig/falcon/Makefile49
-rw-r--r--lib/liboqs/src/sig/falcon/config.mk17
-rw-r--r--lib/liboqs/src/sig/falcon/falcon.gyp40
-rw-r--r--lib/liboqs/src/sig/falcon/manifest.mn24
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/Makefile49
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/api.h80
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/codec.c555
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/common.c294
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/config.mk17
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/fft.c700
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/fpr.c1890
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/fpr.h253
-rwxr-xr-xlib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/inner.c70
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/inner.h794
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/keygen.c4231
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/manifest.mn32
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/pqclean.c386
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/pqclean_falcon-1024_clean.gyp48
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/rng.c201
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/sign.c1254
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/vrfy.c853
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/Makefile49
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/api.h80
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/codec.c555
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/common.c294
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/config.mk17
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/fft.c700
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/fpr.c1890
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/fpr.h253
-rwxr-xr-xlib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/inner.c70
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/inner.h793
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/keygen.c4231
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/manifest.mn32
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/pqclean.c384
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/pqclean_falcon-512_clean.gyp48
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/rng.c201
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/sign.c1254
-rw-r--r--lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/vrfy.c853
-rw-r--r--lib/liboqs/src/sig/falcon/sig_falcon.h30
-rw-r--r--lib/liboqs/src/sig/falcon/sig_falcon_1024.c90
-rw-r--r--lib/liboqs/src/sig/falcon/sig_falcon_512.c90
41 files changed, 23751 insertions(+), 0 deletions(-)
diff --git a/lib/liboqs/src/sig/falcon/Makefile b/lib/liboqs/src/sig/falcon/Makefile
new file mode 100644
index 000000000..fe090f3ff
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/Makefile
@@ -0,0 +1,49 @@
+#! gmake
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#######################################################################
+# (1) Include initial platform-independent assignments (MANDATORY). #
+#######################################################################
+
+include manifest.mn
+
+#######################################################################
+# (2) Include "global" configuration information. (OPTIONAL) #
+#######################################################################
+
+USE_GCOV =
+include $(CORE_DEPTH)/coreconf/config.mk
+
+#######################################################################
+# (3) Include "component" configuration information. (OPTIONAL) #
+#######################################################################
+
+
+
+#######################################################################
+# (4) Include "local" platform-dependent assignments (OPTIONAL). #
+#######################################################################
+
+include config.mk
+
+#######################################################################
+# (5) Execute "global" rules. (OPTIONAL) #
+#######################################################################
+
+include $(CORE_DEPTH)/coreconf/rules.mk
+
+#######################################################################
+# (6) Execute "component" rules. (OPTIONAL) #
+#######################################################################
+
+
+
+#######################################################################
+# (7) Execute "local" rules. (OPTIONAL). #
+#######################################################################
+
+WARNING_CFLAGS = $(NULL)
+
diff --git a/lib/liboqs/src/sig/falcon/config.mk b/lib/liboqs/src/sig/falcon/config.mk
new file mode 100644
index 000000000..b28c9ce64
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/config.mk
@@ -0,0 +1,17 @@
+# DO NOT EDIT: generated from config.mk.subdirs.template
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# add fixes for platform integration issues here.
+#
+# liboqs programs expect the public include files to be in oqs/xxxx,
+# So we put liboqs in its own module, oqs, and point to the dist files
+INCLUDES += -I$(CORE_DEPTH)/lib/liboqs/src/common/pqclean_shims -I$(CORE_DEPTH)/lib/liboqs/src/common/sha3/xkcp_low/KeccakP-1600/plain-64bits
+DEFINES +=
+
+ifeq ($(OS_ARCH), Darwin)
+DEFINES += -DOQS_HAVE_ALIGNED_ALLOC -DOQS_HAVE_MEMALIGN -DOQS_HAVE_POSIX_MEMALIGN
+endif
+
diff --git a/lib/liboqs/src/sig/falcon/falcon.gyp b/lib/liboqs/src/sig/falcon/falcon.gyp
new file mode 100644
index 000000000..73050d6ae
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/falcon.gyp
@@ -0,0 +1,40 @@
+# DO NOT EDIT: generated from subdir.gyp.template
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+{
+ 'includes': [
+ '../../../../../coreconf/config.gypi'
+ ],
+ 'targets': [
+ {
+ 'target_name': 'oqs_src_sig_falcon',
+ 'type': 'static_library',
+ 'sources': [
+ 'sig_falcon_512.c',
+ 'sig_falcon_1024.c',
+ ],
+ 'dependencies': [
+ '<(DEPTH)/exports.gyp:nss_exports'
+ ]
+ }
+ ],
+ 'target_defaults': {
+ 'defines': [
+ ],
+ 'include_dirs': [
+ '<(DEPTH)/lib/liboqs/src/common/pqclean_shims',
+ '<(DEPTH)/lib/liboqs/src/common/sha3/xkcp_low/KeccakP-1600/plain-64bits',
+ ],
+ [ 'OS=="mac"', {
+ 'defines': [
+ 'OQS_HAVE_POSIX_MEMALIGN',
+ 'OQS_HAVE_ALIGNED_ALLOC',
+ 'OQS_HAVE_MEMALIGN'
+ ]
+ }]
+ },
+ 'variables': {
+ 'module': 'oqs'
+ }
+}
diff --git a/lib/liboqs/src/sig/falcon/manifest.mn b/lib/liboqs/src/sig/falcon/manifest.mn
new file mode 100644
index 000000000..a0f3f2ee9
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/manifest.mn
@@ -0,0 +1,24 @@
+# DO NOT EDIT: generated from manifest.mn.subdirs.template
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+CORE_DEPTH = ../../../../..
+
+MODULE = oqs
+
+LIBRARY_NAME = oqs_src_sig_falcon
+SHARED_LIBRARY = $(NULL)
+
+CSRCS = \
+ sig_falcon_512.c \
+ sig_falcon_1024.c \
+ $(NULL)
+
+# only add module debugging in opt builds if DEBUG_PKCS11 is set
+ifdef DEBUG_PKCS11
+ DEFINES += -DDEBUG_MODULE
+endif
+
+# This part of the code, including all sub-dirs, can be optimized for size
+export ALLOW_OPT_CODE_SIZE = 1
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/Makefile b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/Makefile
new file mode 100644
index 000000000..fe090f3ff
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/Makefile
@@ -0,0 +1,49 @@
+#! gmake
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#######################################################################
+# (1) Include initial platform-independent assignments (MANDATORY). #
+#######################################################################
+
+include manifest.mn
+
+#######################################################################
+# (2) Include "global" configuration information. (OPTIONAL) #
+#######################################################################
+
+USE_GCOV =
+include $(CORE_DEPTH)/coreconf/config.mk
+
+#######################################################################
+# (3) Include "component" configuration information. (OPTIONAL) #
+#######################################################################
+
+
+
+#######################################################################
+# (4) Include "local" platform-dependent assignments (OPTIONAL). #
+#######################################################################
+
+include config.mk
+
+#######################################################################
+# (5) Execute "global" rules. (OPTIONAL) #
+#######################################################################
+
+include $(CORE_DEPTH)/coreconf/rules.mk
+
+#######################################################################
+# (6) Execute "component" rules. (OPTIONAL) #
+#######################################################################
+
+
+
+#######################################################################
+# (7) Execute "local" rules. (OPTIONAL). #
+#######################################################################
+
+WARNING_CFLAGS = $(NULL)
+
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/api.h b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/api.h
new file mode 100644
index 000000000..7a1c6569c
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/api.h
@@ -0,0 +1,80 @@
+#ifndef PQCLEAN_FALCON1024_CLEAN_API_H
+#define PQCLEAN_FALCON1024_CLEAN_API_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define PQCLEAN_FALCON1024_CLEAN_CRYPTO_SECRETKEYBYTES 2305
+#define PQCLEAN_FALCON1024_CLEAN_CRYPTO_PUBLICKEYBYTES 1793
+#define PQCLEAN_FALCON1024_CLEAN_CRYPTO_BYTES 1330
+
+#define PQCLEAN_FALCON1024_CLEAN_CRYPTO_ALGNAME "Falcon-1024"
+
+/*
+ * Generate a new key pair. Public key goes into pk[], private key in sk[].
+ * Key sizes are exact (in bytes):
+ * public (pk): PQCLEAN_FALCON1024_CLEAN_CRYPTO_PUBLICKEYBYTES
+ * private (sk): PQCLEAN_FALCON1024_CLEAN_CRYPTO_SECRETKEYBYTES
+ *
+ * Return value: 0 on success, -1 on error.
+ */
+int PQCLEAN_FALCON1024_CLEAN_crypto_sign_keypair(
+ uint8_t *pk, uint8_t *sk);
+
+/*
+ * Compute a signature on a provided message (m, mlen), with a given
+ * private key (sk). Signature is written in sig[], with length written
+ * into *siglen. Signature length is variable; maximum signature length
+ * (in bytes) is PQCLEAN_FALCON1024_CLEAN_CRYPTO_BYTES.
+ *
+ * sig[], m[] and sk[] may overlap each other arbitrarily.
+ *
+ * Return value: 0 on success, -1 on error.
+ */
+int PQCLEAN_FALCON1024_CLEAN_crypto_sign_signature(
+ uint8_t *sig, size_t *siglen,
+ const uint8_t *m, size_t mlen, const uint8_t *sk);
+
+/*
+ * Verify a signature (sig, siglen) on a message (m, mlen) with a given
+ * public key (pk).
+ *
+ * sig[], m[] and pk[] may overlap each other arbitrarily.
+ *
+ * Return value: 0 on success, -1 on error.
+ */
+int PQCLEAN_FALCON1024_CLEAN_crypto_sign_verify(
+ const uint8_t *sig, size_t siglen,
+ const uint8_t *m, size_t mlen, const uint8_t *pk);
+
+/*
+ * Compute a signature on a message and pack the signature and message
+ * into a single object, written into sm[]. The length of that output is
+ * written in *smlen; that length may be larger than the message length
+ * (mlen) by up to PQCLEAN_FALCON1024_CLEAN_CRYPTO_BYTES.
+ *
+ * sm[] and m[] may overlap each other arbitrarily; however, sm[] shall
+ * not overlap with sk[].
+ *
+ * Return value: 0 on success, -1 on error.
+ */
+int PQCLEAN_FALCON1024_CLEAN_crypto_sign(
+ uint8_t *sm, size_t *smlen,
+ const uint8_t *m, size_t mlen, const uint8_t *sk);
+
+/*
+ * Open a signed message object (sm, smlen) and verify the signature;
+ * on success, the message itself is written into m[] and its length
+ * into *mlen. The message is shorter than the signed message object,
+ * but the size difference depends on the signature value; the difference
+ * may range up to PQCLEAN_FALCON1024_CLEAN_CRYPTO_BYTES.
+ *
+ * m[], sm[] and pk[] may overlap each other arbitrarily.
+ *
+ * Return value: 0 on success, -1 on error.
+ */
+int PQCLEAN_FALCON1024_CLEAN_crypto_sign_open(
+ uint8_t *m, size_t *mlen,
+ const uint8_t *sm, size_t smlen, const uint8_t *pk);
+
+#endif
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/codec.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/codec.c
new file mode 100644
index 000000000..c5ab4938b
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/codec.c
@@ -0,0 +1,555 @@
+#include "inner.h"
+
+/*
+ * Encoding/decoding of keys and signatures.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+/* see inner.h */
+size_t
+PQCLEAN_FALCON1024_CLEAN_modq_encode(
+ void *out, size_t max_out_len,
+ const uint16_t *x, unsigned logn) {
+ size_t n, out_len, u;
+ uint8_t *buf;
+ uint32_t acc;
+ int acc_len;
+
+ n = (size_t)1 << logn;
+ for (u = 0; u < n; u ++) {
+ if (x[u] >= 12289) {
+ return 0;
+ }
+ }
+ out_len = ((n * 14) + 7) >> 3;
+ if (out == NULL) {
+ return out_len;
+ }
+ if (out_len > max_out_len) {
+ return 0;
+ }
+ buf = out;
+ acc = 0;
+ acc_len = 0;
+ for (u = 0; u < n; u ++) {
+ acc = (acc << 14) | x[u];
+ acc_len += 14;
+ while (acc_len >= 8) {
+ acc_len -= 8;
+ *buf ++ = (uint8_t)(acc >> acc_len);
+ }
+ }
+ if (acc_len > 0) {
+ *buf = (uint8_t)(acc << (8 - acc_len));
+ }
+ return out_len;
+}
+
+/* see inner.h */
+size_t
+PQCLEAN_FALCON1024_CLEAN_modq_decode(
+ uint16_t *x, unsigned logn,
+ const void *in, size_t max_in_len) {
+ size_t n, in_len, u;
+ const uint8_t *buf;
+ uint32_t acc;
+ int acc_len;
+
+ n = (size_t)1 << logn;
+ in_len = ((n * 14) + 7) >> 3;
+ if (in_len > max_in_len) {
+ return 0;
+ }
+ buf = in;
+ acc = 0;
+ acc_len = 0;
+ u = 0;
+ while (u < n) {
+ acc = (acc << 8) | (*buf ++);
+ acc_len += 8;
+ if (acc_len >= 14) {
+ unsigned w;
+
+ acc_len -= 14;
+ w = (acc >> acc_len) & 0x3FFF;
+ if (w >= 12289) {
+ return 0;
+ }
+ x[u ++] = (uint16_t)w;
+ }
+ }
+ if ((acc & (((uint32_t)1 << acc_len) - 1)) != 0) {
+ return 0;
+ }
+ return in_len;
+}
+
+/* see inner.h */
+size_t
+PQCLEAN_FALCON1024_CLEAN_trim_i16_encode(
+ void *out, size_t max_out_len,
+ const int16_t *x, unsigned logn, unsigned bits) {
+ size_t n, u, out_len;
+ int minv, maxv;
+ uint8_t *buf;
+ uint32_t acc, mask;
+ unsigned acc_len;
+
+ n = (size_t)1 << logn;
+ maxv = (1 << (bits - 1)) - 1;
+ minv = -maxv;
+ for (u = 0; u < n; u ++) {
+ if (x[u] < minv || x[u] > maxv) {
+ return 0;
+ }
+ }
+ out_len = ((n * bits) + 7) >> 3;
+ if (out == NULL) {
+ return out_len;
+ }
+ if (out_len > max_out_len) {
+ return 0;
+ }
+ buf = out;
+ acc = 0;
+ acc_len = 0;
+ mask = ((uint32_t)1 << bits) - 1;
+ for (u = 0; u < n; u ++) {
+ acc = (acc << bits) | ((uint16_t)x[u] & mask);
+ acc_len += bits;
+ while (acc_len >= 8) {
+ acc_len -= 8;
+ *buf ++ = (uint8_t)(acc >> acc_len);
+ }
+ }
+ if (acc_len > 0) {
+ *buf ++ = (uint8_t)(acc << (8 - acc_len));
+ }
+ return out_len;
+}
+
+/* see inner.h */
+size_t
+PQCLEAN_FALCON1024_CLEAN_trim_i16_decode(
+ int16_t *x, unsigned logn, unsigned bits,
+ const void *in, size_t max_in_len) {
+ size_t n, in_len;
+ const uint8_t *buf;
+ size_t u;
+ uint32_t acc, mask1, mask2;
+ unsigned acc_len;
+
+ n = (size_t)1 << logn;
+ in_len = ((n * bits) + 7) >> 3;
+ if (in_len > max_in_len) {
+ return 0;
+ }
+ buf = in;
+ u = 0;
+ acc = 0;
+ acc_len = 0;
+ mask1 = ((uint32_t)1 << bits) - 1;
+ mask2 = (uint32_t)1 << (bits - 1);
+ while (u < n) {
+ acc = (acc << 8) | *buf ++;
+ acc_len += 8;
+ while (acc_len >= bits && u < n) {
+ uint32_t w;
+
+ acc_len -= bits;
+ w = (acc >> acc_len) & mask1;
+ w |= -(w & mask2);
+ if (w == -mask2) {
+ /*
+ * The -2^(bits-1) value is forbidden.
+ */
+ return 0;
+ }
+ w |= -(w & mask2);
+ x[u ++] = (int16_t) * (int32_t *)&w;
+ }
+ }
+ if ((acc & (((uint32_t)1 << acc_len) - 1)) != 0) {
+ /*
+ * Extra bits in the last byte must be zero.
+ */
+ return 0;
+ }
+ return in_len;
+}
+
+/* see inner.h */
+size_t
+PQCLEAN_FALCON1024_CLEAN_trim_i8_encode(
+ void *out, size_t max_out_len,
+ const int8_t *x, unsigned logn, unsigned bits) {
+ size_t n, u, out_len;
+ int minv, maxv;
+ uint8_t *buf;
+ uint32_t acc, mask;
+ unsigned acc_len;
+
+ n = (size_t)1 << logn;
+ maxv = (1 << (bits - 1)) - 1;
+ minv = -maxv;
+ for (u = 0; u < n; u ++) {
+ if (x[u] < minv || x[u] > maxv) {
+ return 0;
+ }
+ }
+ out_len = ((n * bits) + 7) >> 3;
+ if (out == NULL) {
+ return out_len;
+ }
+ if (out_len > max_out_len) {
+ return 0;
+ }
+ buf = out;
+ acc = 0;
+ acc_len = 0;
+ mask = ((uint32_t)1 << bits) - 1;
+ for (u = 0; u < n; u ++) {
+ acc = (acc << bits) | ((uint8_t)x[u] & mask);
+ acc_len += bits;
+ while (acc_len >= 8) {
+ acc_len -= 8;
+ *buf ++ = (uint8_t)(acc >> acc_len);
+ }
+ }
+ if (acc_len > 0) {
+ *buf ++ = (uint8_t)(acc << (8 - acc_len));
+ }
+ return out_len;
+}
+
+/* see inner.h */
+size_t
+PQCLEAN_FALCON1024_CLEAN_trim_i8_decode(
+ int8_t *x, unsigned logn, unsigned bits,
+ const void *in, size_t max_in_len) {
+ size_t n, in_len;
+ const uint8_t *buf;
+ size_t u;
+ uint32_t acc, mask1, mask2;
+ unsigned acc_len;
+
+ n = (size_t)1 << logn;
+ in_len = ((n * bits) + 7) >> 3;
+ if (in_len > max_in_len) {
+ return 0;
+ }
+ buf = in;
+ u = 0;
+ acc = 0;
+ acc_len = 0;
+ mask1 = ((uint32_t)1 << bits) - 1;
+ mask2 = (uint32_t)1 << (bits - 1);
+ while (u < n) {
+ acc = (acc << 8) | *buf ++;
+ acc_len += 8;
+ while (acc_len >= bits && u < n) {
+ uint32_t w;
+
+ acc_len -= bits;
+ w = (acc >> acc_len) & mask1;
+ w |= -(w & mask2);
+ if (w == -mask2) {
+ /*
+ * The -2^(bits-1) value is forbidden.
+ */
+ return 0;
+ }
+ x[u ++] = (int8_t) * (int32_t *)&w;
+ }
+ }
+ if ((acc & (((uint32_t)1 << acc_len) - 1)) != 0) {
+ /*
+ * Extra bits in the last byte must be zero.
+ */
+ return 0;
+ }
+ return in_len;
+}
+
+/* see inner.h */
+size_t
+PQCLEAN_FALCON1024_CLEAN_comp_encode(
+ void *out, size_t max_out_len,
+ const int16_t *x, unsigned logn) {
+ uint8_t *buf;
+ size_t n, u, v;
+ uint32_t acc;
+ unsigned acc_len;
+
+ n = (size_t)1 << logn;
+ buf = out;
+
+ /*
+ * Make sure that all values are within the -2047..+2047 range.
+ */
+ for (u = 0; u < n; u ++) {
+ if (x[u] < -2047 || x[u] > +2047) {
+ return 0;
+ }
+ }
+
+ acc = 0;
+ acc_len = 0;
+ v = 0;
+ for (u = 0; u < n; u ++) {
+ int t;
+ unsigned w;
+
+ /*
+ * Get sign and absolute value of next integer; push the
+ * sign bit.
+ */
+ acc <<= 1;
+ t = x[u];
+ if (t < 0) {
+ t = -t;
+ acc |= 1;
+ }
+ w = (unsigned)t;
+
+ /*
+ * Push the low 7 bits of the absolute value.
+ */
+ acc <<= 7;
+ acc |= w & 127u;
+ w >>= 7;
+
+ /*
+ * We pushed exactly 8 bits.
+ */
+ acc_len += 8;
+
+ /*
+ * Push as many zeros as necessary, then a one. Since the
+ * absolute value is at most 2047, w can only range up to
+ * 15 at this point, thus we will add at most 16 bits
+ * here. With the 8 bits above and possibly up to 7 bits
+ * from previous iterations, we may go up to 31 bits, which
+ * will fit in the accumulator, which is an uint32_t.
+ */
+ acc <<= (w + 1);
+ acc |= 1;
+ acc_len += w + 1;
+
+ /*
+ * Produce all full bytes.
+ */
+ while (acc_len >= 8) {
+ acc_len -= 8;
+ if (buf != NULL) {
+ if (v >= max_out_len) {
+ return 0;
+ }
+ buf[v] = (uint8_t)(acc >> acc_len);
+ }
+ v ++;
+ }
+ }
+
+ /*
+ * Flush remaining bits (if any).
+ */
+ if (acc_len > 0) {
+ if (buf != NULL) {
+ if (v >= max_out_len) {
+ return 0;
+ }
+ buf[v] = (uint8_t)(acc << (8 - acc_len));
+ }
+ v ++;
+ }
+
+ return v;
+}
+
+/* see inner.h */
+size_t
+PQCLEAN_FALCON1024_CLEAN_comp_decode(
+ int16_t *x, unsigned logn,
+ const void *in, size_t max_in_len) {
+ const uint8_t *buf;
+ size_t n, u, v;
+ uint32_t acc;
+ unsigned acc_len;
+
+ n = (size_t)1 << logn;
+ buf = in;
+ acc = 0;
+ acc_len = 0;
+ v = 0;
+ for (u = 0; u < n; u ++) {
+ unsigned b, s, m;
+
+ /*
+ * Get next eight bits: sign and low seven bits of the
+ * absolute value.
+ */
+ if (v >= max_in_len) {
+ return 0;
+ }
+ acc = (acc << 8) | (uint32_t)buf[v ++];
+ b = acc >> acc_len;
+ s = b & 128;
+ m = b & 127;
+
+ /*
+ * Get next bits until a 1 is reached.
+ */
+ for (;;) {
+ if (acc_len == 0) {
+ if (v >= max_in_len) {
+ return 0;
+ }
+ acc = (acc << 8) | (uint32_t)buf[v ++];
+ acc_len = 8;
+ }
+ acc_len --;
+ if (((acc >> acc_len) & 1) != 0) {
+ break;
+ }
+ m += 128;
+ if (m > 2047) {
+ return 0;
+ }
+ }
+ x[u] = (int16_t) m;
+ if (s) {
+ x[u] = (int16_t) - x[u];
+ }
+ }
+ return v;
+}
+
+/*
+ * Key elements and signatures are polynomials with small integer
+ * coefficients. Here are some statistics gathered over many
+ * generated key pairs (10000 or more for each degree):
+ *
+ * log(n) n max(f,g) std(f,g) max(F,G) std(F,G)
+ * 1 2 129 56.31 143 60.02
+ * 2 4 123 40.93 160 46.52
+ * 3 8 97 28.97 159 38.01
+ * 4 16 100 21.48 154 32.50
+ * 5 32 71 15.41 151 29.36
+ * 6 64 59 11.07 138 27.77
+ * 7 128 39 7.91 144 27.00
+ * 8 256 32 5.63 148 26.61
+ * 9 512 22 4.00 137 26.46
+ * 10 1024 15 2.84 146 26.41
+ *
+ * We want a compact storage format for private key, and, as part of
+ * key generation, we are allowed to reject some keys which would
+ * otherwise be fine (this does not induce any noticeable vulnerability
+ * as long as we reject only a small proportion of possible keys).
+ * Hence, we enforce at key generation time maximum values for the
+ * elements of f, g, F and G, so that their encoding can be expressed
+ * in fixed-width values. Limits have been chosen so that generated
+ * keys are almost always within bounds, thus impacting neither
+ * security nor performance.
+ *
+ * IMPORTANT: the code assumes that all coefficients of f, g, F and G
+ * ultimately fit in the -127..+127 range. Thus, none of the elements
+ * of max_fg_bits[] and max_FG_bits[] shall be greater than 8.
+ */
+
+const uint8_t PQCLEAN_FALCON1024_CLEAN_max_fg_bits[] = {
+ 0, /* unused */
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 7,
+ 7,
+ 6,
+ 6,
+ 5
+};
+
+const uint8_t PQCLEAN_FALCON1024_CLEAN_max_FG_bits[] = {
+ 0, /* unused */
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8
+};
+
+/*
+ * When generating a new key pair, we can always reject keys which
+ * feature an abnormally large coefficient. This can also be done for
+ * signatures, albeit with some care: in case the signature process is
+ * used in a derandomized setup (explicitly seeded with the message and
+ * private key), we have to follow the specification faithfully, and the
+ * specification only enforces a limit on the L2 norm of the signature
+ * vector. The limit on the L2 norm implies that the absolute value of
+ * a coefficient of the signature cannot be more than the following:
+ *
+ * log(n) n max sig coeff (theoretical)
+ * 1 2 412
+ * 2 4 583
+ * 3 8 824
+ * 4 16 1166
+ * 5 32 1649
+ * 6 64 2332
+ * 7 128 3299
+ * 8 256 4665
+ * 9 512 6598
+ * 10 1024 9331
+ *
+ * However, the largest observed signature coefficients during our
+ * experiments was 1077 (in absolute value), hence we can assume that,
+ * with overwhelming probability, signature coefficients will fit
+ * in -2047..2047, i.e. 12 bits.
+ */
+
+const uint8_t PQCLEAN_FALCON1024_CLEAN_max_sig_bits[] = {
+ 0, /* unused */
+ 10,
+ 11,
+ 11,
+ 12,
+ 12,
+ 12,
+ 12,
+ 12,
+ 12,
+ 12
+};
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/common.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/common.c
new file mode 100644
index 000000000..2e3005b2b
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/common.c
@@ -0,0 +1,294 @@
+#include "inner.h"
+
+/*
+ * Support functions for signatures (hash-to-point, norm).
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+/* see inner.h */
+void
+PQCLEAN_FALCON1024_CLEAN_hash_to_point_vartime(
+ inner_shake256_context *sc,
+ uint16_t *x, unsigned logn) {
+ /*
+ * This is the straightforward per-the-spec implementation. It
+ * is not constant-time, thus it might reveal information on the
+ * plaintext (at least, enough to check the plaintext against a
+ * list of potential plaintexts) in a scenario where the
+ * attacker does not have access to the signature value or to
+ * the public key, but knows the nonce (without knowledge of the
+ * nonce, the hashed output cannot be matched against potential
+ * plaintexts).
+ */
+ size_t n;
+
+ n = (size_t)1 << logn;
+ while (n > 0) {
+ uint8_t buf[2];
+ uint32_t w;
+
+ inner_shake256_extract(sc, (void *)buf, sizeof buf);
+ w = ((unsigned)buf[0] << 8) | (unsigned)buf[1];
+ if (w < 61445) {
+ while (w >= 12289) {
+ w -= 12289;
+ }
+ *x ++ = (uint16_t)w;
+ n --;
+ }
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON1024_CLEAN_hash_to_point_ct(
+ inner_shake256_context *sc,
+ uint16_t *x, unsigned logn, uint8_t *tmp) {
+ /*
+ * Each 16-bit sample is a value in 0..65535. The value is
+ * kept if it falls in 0..61444 (because 61445 = 5*12289)
+ * and rejected otherwise; thus, each sample has probability
+ * about 0.93758 of being selected.
+ *
+ * We want to oversample enough to be sure that we will
+ * have enough values with probability at least 1 - 2^(-256).
+ * Depending on degree N, this leads to the following
+ * required oversampling:
+ *
+ * logn n oversampling
+ * 1 2 65
+ * 2 4 67
+ * 3 8 71
+ * 4 16 77
+ * 5 32 86
+ * 6 64 100
+ * 7 128 122
+ * 8 256 154
+ * 9 512 205
+ * 10 1024 287
+ *
+ * If logn >= 7, then the provided temporary buffer is large
+ * enough. Otherwise, we use a stack buffer of 63 entries
+ * (i.e. 126 bytes) for the values that do not fit in tmp[].
+ */
+
+ static const uint16_t overtab[] = {
+ 0, /* unused */
+ 65,
+ 67,
+ 71,
+ 77,
+ 86,
+ 100,
+ 122,
+ 154,
+ 205,
+ 287
+ };
+
+ unsigned n, n2, u, m, p, over;
+ uint16_t *tt1, tt2[63];
+
+ /*
+ * We first generate m 16-bit value. Values 0..n-1 go to x[].
+ * Values n..2*n-1 go to tt1[]. Values 2*n and later go to tt2[].
+ * We also reduce modulo q the values; rejected values are set
+ * to 0xFFFF.
+ */
+ n = 1U << logn;
+ n2 = n << 1;
+ over = overtab[logn];
+ m = n + over;
+ tt1 = (uint16_t *)tmp;
+ for (u = 0; u < m; u ++) {
+ uint8_t buf[2];
+ uint32_t w, wr;
+
+ inner_shake256_extract(sc, buf, sizeof buf);
+ w = ((uint32_t)buf[0] << 8) | (uint32_t)buf[1];
+ wr = w - ((uint32_t)24578 & (((w - 24578) >> 31) - 1));
+ wr = wr - ((uint32_t)24578 & (((wr - 24578) >> 31) - 1));
+ wr = wr - ((uint32_t)12289 & (((wr - 12289) >> 31) - 1));
+ wr |= ((w - 61445) >> 31) - 1;
+ if (u < n) {
+ x[u] = (uint16_t)wr;
+ } else if (u < n2) {
+ tt1[u - n] = (uint16_t)wr;
+ } else {
+ tt2[u - n2] = (uint16_t)wr;
+ }
+ }
+
+ /*
+ * Now we must "squeeze out" the invalid values. We do this in
+ * a logarithmic sequence of passes; each pass computes where a
+ * value should go, and moves it down by 'p' slots if necessary,
+ * where 'p' uses an increasing powers-of-two scale. It can be
+ * shown that in all cases where the loop decides that a value
+ * has to be moved down by p slots, the destination slot is
+ * "free" (i.e. contains an invalid value).
+ */
+ for (p = 1; p <= over; p <<= 1) {
+ unsigned v;
+
+ /*
+ * In the loop below:
+ *
+ * - v contains the index of the final destination of
+ * the value; it is recomputed dynamically based on
+ * whether values are valid or not.
+ *
+ * - u is the index of the value we consider ("source");
+ * its address is s.
+ *
+ * - The loop may swap the value with the one at index
+ * u-p. The address of the swap destination is d.
+ */
+ v = 0;
+ for (u = 0; u < m; u ++) {
+ uint16_t *s, *d;
+ unsigned j, sv, dv, mk;
+
+ if (u < n) {
+ s = &x[u];
+ } else if (u < n2) {
+ s = &tt1[u - n];
+ } else {
+ s = &tt2[u - n2];
+ }
+ sv = *s;
+
+ /*
+ * The value in sv should ultimately go to
+ * address v, i.e. jump back by u-v slots.
+ */
+ j = u - v;
+
+ /*
+ * We increment v for the next iteration, but
+ * only if the source value is valid. The mask
+ * 'mk' is -1 if the value is valid, 0 otherwise,
+ * so we _subtract_ mk.
+ */
+ mk = (sv >> 15) - 1U;
+ v -= mk;
+
+ /*
+ * In this loop we consider jumps by p slots; if
+ * u < p then there is nothing more to do.
+ */
+ if (u < p) {
+ continue;
+ }
+
+ /*
+ * Destination for the swap: value at address u-p.
+ */
+ if ((u - p) < n) {
+ d = &x[u - p];
+ } else if ((u - p) < n2) {
+ d = &tt1[(u - p) - n];
+ } else {
+ d = &tt2[(u - p) - n2];
+ }
+ dv = *d;
+
+ /*
+ * The swap should be performed only if the source
+ * is valid AND the jump j has its 'p' bit set.
+ */
+ mk &= -(((j & p) + 0x1FF) >> 9);
+
+ *s = (uint16_t)(sv ^ (mk & (sv ^ dv)));
+ *d = (uint16_t)(dv ^ (mk & (sv ^ dv)));
+ }
+ }
+}
+
+/* see inner.h */
+int
+PQCLEAN_FALCON1024_CLEAN_is_short(
+ const int16_t *s1, const int16_t *s2, unsigned logn) {
+ /*
+ * We use the l2-norm. Code below uses only 32-bit operations to
+ * compute the square of the norm with saturation to 2^32-1 if
+ * the value exceeds 2^31-1.
+ */
+ size_t n, u;
+ uint32_t s, ng;
+
+ n = (size_t)1 << logn;
+ s = 0;
+ ng = 0;
+ for (u = 0; u < n; u ++) {
+ int32_t z;
+
+ z = s1[u];
+ s += (uint32_t)(z * z);
+ ng |= s;
+ z = s2[u];
+ s += (uint32_t)(z * z);
+ ng |= s;
+ }
+ s |= -(ng >> 31);
+
+ /*
+ * Acceptance bound on the l2-norm is:
+ * 1.2*1.55*sqrt(q)*sqrt(2*N)
+ * Value 7085 is floor((1.2^2)*(1.55^2)*2*1024).
+ */
+ return s < (((uint32_t)7085 * (uint32_t)12289) >> (10 - logn));
+}
+
+/* see inner.h */
+int
+PQCLEAN_FALCON1024_CLEAN_is_short_half(
+ uint32_t sqn, const int16_t *s2, unsigned logn) {
+ size_t n, u;
+ uint32_t ng;
+
+ n = (size_t)1 << logn;
+ ng = -(sqn >> 31);
+ for (u = 0; u < n; u ++) {
+ int32_t z;
+
+ z = s2[u];
+ sqn += (uint32_t)(z * z);
+ ng |= sqn;
+ }
+ sqn |= -(ng >> 31);
+
+ /*
+ * Acceptance bound on the l2-norm is:
+ * 1.2*1.55*sqrt(q)*sqrt(2*N)
+ * Value 7085 is floor((1.2^2)*(1.55^2)*2*1024).
+ */
+ return sqn < (((uint32_t)7085 * (uint32_t)12289) >> (10 - logn));
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/config.mk b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/config.mk
new file mode 100644
index 000000000..b28c9ce64
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/config.mk
@@ -0,0 +1,17 @@
+# DO NOT EDIT: generated from config.mk.subdirs.template
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# add fixes for platform integration issues here.
+#
+# liboqs programs expect the public include files to be in oqs/xxxx,
+# So we put liboqs in its own module, oqs, and point to the dist files
+INCLUDES += -I$(CORE_DEPTH)/lib/liboqs/src/common/pqclean_shims -I$(CORE_DEPTH)/lib/liboqs/src/common/sha3/xkcp_low/KeccakP-1600/plain-64bits
+DEFINES +=
+
+ifeq ($(OS_ARCH), Darwin)
+DEFINES += -DOQS_HAVE_ALIGNED_ALLOC -DOQS_HAVE_MEMALIGN -DOQS_HAVE_POSIX_MEMALIGN
+endif
+
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/fft.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/fft.c
new file mode 100644
index 000000000..a25bac4ec
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/fft.c
@@ -0,0 +1,700 @@
+#include "inner.h"
+
+/*
+ * FFT code.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+/*
+ * Rules for complex number macros:
+ * --------------------------------
+ *
+ * Operand order is: destination, source1, source2...
+ *
+ * Each operand is a real and an imaginary part.
+ *
+ * All overlaps are allowed.
+ */
+
+/*
+ * Addition of two complex numbers (d = a + b).
+ * Temporaries are used so that the destination may alias the operands.
+ */
+#define FPC_ADD(d_re, d_im, a_re, a_im, b_re, b_im) do { \
+        fpr fpct_re, fpct_im; \
+        fpct_re = fpr_add(a_re, b_re); \
+        fpct_im = fpr_add(a_im, b_im); \
+        (d_re) = fpct_re; \
+        (d_im) = fpct_im; \
+    } while (0)
+
+/*
+ * Subtraction of two complex numbers (d = a - b).
+ * Temporaries are used so that the destination may alias the operands.
+ */
+#define FPC_SUB(d_re, d_im, a_re, a_im, b_re, b_im) do { \
+        fpr fpct_re, fpct_im; \
+        fpct_re = fpr_sub(a_re, b_re); \
+        fpct_im = fpr_sub(a_im, b_im); \
+        (d_re) = fpct_re; \
+        (d_im) = fpct_im; \
+    } while (0)
+
+/*
+ * Multiplication of two complex numbers (d = a * b).
+ * Standard schoolbook formula: (ar*br - ai*bi) + i*(ar*bi + ai*br).
+ */
+#define FPC_MUL(d_re, d_im, a_re, a_im, b_re, b_im) do { \
+        fpr fpct_a_re, fpct_a_im; \
+        fpr fpct_b_re, fpct_b_im; \
+        fpr fpct_d_re, fpct_d_im; \
+        fpct_a_re = (a_re); \
+        fpct_a_im = (a_im); \
+        fpct_b_re = (b_re); \
+        fpct_b_im = (b_im); \
+        fpct_d_re = fpr_sub( \
+                fpr_mul(fpct_a_re, fpct_b_re), \
+                fpr_mul(fpct_a_im, fpct_b_im)); \
+        fpct_d_im = fpr_add( \
+                fpr_mul(fpct_a_re, fpct_b_im), \
+                fpr_mul(fpct_a_im, fpct_b_re)); \
+        (d_re) = fpct_d_re; \
+        (d_im) = fpct_d_im; \
+    } while (0)
+
+/*
+ * Squaring of a complex number (d = a * a).
+ * Uses (ar^2 - ai^2) + i*(2*ar*ai), saving a multiplication over FPC_MUL.
+ */
+#define FPC_SQR(d_re, d_im, a_re, a_im) do { \
+        fpr fpct_a_re, fpct_a_im; \
+        fpr fpct_d_re, fpct_d_im; \
+        fpct_a_re = (a_re); \
+        fpct_a_im = (a_im); \
+        fpct_d_re = fpr_sub(fpr_sqr(fpct_a_re), fpr_sqr(fpct_a_im)); \
+        fpct_d_im = fpr_double(fpr_mul(fpct_a_re, fpct_a_im)); \
+        (d_re) = fpct_d_re; \
+        (d_im) = fpct_d_im; \
+    } while (0)
+
+/*
+ * Inversion of a complex number (d = 1 / a).
+ * Computed as conj(a) / |a|^2.
+ */
+#define FPC_INV(d_re, d_im, a_re, a_im) do { \
+        fpr fpct_a_re, fpct_a_im; \
+        fpr fpct_d_re, fpct_d_im; \
+        fpr fpct_m; \
+        fpct_a_re = (a_re); \
+        fpct_a_im = (a_im); \
+        fpct_m = fpr_add(fpr_sqr(fpct_a_re), fpr_sqr(fpct_a_im)); \
+        fpct_m = fpr_inv(fpct_m); \
+        fpct_d_re = fpr_mul(fpct_a_re, fpct_m); \
+        fpct_d_im = fpr_mul(fpr_neg(fpct_a_im), fpct_m); \
+        (d_re) = fpct_d_re; \
+        (d_im) = fpct_d_im; \
+    } while (0)
+
+/*
+ * Division of complex numbers (d = a / b).
+ * b is first replaced by conj(b)/|b|^2, then multiplied into a.
+ */
+#define FPC_DIV(d_re, d_im, a_re, a_im, b_re, b_im) do { \
+        fpr fpct_a_re, fpct_a_im; \
+        fpr fpct_b_re, fpct_b_im; \
+        fpr fpct_d_re, fpct_d_im; \
+        fpr fpct_m; \
+        fpct_a_re = (a_re); \
+        fpct_a_im = (a_im); \
+        fpct_b_re = (b_re); \
+        fpct_b_im = (b_im); \
+        fpct_m = fpr_add(fpr_sqr(fpct_b_re), fpr_sqr(fpct_b_im)); \
+        fpct_m = fpr_inv(fpct_m); \
+        fpct_b_re = fpr_mul(fpct_b_re, fpct_m); \
+        fpct_b_im = fpr_mul(fpr_neg(fpct_b_im), fpct_m); \
+        fpct_d_re = fpr_sub( \
+                fpr_mul(fpct_a_re, fpct_b_re), \
+                fpr_mul(fpct_a_im, fpct_b_im)); \
+        fpct_d_im = fpr_add( \
+                fpr_mul(fpct_a_re, fpct_b_im), \
+                fpr_mul(fpct_a_im, fpct_b_re)); \
+        (d_re) = fpct_d_re; \
+        (d_im) = fpct_d_im; \
+    } while (0)
+
+/*
+ * Let w = exp(i*pi/N); w is a primitive 2N-th root of 1. We define the
+ * values w_j = w^(2j+1) for all j from 0 to N-1: these are the roots
+ * of X^N+1 in the field of complex numbers. A crucial property is that
+ * w_{N-1-j} = conj(w_j) = 1/w_j for all j.
+ *
+ * FFT representation of a polynomial f (taken modulo X^N+1) is the
+ * set of values f(w_j). Since f is real, conj(f(w_j)) = f(conj(w_j)),
+ * thus f(w_{N-1-j}) = conj(f(w_j)). We thus store only half the values,
+ * for j = 0 to N/2-1; the other half can be recomputed easily when (if)
+ * needed. A consequence is that FFT representation has the same size
+ * as normal representation: N/2 complex numbers use N real numbers (each
+ * complex number is the combination of a real and an imaginary part).
+ *
+ * We use a specific ordering which makes computations easier. Let rev()
+ * be the bit-reversal function over log(N) bits. For j in 0..N/2-1, we
+ * store the real and imaginary parts of f(w_j) in slots:
+ *
+ * Re(f(w_j)) -> slot rev(j)/2
+ * Im(f(w_j)) -> slot rev(j)/2+N/2
+ *
+ * (Note that rev(j) is even for j < N/2.)
+ */
+
+/* see inner.h */
+/*
+ * In-place FFT of f (2^logn real coefficients -> N/2 complex values in
+ * the split re/im layout described above).
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_FFT(fpr *f, unsigned logn) {
+    /*
+     * FFT algorithm in bit-reversal order uses the following
+     * iterative algorithm:
+     *
+     *   t = N
+     *   for m = 1; m < N; m *= 2:
+     *       ht = t/2
+     *       for i1 = 0; i1 < m; i1 ++:
+     *           j1 = i1 * t
+     *           s = GM[m + i1]
+     *           for j = j1; j < (j1 + ht); j ++:
+     *               x = f[j]
+     *               y = s * f[j + ht]
+     *               f[j] = x + y
+     *               f[j + ht] = x - y
+     *       t = ht
+     *
+     * GM[k] contains w^rev(k) for primitive root w = exp(i*pi/N).
+     *
+     * In the description above, f[] is supposed to contain complex
+     * numbers. In our in-memory representation, the real and
+     * imaginary parts of f[k] are in array slots k and k+N/2.
+     *
+     * We only keep the first half of the complex numbers. We can
+     * see that after the first iteration, the first and second halves
+     * of the array of complex numbers have separate lives, so we
+     * simply ignore the second part.
+     */
+
+    unsigned u;
+    size_t t, n, hn, m;
+
+    /*
+     * First iteration: compute f[j] + i * f[j+N/2] for all j < N/2
+     * (because GM[1] = w^rev(1) = w^(N/2) = i).
+     * In our chosen representation, this is a no-op: everything is
+     * already where it should be.
+     */
+
+    /*
+     * Subsequent iterations are truncated to use only the first
+     * half of values.
+     */
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    t = hn;
+    for (u = 1, m = 2; u < logn; u ++, m <<= 1) {
+        size_t ht, hm, i1, j1;
+
+        ht = t >> 1;
+        hm = m >> 1;
+        for (i1 = 0, j1 = 0; i1 < hm; i1 ++, j1 += t) {
+            size_t j, j2;
+
+            j2 = j1 + ht;
+            fpr s_re, s_im;
+
+            /* Twiddle factor w^rev(m+i1), stored as (re, im) pairs. */
+            s_re = fpr_gm_tab[((m + i1) << 1) + 0];
+            s_im = fpr_gm_tab[((m + i1) << 1) + 1];
+            for (j = j1; j < j2; j ++) {
+                fpr x_re, x_im, y_re, y_im;
+
+                x_re = f[j];
+                x_im = f[j + hn];
+                y_re = f[j + ht];
+                y_im = f[j + ht + hn];
+                /* Butterfly: (x, y) -> (x + s*y, x - s*y). */
+                FPC_MUL(y_re, y_im, y_re, y_im, s_re, s_im);
+                FPC_ADD(f[j], f[j + hn],
+                        x_re, x_im, y_re, y_im);
+                FPC_SUB(f[j + ht], f[j + ht + hn],
+                        x_re, x_im, y_re, y_im);
+            }
+        }
+        t = ht;
+    }
+}
+
+/* see inner.h */
+/*
+ * In-place inverse FFT of f; exact inverse of
+ * PQCLEAN_FALCON1024_CLEAN_FFT() (same split re/im layout).
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_iFFT(fpr *f, unsigned logn) {
+    /*
+     * Inverse FFT algorithm in bit-reversal order uses the following
+     * iterative algorithm:
+     *
+     *   t = 1
+     *   for m = N; m > 1; m /= 2:
+     *       hm = m/2
+     *       dt = t*2
+     *       for i1 = 0; i1 < hm; i1 ++:
+     *           j1 = i1 * dt
+     *           s = iGM[hm + i1]
+     *           for j = j1; j < (j1 + t); j ++:
+     *               x = f[j]
+     *               y = f[j + t]
+     *               f[j] = x + y
+     *               f[j + t] = s * (x - y)
+     *       t = dt
+     *   for i1 = 0; i1 < N; i1 ++:
+     *       f[i1] = f[i1] / N
+     *
+     * iGM[k] contains (1/w)^rev(k) for primitive root w = exp(i*pi/N)
+     * (actually, iGM[k] = 1/GM[k] = conj(GM[k])).
+     *
+     * In the main loop (not counting the final division loop), in
+     * all iterations except the last, the first and second half of f[]
+     * (as an array of complex numbers) are separate. In our chosen
+     * representation, we do not keep the second half.
+     *
+     * The last iteration recombines the recomputed half with the
+     * implicit half, and should yield only real numbers since the
+     * target polynomial is real; moreover, s = i at that step.
+     * Thus, when considering x and y:
+     *    y = conj(x)   since the final f[j] must be real
+     * Therefore, f[j] is filled with 2*Re(x), and f[j + t] is
+     * filled with 2*Im(x).
+     * But we already have Re(x) and Im(x) in array slots j and j+t
+     * in our chosen representation. That last iteration is thus a
+     * simple doubling of the values in all the array.
+     *
+     * We make the last iteration a no-op by tweaking the final
+     * division into a division by N/2, not N.
+     */
+    size_t u, n, hn, t, m;
+
+    n = (size_t)1 << logn;
+    t = 1;
+    m = n;
+    hn = n >> 1;
+    for (u = logn; u > 1; u --) {
+        size_t hm, dt, i1, j1;
+
+        hm = m >> 1;
+        dt = t << 1;
+        for (i1 = 0, j1 = 0; j1 < hn; i1 ++, j1 += dt) {
+            size_t j, j2;
+
+            j2 = j1 + t;
+            fpr s_re, s_im;
+
+            /* Inverse twiddle: conjugate of the forward table entry. */
+            s_re = fpr_gm_tab[((hm + i1) << 1) + 0];
+            s_im = fpr_neg(fpr_gm_tab[((hm + i1) << 1) + 1]);
+            for (j = j1; j < j2; j ++) {
+                fpr x_re, x_im, y_re, y_im;
+
+                x_re = f[j];
+                x_im = f[j + hn];
+                y_re = f[j + t];
+                y_im = f[j + t + hn];
+                /* Inverse butterfly: (x, y) -> (x + y, s*(x - y)). */
+                FPC_ADD(f[j], f[j + hn],
+                        x_re, x_im, y_re, y_im);
+                FPC_SUB(x_re, x_im, x_re, x_im, y_re, y_im);
+                FPC_MUL(f[j + t], f[j + t + hn],
+                        x_re, x_im, s_re, s_im);
+            }
+        }
+        t = dt;
+        m = hm;
+    }
+
+    /*
+     * Last iteration is a no-op, provided that we divide by N/2
+     * instead of N. We need to make a special case for logn = 0.
+     */
+    if (logn > 0) {
+        fpr ni;
+
+        ni = fpr_p2_tab[logn];
+        for (u = 0; u < n; u ++) {
+            f[u] = fpr_mul(f[u], ni);
+        }
+    }
+}
+
+/* see inner.h */
+/* Element-wise addition over 2^logn slots: a <- a + b. */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_add(
+    fpr *a, const fpr *b, unsigned logn) {
+    size_t n, u;
+
+    n = (size_t)1 << logn;
+    for (u = 0; u < n; u ++) {
+        a[u] = fpr_add(a[u], b[u]);
+    }
+}
+
+/* see inner.h */
+/* Element-wise subtraction over 2^logn slots: a <- a - b. */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_sub(
+    fpr *a, const fpr *b, unsigned logn) {
+    size_t n, u;
+
+    n = (size_t)1 << logn;
+    for (u = 0; u < n; u ++) {
+        a[u] = fpr_sub(a[u], b[u]);
+    }
+}
+
+/* see inner.h */
+/* Element-wise negation over 2^logn slots: a <- -a. */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_neg(fpr *a, unsigned logn) {
+    size_t n, u;
+
+    n = (size_t)1 << logn;
+    for (u = 0; u < n; u ++) {
+        a[u] = fpr_neg(a[u]);
+    }
+}
+
+/* see inner.h */
+/*
+ * Hermitian adjoint in FFT representation: negate the imaginary halves
+ * (slots n/2..n-1), leaving the real halves untouched.
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_adj_fft(fpr *a, unsigned logn) {
+    size_t n, u;
+
+    n = (size_t)1 << logn;
+    for (u = (n >> 1); u < n; u ++) {
+        a[u] = fpr_neg(a[u]);
+    }
+}
+
+/* see inner.h */
+/* Pointwise complex multiplication in FFT representation: a <- a * b. */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(
+    fpr *a, const fpr *b, unsigned logn) {
+    size_t n, hn, u;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    for (u = 0; u < hn; u ++) {
+        fpr a_re, a_im, b_re, b_im;
+
+        a_re = a[u];
+        a_im = a[u + hn];
+        b_re = b[u];
+        b_im = b[u + hn];
+        FPC_MUL(a[u], a[u + hn], a_re, a_im, b_re, b_im);
+    }
+}
+
+/* see inner.h */
+/*
+ * Pointwise multiplication by the adjoint in FFT representation:
+ * a <- a * conj(b) (b's imaginary part is negated before the product).
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_muladj_fft(
+    fpr *a, const fpr *b, unsigned logn) {
+    size_t n, hn, u;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    for (u = 0; u < hn; u ++) {
+        fpr a_re, a_im, b_re, b_im;
+
+        a_re = a[u];
+        a_im = a[u + hn];
+        b_re = b[u];
+        b_im = fpr_neg(b[u + hn]);
+        FPC_MUL(a[u], a[u + hn], a_re, a_im, b_re, b_im);
+    }
+}
+
+/* see inner.h */
+/* a <- a * conj(a) in FFT representation, i.e. |a|^2 per slot. */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_mulselfadj_fft(fpr *a, unsigned logn) {
+    /*
+     * Since each coefficient is multiplied with its own conjugate,
+     * the result contains only real values.
+     */
+    size_t n, hn, u;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    for (u = 0; u < hn; u ++) {
+        fpr a_re, a_im;
+
+        a_re = a[u];
+        a_im = a[u + hn];
+        /* Real part is re^2 + im^2; imaginary part is exactly zero. */
+        a[u] = fpr_add(fpr_sqr(a_re), fpr_sqr(a_im));
+        a[u + hn] = fpr_zero;
+    }
+}
+
+/* see inner.h */
+/* Multiply every slot by the real constant x: a <- x * a. */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_mulconst(fpr *a, fpr x, unsigned logn) {
+    size_t n, u;
+
+    n = (size_t)1 << logn;
+    for (u = 0; u < n; u ++) {
+        a[u] = fpr_mul(a[u], x);
+    }
+}
+
+/* see inner.h */
+/* Pointwise complex division in FFT representation: a <- a / b. */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_div_fft(
+    fpr *a, const fpr *b, unsigned logn) {
+    size_t n, hn, u;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    for (u = 0; u < hn; u ++) {
+        fpr a_re, a_im, b_re, b_im;
+
+        a_re = a[u];
+        a_im = a[u + hn];
+        b_re = b[u];
+        b_im = b[u + hn];
+        FPC_DIV(a[u], a[u + hn], a_re, a_im, b_re, b_im);
+    }
+}
+
+/* see inner.h */
+/*
+ * Per-slot d <- 1 / (|a|^2 + |b|^2) in FFT representation. The result
+ * is real, so only the first n/2 slots of d are written.
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_invnorm2_fft(fpr *d,
+        const fpr *a, const fpr *b, unsigned logn) {
+    size_t n, hn, u;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    for (u = 0; u < hn; u ++) {
+        fpr a_re, a_im;
+        fpr b_re, b_im;
+
+        a_re = a[u];
+        a_im = a[u + hn];
+        b_re = b[u];
+        b_im = b[u + hn];
+        d[u] = fpr_inv(fpr_add(
+                           fpr_add(fpr_sqr(a_re), fpr_sqr(a_im)),
+                           fpr_add(fpr_sqr(b_re), fpr_sqr(b_im))));
+    }
+}
+
+/* see inner.h */
+/*
+ * d <- F*conj(f) + G*conj(g), pointwise, in FFT representation.
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_add_muladj_fft(fpr *d,
+        const fpr *F, const fpr *G,
+        const fpr *f, const fpr *g, unsigned logn) {
+    size_t n, hn, u;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    for (u = 0; u < hn; u ++) {
+        fpr F_re, F_im, G_re, G_im;
+        fpr f_re, f_im, g_re, g_im;
+        fpr a_re, a_im, b_re, b_im;
+
+        F_re = F[u];
+        F_im = F[u + hn];
+        G_re = G[u];
+        G_im = G[u + hn];
+        f_re = f[u];
+        f_im = f[u + hn];
+        g_re = g[u];
+        g_im = g[u + hn];
+
+        /* Conjugation is applied by negating the imaginary operand. */
+        FPC_MUL(a_re, a_im, F_re, F_im, f_re, fpr_neg(f_im));
+        FPC_MUL(b_re, b_im, G_re, G_im, g_re, fpr_neg(g_im));
+        d[u] = fpr_add(a_re, b_re);
+        d[u + hn] = fpr_add(a_im, b_im);
+    }
+}
+
+/* see inner.h */
+/*
+ * Pointwise multiplication by a self-adjoint polynomial b (all-real in
+ * FFT representation: only b[0..hn-1] are read).
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_mul_autoadj_fft(
+    fpr *a, const fpr *b, unsigned logn) {
+    size_t n, hn, u;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    for (u = 0; u < hn; u ++) {
+        a[u] = fpr_mul(a[u], b[u]);
+        a[u + hn] = fpr_mul(a[u + hn], b[u]);
+    }
+}
+
+/* see inner.h */
+/*
+ * Pointwise division by a self-adjoint polynomial b (all-real in FFT
+ * representation: only b[0..hn-1] are read; each is inverted once).
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_div_autoadj_fft(
+    fpr *a, const fpr *b, unsigned logn) {
+    size_t n, hn, u;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    for (u = 0; u < hn; u ++) {
+        fpr ib;
+
+        ib = fpr_inv(b[u]);
+        a[u] = fpr_mul(a[u], ib);
+        a[u + hn] = fpr_mul(a[u + hn], ib);
+    }
+}
+
+/* see inner.h */
+/*
+ * In-place LDL decomposition of the self-adjoint 2x2 Gram matrix
+ * [[g00, g01], [adj(g01), g11]] in FFT representation:
+ * g01 receives l10 = g01/g00 (conjugated on store) and g11 receives
+ * d11 = g11 - mu*conj(g01); g00 is left unchanged.
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_LDL_fft(
+    const fpr *g00,
+    fpr *g01, fpr *g11, unsigned logn) {
+    size_t n, hn, u;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    for (u = 0; u < hn; u ++) {
+        fpr g00_re, g00_im, g01_re, g01_im, g11_re, g11_im;
+        fpr mu_re, mu_im;
+
+        g00_re = g00[u];
+        g00_im = g00[u + hn];
+        g01_re = g01[u];
+        g01_im = g01[u + hn];
+        g11_re = g11[u];
+        g11_im = g11[u + hn];
+        FPC_DIV(mu_re, mu_im, g01_re, g01_im, g00_re, g00_im);
+        FPC_MUL(g01_re, g01_im, mu_re, mu_im, g01_re, fpr_neg(g01_im));
+        FPC_SUB(g11[u], g11[u + hn], g11_re, g11_im, g01_re, g01_im);
+        /* Store conj(mu) in g01. */
+        g01[u] = mu_re;
+        g01[u + hn] = fpr_neg(mu_im);
+    }
+}
+
+/* see inner.h */
+/*
+ * Same LDL decomposition as poly_LDL_fft(), but non-destructive: the
+ * results are written to separate outputs d11 and l10, and all three
+ * Gram entries are read-only.
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_LDLmv_fft(
+    fpr *d11, fpr *l10,
+    const fpr *g00, const fpr *g01,
+    const fpr *g11, unsigned logn) {
+    size_t n, hn, u;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    for (u = 0; u < hn; u ++) {
+        fpr g00_re, g00_im, g01_re, g01_im, g11_re, g11_im;
+        fpr mu_re, mu_im;
+
+        g00_re = g00[u];
+        g00_im = g00[u + hn];
+        g01_re = g01[u];
+        g01_im = g01[u + hn];
+        g11_re = g11[u];
+        g11_im = g11[u + hn];
+        FPC_DIV(mu_re, mu_im, g01_re, g01_im, g00_re, g00_im);
+        FPC_MUL(g01_re, g01_im, mu_re, mu_im, g01_re, fpr_neg(g01_im));
+        FPC_SUB(d11[u], d11[u + hn], g11_re, g11_im, g01_re, g01_im);
+        /* Store conj(mu) in l10. */
+        l10[u] = mu_re;
+        l10[u + hn] = fpr_neg(mu_im);
+    }
+}
+
+/* see inner.h */
+/*
+ * Split f (degree 2^logn, FFT representation) into its even-index part
+ * f0 and odd-index part f1, both of half degree, still in FFT
+ * representation (Gentleman-Sande style split for the FFT tree).
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_split_fft(
+    fpr *f0, fpr *f1,
+    const fpr *f, unsigned logn) {
+    /*
+     * The FFT representation we use is in bit-reversed order
+     * (element i contains f(w^(rev(i))), where rev() is the
+     * bit-reversal function over the ring degree. This changes
+     * indexes with regards to the Falcon specification.
+     */
+    size_t n, hn, qn, u;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    qn = hn >> 1;
+
+    /*
+     * We process complex values by pairs. For logn = 1, there is only
+     * one complex value (the other one is the implicit conjugate),
+     * so we add the two lines below because the loop will be
+     * skipped.
+     */
+    f0[0] = f[0];
+    f1[0] = f[hn];
+
+    for (u = 0; u < qn; u ++) {
+        fpr a_re, a_im, b_re, b_im;
+        fpr t_re, t_im;
+
+        a_re = f[(u << 1) + 0];
+        a_im = f[(u << 1) + 0 + hn];
+        b_re = f[(u << 1) + 1];
+        b_im = f[(u << 1) + 1 + hn];
+
+        /* f0 = (a + b) / 2 */
+        FPC_ADD(t_re, t_im, a_re, a_im, b_re, b_im);
+        f0[u] = fpr_half(t_re);
+        f0[u + qn] = fpr_half(t_im);
+
+        /* f1 = (a - b) / 2 * conj(w), undoing the twiddle factor. */
+        FPC_SUB(t_re, t_im, a_re, a_im, b_re, b_im);
+        FPC_MUL(t_re, t_im, t_re, t_im,
+                fpr_gm_tab[((u + hn) << 1) + 0],
+                fpr_neg(fpr_gm_tab[((u + hn) << 1) + 1]));
+        f1[u] = fpr_half(t_re);
+        f1[u + qn] = fpr_half(t_im);
+    }
+}
+
+/* see inner.h */
+/*
+ * Inverse of poly_split_fft(): rebuild f (degree 2^logn, FFT
+ * representation) from its two half-degree parts f0 and f1.
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_poly_merge_fft(
+    fpr *f,
+    const fpr *f0, const fpr *f1, unsigned logn) {
+    size_t n, hn, qn, u;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    qn = hn >> 1;
+
+    /*
+     * An extra copy to handle the special case logn = 1.
+     */
+    f[0] = f0[0];
+    f[hn] = f1[0];
+
+    for (u = 0; u < qn; u ++) {
+        fpr a_re, a_im, b_re, b_im;
+        fpr t_re, t_im;
+
+        a_re = f0[u];
+        a_im = f0[u + qn];
+        /* Re-apply the twiddle factor that split_fft removed. */
+        FPC_MUL(b_re, b_im, f1[u], f1[u + qn],
+                fpr_gm_tab[((u + hn) << 1) + 0],
+                fpr_gm_tab[((u + hn) << 1) + 1]);
+        FPC_ADD(t_re, t_im, a_re, a_im, b_re, b_im);
+        f[(u << 1) + 0] = t_re;
+        f[(u << 1) + 0 + hn] = t_im;
+        FPC_SUB(t_re, t_im, a_re, a_im, b_re, b_im);
+        f[(u << 1) + 1] = t_re;
+        f[(u << 1) + 1 + hn] = t_im;
+    }
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/fpr.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/fpr.c
new file mode 100644
index 000000000..669c825ee
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/fpr.c
@@ -0,0 +1,1890 @@
+#include "inner.h"
+
+/*
+ * Floating-point operations.
+ *
+ * This file implements the non-inline functions declared in
+ * fpr.h, as well as the constants for FFT / iFFT.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+
+/*
+ * Normalize a provided unsigned integer to the 2^63..2^64-1 range by
+ * left-shifting it if necessary. The exponent e is adjusted accordingly
+ * (i.e. if the value was left-shifted by n bits, then n is subtracted
+ * from e). If source m is 0, then it remains 0, but e is altered.
+ * Both m and e must be simple variables (no expressions allowed).
+ */
+/*
+ * Branchless normalization: six stages (32, 16, 8, 4, 2, 1 bits) of a
+ * binary search on the position of the highest set bit. At each stage,
+ * nt is 1 if the inspected top bits are non-zero (no shift needed) and
+ * 0 otherwise (shift and adjust e); the masked XOR applies the shift
+ * without a data-dependent branch.
+ */
+#define FPR_NORM64(m, e) do { \
+        uint32_t nt; \
+ \
+        (e) -= 63; \
+ \
+        nt = (uint32_t)((m) >> 32); \
+        nt = (nt | -nt) >> 31; \
+        (m) ^= ((m) ^ ((m) << 32)) & ((uint64_t)nt - 1); \
+        (e) += (int)(nt << 5); \
+ \
+        nt = (uint32_t)((m) >> 48); \
+        nt = (nt | -nt) >> 31; \
+        (m) ^= ((m) ^ ((m) << 16)) & ((uint64_t)nt - 1); \
+        (e) += (int)(nt << 4); \
+ \
+        nt = (uint32_t)((m) >> 56); \
+        nt = (nt | -nt) >> 31; \
+        (m) ^= ((m) ^ ((m) << 8)) & ((uint64_t)nt - 1); \
+        (e) += (int)(nt << 3); \
+ \
+        nt = (uint32_t)((m) >> 60); \
+        nt = (nt | -nt) >> 31; \
+        (m) ^= ((m) ^ ((m) << 4)) & ((uint64_t)nt - 1); \
+        (e) += (int)(nt << 2); \
+ \
+        nt = (uint32_t)((m) >> 62); \
+        nt = (nt | -nt) >> 31; \
+        (m) ^= ((m) ^ ((m) << 2)) & ((uint64_t)nt - 1); \
+        (e) += (int)(nt << 1); \
+ \
+        nt = (uint32_t)((m) >> 63); \
+        (m) ^= ((m) ^ ((m) << 1)) & ((uint64_t)nt - 1); \
+        (e) += (int)(nt); \
+    } while (0)
+
+/*
+ * Unsigned right shift by n (0 <= n <= 63), split into a conditional
+ * 32-bit step plus a shift of at most 31 bits.
+ * NOTE(review): presumably so the variable shift count stays below 32
+ * on platforms where larger shift counts are not constant-time —
+ * consistent with the Falcon reference implementation.
+ */
+uint64_t
+fpr_ursh(uint64_t x, int n) {
+    x ^= (x ^ (x >> 32)) & -(uint64_t)(n >> 5);
+    return x >> (n & 31);
+}
+
+/*
+ * Arithmetic (sign-extending) right shift by n (0 <= n <= 63), same
+ * two-step structure as fpr_ursh().
+ */
+int64_t
+fpr_irsh(int64_t x, int n) {
+    x ^= (x ^ (x >> 32)) & -(int64_t)(n >> 5);
+    return x >> (n & 31);
+}
+
+/*
+ * Unsigned left shift by n (0 <= n <= 63), same two-step structure as
+ * fpr_ursh().
+ */
+uint64_t
+fpr_ulsh(uint64_t x, int n) {
+    x ^= (x ^ (x << 32)) & -(uint64_t)(n >> 5);
+    return x << (n & 31);
+}
+
+/*
+ * Assemble an fpr (IEEE-754 binary64 bit pattern in a uint64_t) from a
+ * sign bit s (0 or 1), an exponent e, and a 55-bit scaled mantissa m
+ * whose low bits are rounded off. Subnormals are flushed to zero.
+ */
+fpr
+FPR(int s, int e, uint64_t m) {
+    fpr x;
+    uint32_t t;
+    unsigned f;
+
+    /*
+     * If e >= -1076, then the value is "normal"; otherwise, it
+     * should be a subnormal, which we clamp down to zero.
+     */
+    e += 1076;
+    t = (uint32_t)e >> 31;
+    m &= (uint64_t)t - 1;
+
+    /*
+     * If m = 0 then we want a zero; make e = 0 too, but conserve
+     * the sign.
+     */
+    t = (uint32_t)(m >> 54);
+    e &= -(int)t;
+
+    /*
+     * The 52 mantissa bits come from m. Value m has its top bit set
+     * (unless it is a zero); we leave it "as is": the top bit will
+     * increment the exponent by 1, except when m = 0, which is
+     * exactly what we want.
+     */
+    x = (((uint64_t)s << 63) | (m >> 2)) + ((uint64_t)(uint32_t)e << 52);
+
+    /*
+     * Rounding: if the low three bits of m are 011, 110 or 111,
+     * then the value should be incremented to get the next
+     * representable value. This implements the usual
+     * round-to-nearest rule (with preference to even values in case
+     * of a tie). Note that the increment may make a carry spill
+     * into the exponent field, which is again exactly what we want
+     * in that case.
+     */
+    f = (unsigned)m & 7U;
+    /* 0xC8 = 0b11001000: bit f is 1 exactly for f in {3, 6, 7}. */
+    x += (0xC8U >> f) & 1;
+    return x;
+}
+
+/*
+ * Convert integer i scaled by 2^sc into an fpr, i.e. return the
+ * binary64 encoding of i * 2^sc, with round-to-nearest-even.
+ */
+fpr
+fpr_scaled(int64_t i, int sc) {
+    /*
+     * To convert from int to float, we have to do the following:
+     *  1. Get the absolute value of the input, and its sign
+     *  2. Shift right or left the value as appropriate
+     *  3. Pack the result
+     *
+     * We can assume that the source integer is not -2^63.
+     */
+    int s, e;
+    uint32_t t;
+    uint64_t m;
+
+    /*
+     * Extract sign bit.
+     * We have: -i = 1 + ~i
+     */
+    s = (int)((uint64_t)i >> 63);
+    i ^= -(int64_t)s;
+    i += s;
+
+    /*
+     * For now we suppose that i != 0.
+     * Otherwise, we set m to i and left-shift it as much as needed
+     * to get a 1 in the top bit. We can do that in a logarithmic
+     * number of conditional shifts.
+     */
+    m = (uint64_t)i;
+    e = 9 + sc;
+    FPR_NORM64(m, e);
+
+    /*
+     * Now m is in the 2^63..2^64-1 range. We must divide it by 512;
+     * if one of the dropped bits is a 1, this should go into the
+     * "sticky bit".
+     */
+    m |= ((uint32_t)m & 0x1FF) + 0x1FF;
+    m >>= 9;
+
+    /*
+     * Corrective action: if i = 0 then all of the above was
+     * incorrect, and we clamp e and m down to zero.
+     */
+    t = (uint32_t)((uint64_t)(i | -i) >> 63);
+    m &= -(uint64_t)t;
+    e &= -(int)t;
+
+    /*
+     * Assemble back everything. The FPR() function will handle cases
+     * where e is too low.
+     */
+    return FPR(s, e, m);
+}
+
+/* Convert a plain integer to fpr (no scaling: i * 2^0). */
+fpr
+fpr_of(int64_t i) {
+    return fpr_scaled(i, 0);
+}
+
+/*
+ * Round x to the nearest integer (ties to even) and return it as a
+ * signed 64-bit integer. The value is assumed to fit in
+ * -(2^63-1)..+(2^63-1).
+ */
+int64_t
+fpr_rint(fpr x) {
+    uint64_t m, d;
+    int e;
+    uint32_t s, dd, f;
+
+    /*
+     * We assume that the value fits in -(2^63-1)..+(2^63-1). We can
+     * thus extract the mantissa as a 63-bit integer, then right-shift
+     * it as needed.
+     */
+    m = ((x << 10) | ((uint64_t)1 << 62)) & (((uint64_t)1 << 63) - 1);
+    e = 1085 - ((int)(x >> 52) & 0x7FF);
+
+    /*
+     * If a shift of more than 63 bits is needed, then simply set m
+     * to zero. This also covers the case of an input operand equal
+     * to zero.
+     */
+    m &= -(uint64_t)((uint32_t)(e - 64) >> 31);
+    e &= 63;
+
+    /*
+     * Right-shift m as needed. Shift count is e. Proper rounding
+     * mandates that:
+     *   - If the highest dropped bit is zero, then round low.
+     *   - If the highest dropped bit is one, and at least one of the
+     *     other dropped bits is one, then round up.
+     *   - If the highest dropped bit is one, and all other dropped
+     *     bits are zero, then round up if the lowest kept bit is 1,
+     *     or low otherwise (i.e. ties are broken by "rounding to even").
+     *
+     * We thus first extract a word consisting of all the dropped bit
+     * AND the lowest kept bit; then we shrink it down to three bits,
+     * the lowest being "sticky".
+     */
+    d = fpr_ulsh(m, 63 - e);
+    dd = (uint32_t)d | ((uint32_t)(d >> 32) & 0x1FFFFFFF);
+    f = (uint32_t)(d >> 61) | ((dd | -dd) >> 31);
+    /* Same 0xC8 rounding table as in FPR(): add 1 for f in {3, 6, 7}. */
+    m = fpr_ursh(m, e) + (uint64_t)((0xC8U >> f) & 1U);
+
+    /*
+     * Apply the sign bit.
+     */
+    s = (uint32_t)(x >> 63);
+    return ((int64_t)m ^ -(int64_t)s) + (int64_t)s;
+}
+
+/*
+ * Round x toward minus infinity (floor) and return it as a signed
+ * 64-bit integer. The value is assumed to fit in -(2^63-1)..+(2^63-1).
+ */
+int64_t
+fpr_floor(fpr x) {
+    uint64_t t;
+    int64_t xi;
+    int e, cc;
+
+    /*
+     * We extract the integer as a _signed_ 64-bit integer with
+     * a scaling factor. Since we assume that the value fits
+     * in the -(2^63-1)..+(2^63-1) range, we can left-shift the
+     * absolute value to make it in the 2^62..2^63-1 range: we
+     * will only need a right-shift afterwards.
+     */
+    e = (int)(x >> 52) & 0x7FF;
+    t = x >> 63;
+    xi = (int64_t)(((x << 10) | ((uint64_t)1 << 62))
+                   & (((uint64_t)1 << 63) - 1));
+    xi = (xi ^ -(int64_t)t) + (int64_t)t;
+    cc = 1085 - e;
+
+    /*
+     * We perform an arithmetic right-shift on the value. This
+     * applies floor() semantics on both positive and negative values
+     * (rounding toward minus infinity).
+     */
+    xi = fpr_irsh(xi, cc & 63);
+
+    /*
+     * If the true shift count was 64 or more, then we should instead
+     * replace xi with 0 (if nonnegative) or -1 (if negative). Edge
+     * case: -0 will be floored to -1, not 0 (whether this is correct
+     * is debatable; in any case, the other functions normalize zero
+     * to +0).
+     *
+     * For an input of zero, the non-shifted xi was incorrect (we used
+     * a top implicit bit of value 1, not 0), but this does not matter
+     * since this operation will clamp it down.
+     */
+    xi ^= (xi ^ -(int64_t)t) & -(int64_t)((uint32_t)(63 - cc) >> 31);
+    return xi;
+}
+
+/*
+ * Round x toward zero (truncate) and return it as a signed 64-bit
+ * integer. The value is assumed to fit in -(2^63-1)..+(2^63-1).
+ */
+int64_t
+fpr_trunc(fpr x) {
+    uint64_t t, xu;
+    int e, cc;
+
+    /*
+     * Extract the absolute value. Since we assume that the value
+     * fits in the -(2^63-1)..+(2^63-1) range, we can left-shift
+     * the absolute value into the 2^62..2^63-1 range, and then
+     * do a right shift afterwards.
+     */
+    e = (int)(x >> 52) & 0x7FF;
+    xu = ((x << 10) | ((uint64_t)1 << 62)) & (((uint64_t)1 << 63) - 1);
+    cc = 1085 - e;
+    xu = fpr_ursh(xu, cc & 63);
+
+    /*
+     * If the exponent is too low (cc > 63), then the shift was wrong
+     * and we must clamp the value to 0. This also covers the case
+     * of an input equal to zero.
+     */
+    xu &= -(uint64_t)((uint32_t)(cc - 64) >> 31);
+
+    /*
+     * Apply back the sign, if the source value is negative.
+     */
+    t = x >> 63;
+    xu = (xu ^ -t) + t;
+    /*
+     * Reinterpret the unsigned bits as signed. This aliasing is valid
+     * per C11 6.5p7: int64_t is the signed type corresponding to
+     * uint64_t, and it avoids the implementation-defined conversion
+     * of an out-of-range unsigned value.
+     */
+    return *(int64_t *)&xu;
+}
+
+/*
+ * Constant-time addition of two fpr values (binary64 bit patterns),
+ * with round-to-nearest-even.
+ */
+fpr
+fpr_add(fpr x, fpr y) {
+    uint64_t m, xu, yu, za;
+    uint32_t cs;
+    int ex, ey, sx, sy, cc;
+
+    /*
+     * Make sure that the first operand (x) has the larger absolute
+     * value. This guarantees that the exponent of y is less than
+     * or equal to the exponent of x, and, if they are equal, then
+     * the mantissa of y will not be greater than the mantissa of x.
+     *
+     * After this swap, the result will have the sign x, except in
+     * the following edge case: abs(x) = abs(y), and x and y have
+     * opposite sign bits; in that case, the result shall be +0
+     * even if the sign bit of x is 1. To handle this case properly,
+     * we do the swap if abs(x) = abs(y) AND the sign of x is 1.
+     */
+    m = ((uint64_t)1 << 63) - 1;
+    za = (x & m) - (y & m);
+    cs = (uint32_t)(za >> 63)
+         | ((1U - (uint32_t)(-za >> 63)) & (uint32_t)(x >> 63));
+    m = (x ^ y) & -(uint64_t)cs;
+    x ^= m;
+    y ^= m;
+
+    /*
+     * Extract sign bits, exponents and mantissas. The mantissas are
+     * scaled up to 2^55..2^56-1, and the exponent is unbiased. If
+     * an operand is zero, its mantissa is set to 0 at this step, and
+     * its exponent will be -1078.
+     */
+    ex = (int)(x >> 52);
+    sx = ex >> 11;
+    ex &= 0x7FF;
+    m = (uint64_t)(uint32_t)((ex + 0x7FF) >> 11) << 52;
+    xu = ((x & (((uint64_t)1 << 52) - 1)) | m) << 3;
+    ex -= 1078;
+    ey = (int)(y >> 52);
+    sy = ey >> 11;
+    ey &= 0x7FF;
+    m = (uint64_t)(uint32_t)((ey + 0x7FF) >> 11) << 52;
+    yu = ((y & (((uint64_t)1 << 52) - 1)) | m) << 3;
+    ey -= 1078;
+
+    /*
+     * x has the larger exponent; hence, we only need to right-shift y.
+     * If the shift count is larger than 59 bits then we clamp the
+     * value to zero.
+     */
+    cc = ex - ey;
+    yu &= -(uint64_t)((uint32_t)(cc - 60) >> 31);
+    cc &= 63;
+
+    /*
+     * The lowest bit of yu is "sticky".
+     */
+    m = fpr_ulsh(1, cc) - 1;
+    yu |= (yu & m) + m;
+    yu = fpr_ursh(yu, cc);
+
+    /*
+     * If the operands have the same sign, then we add the mantissas;
+     * otherwise, we subtract the mantissas.
+     */
+    xu += yu - ((yu << 1) & -(uint64_t)(sx ^ sy));
+
+    /*
+     * The result may be smaller, or slightly larger. We normalize
+     * it to the 2^63..2^64-1 range (if xu is zero, then it stays
+     * at zero).
+     */
+    FPR_NORM64(xu, ex);
+
+    /*
+     * Scale down the value to 2^54..2^55-1, handling the last bit
+     * as sticky.
+     */
+    xu |= ((uint32_t)xu & 0x1FF) + 0x1FF;
+    xu >>= 9;
+    ex += 9;
+
+    /*
+     * In general, the result has the sign of x. However, if the
+     * result is exactly zero, then the following situations may
+     * be encountered:
+     *   x > 0, y = -x   -> result should be +0
+     *   x < 0, y = -x   -> result should be +0
+     *   x = +0, y = +0  -> result should be +0
+     *   x = -0, y = +0  -> result should be +0
+     *   x = +0, y = -0  -> result should be +0
+     *   x = -0, y = -0  -> result should be -0
+     *
+     * But at the conditional swap step at the start of the
+     * function, we ensured that if abs(x) = abs(y) and the
+     * sign of x was 1, then x and y were swapped. Thus, the
+     * two following cases cannot actually happen:
+     *   x < 0, y = -x
+     *   x = -0, y = +0
+     * In all other cases, the sign bit of x is conserved, which
+     * is what the FPR() function does. The FPR() function also
+     * properly clamps values to zero when the exponent is too
+     * low, but does not alter the sign in that case.
+     */
+    return FPR(sx, ex, xu);
+}
+
+/* Subtraction: x - y = x + (-y), implemented by flipping y's sign bit. */
+fpr
+fpr_sub(fpr x, fpr y) {
+    y ^= (uint64_t)1 << 63;
+    return fpr_add(x, y);
+}
+
+/* Negation: flip the sign bit (also maps +0 to -0 and vice versa). */
+fpr
+fpr_neg(fpr x) {
+    x ^= (uint64_t)1 << 63;
+    return x;
+}
+
+/* Halve x: decrement the exponent field, clamping to zero on underflow. */
+fpr
+fpr_half(fpr x) {
+    /*
+     * To divide a value by 2, we just have to subtract 1 from its
+     * exponent, but we have to take care of zero.
+     */
+    uint32_t t;
+
+    x -= (uint64_t)1 << 52;
+    /* t = 1 iff the decremented exponent wrapped (input was zero). */
+    t = (((uint32_t)(x >> 52) & 0x7FF) + 1) >> 11;
+    x &= (uint64_t)t - 1;
+    return x;
+}
+
+/* Double x: increment the exponent field, leaving zero unchanged. */
+fpr
+fpr_double(fpr x) {
+    /*
+     * To double a value, we just increment by one the exponent. We
+     * don't care about infinites or NaNs; however, 0 is a
+     * special case. The added term is 1 in the exponent position
+     * except when the exponent field is 0 (i.e. x is a zero).
+     */
+    x += (uint64_t)((((unsigned)(x >> 52) & 0x7FFU) + 0x7FFU) >> 11) << 52;
+    return x;
+}
+
+/*
+ * Multiplication. The 53x53-bit mantissa product is computed with
+ * 25-bit limbs and only branchless integer operations, so that
+ * execution time does not depend on operand values.
+ */
+fpr
+fpr_mul(fpr x, fpr y) {
+ uint64_t xu, yu, w, zu, zv;
+ uint32_t x0, x1, y0, y1, z0, z1, z2;
+ int ex, ey, d, e, s;
+
+ /*
+ * Extract absolute values as scaled unsigned integers. We
+ * don't extract exponents yet.
+ */
+ xu = (x & (((uint64_t)1 << 52) - 1)) | ((uint64_t)1 << 52);
+ yu = (y & (((uint64_t)1 << 52) - 1)) | ((uint64_t)1 << 52);
+
+ /*
+ * We have two 53-bit integers to multiply; we need to split
+ * each into a lower half and a upper half. Moreover, we
+ * prefer to have lower halves to be of 25 bits each, for
+ * reasons explained later on.
+ */
+ x0 = (uint32_t)xu & 0x01FFFFFF;
+ x1 = (uint32_t)(xu >> 25);
+ y0 = (uint32_t)yu & 0x01FFFFFF;
+ y1 = (uint32_t)(yu >> 25);
+ /* Schoolbook product over the four limb pairs. */
+ w = (uint64_t)x0 * (uint64_t)y0;
+ z0 = (uint32_t)w & 0x01FFFFFF;
+ z1 = (uint32_t)(w >> 25);
+ w = (uint64_t)x0 * (uint64_t)y1;
+ z1 += (uint32_t)w & 0x01FFFFFF;
+ z2 = (uint32_t)(w >> 25);
+ w = (uint64_t)x1 * (uint64_t)y0;
+ z1 += (uint32_t)w & 0x01FFFFFF;
+ z2 += (uint32_t)(w >> 25);
+ zu = (uint64_t)x1 * (uint64_t)y1;
+ z2 += (z1 >> 25);
+ z1 &= 0x01FFFFFF;
+ zu += z2;
+
+ /*
+ * Since xu and yu are both in the 2^52..2^53-1 range, the
+ * product is in the 2^104..2^106-1 range. We first reassemble
+ * it and round it into the 2^54..2^56-1 range; the bottom bit
+ * is made "sticky". Since the low limbs z0 and z1 are 25 bits
+ * each, we just take the upper part (zu), and consider z0 and
+ * z1 only for purposes of stickiness.
+ * (This is the reason why we chose 25-bit limbs above.)
+ */
+ zu |= ((z0 | z1) + 0x01FFFFFF) >> 25;
+
+ /*
+ * We normalize zu to the 2^54..2^55-1 range: it could be one
+ * bit too large at this point. This is done with a conditional
+ * right-shift that takes into account the sticky bit.
+ */
+ zv = (zu >> 1) | (zu & 1);
+ w = zu >> 55;
+ zu ^= (zu ^ zv) & -w;
+
+ /*
+ * Get the aggregate scaling factor:
+ *
+ * - Each exponent is biased by 1023.
+ *
+ * - Integral mantissas are scaled by 2^52, hence an
+ * extra 52 bias for each exponent.
+ *
+ * - However, we right-shifted z by 50 bits, and then
+ * by 0 or 1 extra bit (depending on the value of w).
+ *
+ * In total, we must add the exponents, then subtract
+ * 2 * (1023 + 52), then add 50 + w.
+ */
+ ex = (int)((x >> 52) & 0x7FF);
+ ey = (int)((y >> 52) & 0x7FF);
+ e = ex + ey - 2100 + (int)w;
+
+ /*
+ * Sign bit is the XOR of the operand sign bits.
+ */
+ s = (int)((x ^ y) >> 63);
+
+ /*
+ * Corrective actions for zeros: if either of the operands is
+ * zero, then the computations above were wrong. Test for zero
+ * is whether ex or ey is zero. We just have to set the mantissa
+ * (zu) to zero, the FPR() function will normalize e.
+ */
+ d = ((ex + 0x7FF) & (ey + 0x7FF)) >> 11;
+ zu &= -(uint64_t)d;
+
+ /*
+ * FPR() packs the result and applies proper rounding.
+ */
+ return FPR(s, e, zu);
+}
+
+/*
+ * Squaring: the value multiplied by itself; inherits the rounding
+ * behavior of fpr_mul().
+ */
+fpr
+fpr_sqr(fpr x) {
+ fpr r;
+
+ r = fpr_mul(x, x);
+ return r;
+}
+
+/*
+ * Division. Performed as a branchless restoring bit-by-bit division
+ * on the integer mantissas, so that timing does not depend on the
+ * operand values. The caller must not pass y = 0 (see below).
+ */
+fpr
+fpr_div(fpr x, fpr y) {
+ uint64_t xu, yu, q, q2, w;
+ int i, ex, ey, e, d, s;
+
+ /*
+ * Extract mantissas of x and y (unsigned).
+ */
+ xu = (x & (((uint64_t)1 << 52) - 1)) | ((uint64_t)1 << 52);
+ yu = (y & (((uint64_t)1 << 52) - 1)) | ((uint64_t)1 << 52);
+
+ /*
+ * Perform bit-by-bit division of xu by yu. We run it for 55 bits.
+ */
+ q = 0;
+ for (i = 0; i < 55; i ++) {
+ /*
+ * If yu is less than or equal xu, then subtract it and
+ * push a 1 in the quotient; otherwise, leave xu unchanged
+ * and push a 0.
+ */
+ uint64_t b;
+
+ /*
+ * b is an all-ones mask when xu >= yu (no borrow in the
+ * subtraction), all-zeros otherwise; no branch is taken.
+ * Note that q is shifted left AFTER the bit is ORed in,
+ * which leaves a trailing zero bit after the loop.
+ */
+ b = ((xu - yu) >> 63) - 1;
+ xu -= b & yu;
+ q |= b & 1;
+ xu <<= 1;
+ q <<= 1;
+ }
+
+ /*
+ * We got 55 bits in the quotient, followed by an extra zero. We
+ * want that 56th bit to be "sticky": it should be a 1 if and
+ * only if the remainder (xu) is non-zero.
+ */
+ /* (xu | -xu) >> 63 is 1 if and only if xu != 0. */
+ q |= (xu | -xu) >> 63;
+
+ /*
+ * Quotient is at most 2^56-1. Its top bit may be zero, but in
+ * that case the next-to-top bit will be a one, since the
+ * initial xu and yu were both in the 2^52..2^53-1 range.
+ * We perform a conditional shift to normalize q to the
+ * 2^54..2^55-1 range (with the bottom bit being sticky).
+ */
+ q2 = (q >> 1) | (q & 1);
+ w = q >> 55;
+ q ^= (q ^ q2) & -w;
+
+ /*
+ * Extract exponents to compute the scaling factor:
+ *
+ * - Each exponent is biased and we scaled them up by
+ * 52 bits; but these biases will cancel out.
+ *
+ * - The division loop produced a 55-bit shifted result,
+ * so we must scale it down by 55 bits.
+ *
+ * - If w = 1, we right-shifted the integer by 1 bit,
+ * hence we must add 1 to the scaling.
+ */
+ ex = (int)((x >> 52) & 0x7FF);
+ ey = (int)((y >> 52) & 0x7FF);
+ e = ex - ey - 55 + (int)w;
+
+ /*
+ * Sign is the XOR of the signs of the operands.
+ */
+ s = (int)((x ^ y) >> 63);
+
+ /*
+ * Corrective actions for zeros: if x = 0, then the computation
+ * is wrong, and we must clamp e and q to 0. We do not care
+ * about the case y = 0 (as per assumptions in this module,
+ * the caller does not perform divisions by zero).
+ */
+ /* d = 1 if ex != 0 (x nonzero), 0 otherwise; masking s as
+ well forces a +0 result when x = 0. */
+ d = (ex + 0x7FF) >> 11;
+ s &= d;
+ e &= -d;
+ q &= -(uint64_t)d;
+
+ /*
+ * FPR() packs the result and applies proper rounding.
+ */
+ return FPR(s, e, q);
+}
+
+/*
+ * Inversion: 1/x. The constant 4607182418800017408
+ * (0x3FF0000000000000) is the encoding of 1.0 (sign 0, biased
+ * exponent 1023, mantissa 0).
+ */
+fpr
+fpr_inv(fpr x) {
+ fpr one;
+
+ one = 4607182418800017408u;
+ return fpr_div(one, x);
+}
+
+/*
+ * Square root, computed digit by digit on the mantissa with only
+ * branchless integer operations. The operand is assumed to be
+ * nonnegative (see below).
+ */
+fpr
+fpr_sqrt(fpr x) {
+ uint64_t xu, q, s, r;
+ int ex, e;
+
+ /*
+ * Extract the mantissa and the exponent. We don't care about
+ * the sign: by assumption, the operand is nonnegative.
+ * We want the "true" exponent corresponding to a mantissa
+ * in the 1..2 range.
+ */
+ xu = (x & (((uint64_t)1 << 52) - 1)) | ((uint64_t)1 << 52);
+ ex = (int)((x >> 52) & 0x7FF);
+ e = ex - 1023;
+
+ /*
+ * If the exponent is odd, double the mantissa and decrement
+ * the exponent. The exponent is then halved to account for
+ * the square root.
+ */
+ xu += xu & -(uint64_t)(e & 1);
+ e >>= 1;
+
+ /*
+ * Double the mantissa.
+ */
+ xu <<= 1;
+
+ /*
+ * We now have a mantissa in the 2^53..2^55-1 range. It
+ * represents a value between 1 (inclusive) and 4 (exclusive)
+ * in fixed point notation (with 53 fractional bits). We
+ * compute the square root bit by bit.
+ */
+ q = 0;
+ s = 0;
+ r = (uint64_t)1 << 53;
+ for (int i = 0; i < 54; i ++) {
+ uint64_t t, b;
+
+ /*
+ * b is an all-ones mask when xu >= s + r, i.e. when the
+ * candidate bit r can be accepted into the root; the
+ * partial root (q), the doubled accumulator (s) and the
+ * remainder (xu) are then updated without branching.
+ */
+ t = s + r;
+ b = ((xu - t) >> 63) - 1;
+ s += (r << 1) & b;
+ xu -= t & b;
+ q += r & b;
+ xu <<= 1;
+ r >>= 1;
+ }
+
+ /*
+ * Now, q is a rounded-low 54-bit value, with a leading 1,
+ * 52 fractional digits, and an additional guard bit. We add
+ * an extra sticky bit to account for what remains of the operand.
+ */
+ q <<= 1;
+ q |= (xu | -xu) >> 63;
+
+ /*
+ * Result q is in the 2^54..2^55-1 range; we bias the exponent
+ * by 54 bits (the value e at that point contains the "true"
+ * exponent, but q is now considered an integer, i.e. scaled
+ * up.
+ */
+ e -= 54;
+
+ /*
+ * Corrective action for an operand of value zero.
+ */
+ q &= -(uint64_t)((ex + 0x7FF) >> 11);
+
+ /*
+ * Apply rounding and pack the result.
+ */
+ return FPR(0, e, q);
+}
+
+/*
+ * Comparison: returns 1 if x < y, 0 otherwise, using only
+ * branchless integer operations on the encodings.
+ */
+int
+fpr_lt(fpr x, fpr y) {
+ /*
+ * If both x and y are positive, then a signed comparison yields
+ * the proper result:
+ * - For positive values, the order is preserved.
+ * - The sign bit is at the same place as in integers, so
+ * sign is preserved.
+ * Moreover, we can compute [x < y] as sgn(x-y) and the computation
+ * of x-y will not overflow.
+ *
+ * If the signs differ, then sgn(x) gives the proper result.
+ *
+ * If both x and y are negative, then the order is reversed.
+ * Hence [x < y] = sgn(y-x). We must compute this separately from
+ * sgn(x-y); simply inverting sgn(x-y) would not handle the edge
+ * case x = y properly.
+ */
+ int cc0, cc1;
+ int64_t sx;
+ int64_t sy;
+
+ /*
+ * Reinterpret the encodings as signed. Accessing a uint64_t
+ * object through int64_t (the signed type corresponding to its
+ * declared type) is permitted aliasing in C.
+ */
+ sx = *(int64_t *)&x;
+ sy = *(int64_t *)&y;
+ sy &= ~((sx ^ sy) >> 63); /* set sy=0 if signs differ */
+
+ cc0 = (int)((sx - sy) >> 63) & 1; /* Neither subtraction overflows when */
+ cc1 = (int)((sy - sx) >> 63) & 1; /* the signs are the same. */
+
+ /*
+ * (x & y) >> 63 is 1 only when both operands are negative, in
+ * which case the reversed comparison cc1 is selected; cc0 is
+ * used in all other cases.
+ */
+ return cc0 ^ ((cc0 ^ cc1) & (int)((x & y) >> 63));
+}
+
+/*
+ * Compute ccs*exp(-x), scaled up by 2^63 and returned as an
+ * integer, by Horner evaluation of a fixed-point polynomial.
+ * NOTE(review): per the FACCT reference cited below, x is expected
+ * in the 0..log(2) range and ccs in 0..1 -- confirm against callers.
+ */
+uint64_t
+fpr_expm_p63(fpr x, fpr ccs) {
+ /*
+ * Polynomial approximation of exp(-x) is taken from FACCT:
+ * https://eprint.iacr.org/2018/1234
+ * Specifically, values are extracted from the implementation
+ * referenced from the FACCT article, and available at:
+ * https://github.com/raykzhao/gaussian
+ * Here, the coefficients have been scaled up by 2^63 and
+ * converted to integers.
+ *
+ * Tests over more than 24 billions of random inputs in the
+ * 0..log(2) range have never shown a deviation larger than
+ * 2^(-50) from the true mathematical value.
+ */
+ static const uint64_t C[] = {
+ 0x00000004741183A3u,
+ 0x00000036548CFC06u,
+ 0x0000024FDCBF140Au,
+ 0x0000171D939DE045u,
+ 0x0000D00CF58F6F84u,
+ 0x000680681CF796E3u,
+ 0x002D82D8305B0FEAu,
+ 0x011111110E066FD0u,
+ 0x0555555555070F00u,
+ 0x155555555581FF00u,
+ 0x400000000002B400u,
+ 0x7FFFFFFFFFFF4800u,
+ 0x8000000000000000u
+ };
+
+ uint64_t z, y;
+ size_t u;
+ uint32_t z0, z1, y0, y1;
+ uint64_t a, b;
+
+ /*
+ * z is x converted to 64-bit fixed point (scaled by 2^64, via
+ * a 2^63 multiply and a final left shift); y accumulates the
+ * Horner evaluation: y <- C[u] - z*y/2^64 at each step.
+ */
+ y = C[0];
+ z = (uint64_t)fpr_trunc(fpr_mul(x, fpr_ptwo63)) << 1;
+ for (u = 1; u < (sizeof C) / sizeof(C[0]); u ++) {
+ /*
+ * Compute product z * y over 128 bits, but keep only
+ * the top 64 bits.
+ *
+ * TODO: On some architectures/compilers we could use
+ * some intrinsics (__umulh() on MSVC) or other compiler
+ * extensions (unsigned __int128 on GCC / Clang) for
+ * improved speed; however, most 64-bit architectures
+ * also have appropriate IEEE754 floating-point support,
+ * which is better.
+ */
+ uint64_t c;
+
+ z0 = (uint32_t)z;
+ z1 = (uint32_t)(z >> 32);
+ y0 = (uint32_t)y;
+ y1 = (uint32_t)(y >> 32);
+ a = ((uint64_t)z0 * (uint64_t)y1)
+ + (((uint64_t)z0 * (uint64_t)y0) >> 32);
+ b = ((uint64_t)z1 * (uint64_t)y0);
+ c = (a >> 32) + (b >> 32);
+ c += (((uint64_t)(uint32_t)a + (uint64_t)(uint32_t)b) >> 32);
+ c += (uint64_t)z1 * (uint64_t)y1;
+ y = C[u] - c;
+ }
+
+ /*
+ * The scaling factor must be applied at the end. Since y is now
+ * in fixed-point notation, we have to convert the factor to the
+ * same format, and do an extra integer multiplication.
+ */
+ z = (uint64_t)fpr_trunc(fpr_mul(ccs, fpr_ptwo63)) << 1;
+ z0 = (uint32_t)z;
+ z1 = (uint32_t)(z >> 32);
+ y0 = (uint32_t)y;
+ y1 = (uint32_t)(y >> 32);
+ a = ((uint64_t)z0 * (uint64_t)y1)
+ + (((uint64_t)z0 * (uint64_t)y0) >> 32);
+ b = ((uint64_t)z1 * (uint64_t)y0);
+ y = (a >> 32) + (b >> 32);
+ y += (((uint64_t)(uint32_t)a + (uint64_t)(uint32_t)b) >> 32);
+ y += (uint64_t)z1 * (uint64_t)y1;
+
+ return y;
+}
+
+const fpr fpr_gm_tab[] = {
+ 0, 0,
+ 9223372036854775808U, 4607182418800017408U,
+ 4604544271217802189U, 4604544271217802189U,
+ 13827916308072577997U, 4604544271217802189U,
+ 4606496786581982534U, 4600565431771507043U,
+ 13823937468626282851U, 4606496786581982534U,
+ 4600565431771507043U, 4606496786581982534U,
+ 13829868823436758342U, 4600565431771507043U,
+ 4607009347991985328U, 4596196889902818827U,
+ 13819568926757594635U, 4607009347991985328U,
+ 4603179351334086856U, 4605664432017547683U,
+ 13829036468872323491U, 4603179351334086856U,
+ 4605664432017547683U, 4603179351334086856U,
+ 13826551388188862664U, 4605664432017547683U,
+ 4596196889902818827U, 4607009347991985328U,
+ 13830381384846761136U, 4596196889902818827U,
+ 4607139046673687846U, 4591727299969791020U,
+ 13815099336824566828U, 4607139046673687846U,
+ 4603889326261607894U, 4605137878724712257U,
+ 13828509915579488065U, 4603889326261607894U,
+ 4606118860100255153U, 4602163548591158843U,
+ 13825535585445934651U, 4606118860100255153U,
+ 4598900923775164166U, 4606794571824115162U,
+ 13830166608678890970U, 4598900923775164166U,
+ 4606794571824115162U, 4598900923775164166U,
+ 13822272960629939974U, 4606794571824115162U,
+ 4602163548591158843U, 4606118860100255153U,
+ 13829490896955030961U, 4602163548591158843U,
+ 4605137878724712257U, 4603889326261607894U,
+ 13827261363116383702U, 4605137878724712257U,
+ 4591727299969791020U, 4607139046673687846U,
+ 13830511083528463654U, 4591727299969791020U,
+ 4607171569234046334U, 4587232218149935124U,
+ 13810604255004710932U, 4607171569234046334U,
+ 4604224084862889120U, 4604849113969373103U,
+ 13828221150824148911U, 4604224084862889120U,
+ 4606317631232591731U, 4601373767755717824U,
+ 13824745804610493632U, 4606317631232591731U,
+ 4599740487990714333U, 4606655894547498725U,
+ 13830027931402274533U, 4599740487990714333U,
+ 4606912484326125783U, 4597922303871901467U,
+ 13821294340726677275U, 4606912484326125783U,
+ 4602805845399633902U, 4605900952042040894U,
+ 13829272988896816702U, 4602805845399633902U,
+ 4605409869824231233U, 4603540801876750389U,
+ 13826912838731526197U, 4605409869824231233U,
+ 4594454542771183930U, 4607084929468638487U,
+ 13830456966323414295U, 4594454542771183930U,
+ 4607084929468638487U, 4594454542771183930U,
+ 13817826579625959738U, 4607084929468638487U,
+ 4603540801876750389U, 4605409869824231233U,
+ 13828781906679007041U, 4603540801876750389U,
+ 4605900952042040894U, 4602805845399633902U,
+ 13826177882254409710U, 4605900952042040894U,
+ 4597922303871901467U, 4606912484326125783U,
+ 13830284521180901591U, 4597922303871901467U,
+ 4606655894547498725U, 4599740487990714333U,
+ 13823112524845490141U, 4606655894547498725U,
+ 4601373767755717824U, 4606317631232591731U,
+ 13829689668087367539U, 4601373767755717824U,
+ 4604849113969373103U, 4604224084862889120U,
+ 13827596121717664928U, 4604849113969373103U,
+ 4587232218149935124U, 4607171569234046334U,
+ 13830543606088822142U, 4587232218149935124U,
+ 4607179706000002317U, 4582730748936808062U,
+ 13806102785791583870U, 4607179706000002317U,
+ 4604386048625945823U, 4604698657331085206U,
+ 13828070694185861014U, 4604386048625945823U,
+ 4606409688975526202U, 4600971798440897930U,
+ 13824343835295673738U, 4606409688975526202U,
+ 4600154912527631775U, 4606578871587619388U,
+ 13829950908442395196U, 4600154912527631775U,
+ 4606963563043808649U, 4597061974398750563U,
+ 13820434011253526371U, 4606963563043808649U,
+ 4602994049708411683U, 4605784983948558848U,
+ 13829157020803334656U, 4602994049708411683U,
+ 4605539368864982914U, 4603361638657888991U,
+ 13826733675512664799U, 4605539368864982914U,
+ 4595327571478659014U, 4607049811591515049U,
+ 13830421848446290857U, 4595327571478659014U,
+ 4607114680469659603U, 4593485039402578702U,
+ 13816857076257354510U, 4607114680469659603U,
+ 4603716733069447353U, 4605276012900672507U,
+ 13828648049755448315U, 4603716733069447353U,
+ 4606012266443150634U, 4602550884377336506U,
+ 13825922921232112314U, 4606012266443150634U,
+ 4598476289818621559U, 4606856142606846307U,
+ 13830228179461622115U, 4598476289818621559U,
+ 4606727809065869586U, 4599322407794599425U,
+ 13822694444649375233U, 4606727809065869586U,
+ 4601771097584682078U, 4606220668805321205U,
+ 13829592705660097013U, 4601771097584682078U,
+ 4604995550503212910U, 4604058477489546729U,
+ 13827430514344322537U, 4604995550503212910U,
+ 4589965306122607094U, 4607158013403433018U,
+ 13830530050258208826U, 4589965306122607094U,
+ 4607158013403433018U, 4589965306122607094U,
+ 13813337342977382902U, 4607158013403433018U,
+ 4604058477489546729U, 4604995550503212910U,
+ 13828367587357988718U, 4604058477489546729U,
+ 4606220668805321205U, 4601771097584682078U,
+ 13825143134439457886U, 4606220668805321205U,
+ 4599322407794599425U, 4606727809065869586U,
+ 13830099845920645394U, 4599322407794599425U,
+ 4606856142606846307U, 4598476289818621559U,
+ 13821848326673397367U, 4606856142606846307U,
+ 4602550884377336506U, 4606012266443150634U,
+ 13829384303297926442U, 4602550884377336506U,
+ 4605276012900672507U, 4603716733069447353U,
+ 13827088769924223161U, 4605276012900672507U,
+ 4593485039402578702U, 4607114680469659603U,
+ 13830486717324435411U, 4593485039402578702U,
+ 4607049811591515049U, 4595327571478659014U,
+ 13818699608333434822U, 4607049811591515049U,
+ 4603361638657888991U, 4605539368864982914U,
+ 13828911405719758722U, 4603361638657888991U,
+ 4605784983948558848U, 4602994049708411683U,
+ 13826366086563187491U, 4605784983948558848U,
+ 4597061974398750563U, 4606963563043808649U,
+ 13830335599898584457U, 4597061974398750563U,
+ 4606578871587619388U, 4600154912527631775U,
+ 13823526949382407583U, 4606578871587619388U,
+ 4600971798440897930U, 4606409688975526202U,
+ 13829781725830302010U, 4600971798440897930U,
+ 4604698657331085206U, 4604386048625945823U,
+ 13827758085480721631U, 4604698657331085206U,
+ 4582730748936808062U, 4607179706000002317U,
+ 13830551742854778125U, 4582730748936808062U,
+ 4607181740574479067U, 4578227681973159812U,
+ 13801599718827935620U, 4607181740574479067U,
+ 4604465633578481725U, 4604621949701367983U,
+ 13827993986556143791U, 4604465633578481725U,
+ 4606453861145241227U, 4600769149537129431U,
+ 13824141186391905239U, 4606453861145241227U,
+ 4600360675823176935U, 4606538458821337243U,
+ 13829910495676113051U, 4600360675823176935U,
+ 4606987119037722413U, 4596629994023683153U,
+ 13820002030878458961U, 4606987119037722413U,
+ 4603087070374583113U, 4605725276488455441U,
+ 13829097313343231249U, 4603087070374583113U,
+ 4605602459698789090U, 4603270878689749849U,
+ 13826642915544525657U, 4605602459698789090U,
+ 4595762727260045105U, 4607030246558998647U,
+ 13830402283413774455U, 4595762727260045105U,
+ 4607127537664763515U, 4592606767730311893U,
+ 13815978804585087701U, 4607127537664763515U,
+ 4603803453461190356U, 4605207475328619533U,
+ 13828579512183395341U, 4603803453461190356U,
+ 4606066157444814153U, 4602357870542944470U,
+ 13825729907397720278U, 4606066157444814153U,
+ 4598688984595225406U, 4606826008603986804U,
+ 13830198045458762612U, 4598688984595225406U,
+ 4606761837001494797U, 4599112075441176914U,
+ 13822484112295952722U, 4606761837001494797U,
+ 4601967947786150793U, 4606170366472647579U,
+ 13829542403327423387U, 4601967947786150793U,
+ 4605067233569943231U, 4603974338538572089U,
+ 13827346375393347897U, 4605067233569943231U,
+ 4590846768565625881U, 4607149205763218185U,
+ 13830521242617993993U, 4590846768565625881U,
+ 4607165468267934125U, 4588998070480937184U,
+ 13812370107335712992U, 4607165468267934125U,
+ 4604141730443515286U, 4604922840319727473U,
+ 13828294877174503281U, 4604141730443515286U,
+ 4606269759522929756U, 4601573027631668967U,
+ 13824945064486444775U, 4606269759522929756U,
+ 4599531889160152938U, 4606692493141721470U,
+ 13830064529996497278U, 4599531889160152938U,
+ 4606884969294623682U, 4598262871476403630U,
+ 13821634908331179438U, 4606884969294623682U,
+ 4602710690099904183U, 4605957195211051218U,
+ 13829329232065827026U, 4602710690099904183U,
+ 4605343481119364930U, 4603629178146150899U,
+ 13827001215000926707U, 4605343481119364930U,
+ 4594016801320007031U, 4607100477024622401U,
+ 13830472513879398209U, 4594016801320007031U,
+ 4607068040143112603U, 4594891488091520602U,
+ 13818263524946296410U, 4607068040143112603U,
+ 4603451617570386922U, 4605475169017376660U,
+ 13828847205872152468U, 4603451617570386922U,
+ 4605843545406134034U, 4602900303344142735U,
+ 13826272340198918543U, 4605843545406134034U,
+ 4597492765973365521U, 4606938683557690074U,
+ 13830310720412465882U, 4597492765973365521U,
+ 4606618018794815019U, 4599948172872067014U,
+ 13823320209726842822U, 4606618018794815019U,
+ 4601173347964633034U, 4606364276725003740U,
+ 13829736313579779548U, 4601173347964633034U,
+ 4604774382555066977U, 4604305528345395596U,
+ 13827677565200171404U, 4604774382555066977U,
+ 4585465300892538317U, 4607176315382986589U,
+ 13830548352237762397U, 4585465300892538317U,
+ 4607176315382986589U, 4585465300892538317U,
+ 13808837337747314125U, 4607176315382986589U,
+ 4604305528345395596U, 4604774382555066977U,
+ 13828146419409842785U, 4604305528345395596U,
+ 4606364276725003740U, 4601173347964633034U,
+ 13824545384819408842U, 4606364276725003740U,
+ 4599948172872067014U, 4606618018794815019U,
+ 13829990055649590827U, 4599948172872067014U,
+ 4606938683557690074U, 4597492765973365521U,
+ 13820864802828141329U, 4606938683557690074U,
+ 4602900303344142735U, 4605843545406134034U,
+ 13829215582260909842U, 4602900303344142735U,
+ 4605475169017376660U, 4603451617570386922U,
+ 13826823654425162730U, 4605475169017376660U,
+ 4594891488091520602U, 4607068040143112603U,
+ 13830440076997888411U, 4594891488091520602U,
+ 4607100477024622401U, 4594016801320007031U,
+ 13817388838174782839U, 4607100477024622401U,
+ 4603629178146150899U, 4605343481119364930U,
+ 13828715517974140738U, 4603629178146150899U,
+ 4605957195211051218U, 4602710690099904183U,
+ 13826082726954679991U, 4605957195211051218U,
+ 4598262871476403630U, 4606884969294623682U,
+ 13830257006149399490U, 4598262871476403630U,
+ 4606692493141721470U, 4599531889160152938U,
+ 13822903926014928746U, 4606692493141721470U,
+ 4601573027631668967U, 4606269759522929756U,
+ 13829641796377705564U, 4601573027631668967U,
+ 4604922840319727473U, 4604141730443515286U,
+ 13827513767298291094U, 4604922840319727473U,
+ 4588998070480937184U, 4607165468267934125U,
+ 13830537505122709933U, 4588998070480937184U,
+ 4607149205763218185U, 4590846768565625881U,
+ 13814218805420401689U, 4607149205763218185U,
+ 4603974338538572089U, 4605067233569943231U,
+ 13828439270424719039U, 4603974338538572089U,
+ 4606170366472647579U, 4601967947786150793U,
+ 13825339984640926601U, 4606170366472647579U,
+ 4599112075441176914U, 4606761837001494797U,
+ 13830133873856270605U, 4599112075441176914U,
+ 4606826008603986804U, 4598688984595225406U,
+ 13822061021450001214U, 4606826008603986804U,
+ 4602357870542944470U, 4606066157444814153U,
+ 13829438194299589961U, 4602357870542944470U,
+ 4605207475328619533U, 4603803453461190356U,
+ 13827175490315966164U, 4605207475328619533U,
+ 4592606767730311893U, 4607127537664763515U,
+ 13830499574519539323U, 4592606767730311893U,
+ 4607030246558998647U, 4595762727260045105U,
+ 13819134764114820913U, 4607030246558998647U,
+ 4603270878689749849U, 4605602459698789090U,
+ 13828974496553564898U, 4603270878689749849U,
+ 4605725276488455441U, 4603087070374583113U,
+ 13826459107229358921U, 4605725276488455441U,
+ 4596629994023683153U, 4606987119037722413U,
+ 13830359155892498221U, 4596629994023683153U,
+ 4606538458821337243U, 4600360675823176935U,
+ 13823732712677952743U, 4606538458821337243U,
+ 4600769149537129431U, 4606453861145241227U,
+ 13829825898000017035U, 4600769149537129431U,
+ 4604621949701367983U, 4604465633578481725U,
+ 13827837670433257533U, 4604621949701367983U,
+ 4578227681973159812U, 4607181740574479067U,
+ 13830553777429254875U, 4578227681973159812U,
+ 4607182249242036882U, 4573724215515480177U,
+ 13797096252370255985U, 4607182249242036882U,
+ 4604505071555817232U, 4604583231088591477U,
+ 13827955267943367285U, 4604505071555817232U,
+ 4606475480113671417U, 4600667422348321968U,
+ 13824039459203097776U, 4606475480113671417U,
+ 4600463181646572228U, 4606517779747998088U,
+ 13829889816602773896U, 4600463181646572228U,
+ 4606998399608725124U, 4596413578358834022U,
+ 13819785615213609830U, 4606998399608725124U,
+ 4603133304188877240U, 4605694995810664660U,
+ 13829067032665440468U, 4603133304188877240U,
+ 4605633586259814045U, 4603225210076562971U,
+ 13826597246931338779U, 4605633586259814045U,
+ 4595979936813835462U, 4607019963775302583U,
+ 13830392000630078391U, 4595979936813835462U,
+ 4607133460805585796U, 4592167175087283203U,
+ 13815539211942059011U, 4607133460805585796U,
+ 4603846496621587377U, 4605172808754305228U,
+ 13828544845609081036U, 4603846496621587377U,
+ 4606092657816072624U, 4602260871257280788U,
+ 13825632908112056596U, 4606092657816072624U,
+ 4598795050632330097U, 4606810452769876110U,
+ 13830182489624651918U, 4598795050632330097U,
+ 4606778366364612594U, 4599006600037663623U,
+ 13822378636892439431U, 4606778366364612594U,
+ 4602065906208722008U, 4606144763310860551U,
+ 13829516800165636359U, 4602065906208722008U,
+ 4605102686554936490U, 4603931940768740167U,
+ 13827303977623515975U, 4605102686554936490U,
+ 4591287158938884897U, 4607144295058764886U,
+ 13830516331913540694U, 4591287158938884897U,
+ 4607168688050493276U, 4588115294056142819U,
+ 13811487330910918627U, 4607168688050493276U,
+ 4604183020748362039U, 4604886103475043762U,
+ 13828258140329819570U, 4604183020748362039U,
+ 4606293848208650998U, 4601473544562720001U,
+ 13824845581417495809U, 4606293848208650998U,
+ 4599636300858866724U, 4606674353838411301U,
+ 13830046390693187109U, 4599636300858866724U,
+ 4606898891031025132U, 4598136582470364665U,
+ 13821508619325140473U, 4606898891031025132U,
+ 4602758354025980442U, 4605929219593405673U,
+ 13829301256448181481U, 4602758354025980442U,
+ 4605376811039722786U, 4603585091850767959U,
+ 13826957128705543767U, 4605376811039722786U,
+ 4594235767444503503U, 4607092871118901179U,
+ 13830464907973676987U, 4594235767444503503U,
+ 4607076652372832968U, 4594673119063280916U,
+ 13818045155918056724U, 4607076652372832968U,
+ 4603496309891590679U, 4605442656228245717U,
+ 13828814693083021525U, 4603496309891590679U,
+ 4605872393621214213U, 4602853162432841185U,
+ 13826225199287616993U, 4605872393621214213U,
+ 4597707695679609371U, 4606925748668145757U,
+ 13830297785522921565U, 4597707695679609371U,
+ 4606637115963965612U, 4599844446633109139U,
+ 13823216483487884947U, 4606637115963965612U,
+ 4601273700967202825U, 4606341107699334546U,
+ 13829713144554110354U, 4601273700967202825U,
+ 4604811873195349477U, 4604264921241055824U,
+ 13827636958095831632U, 4604811873195349477U,
+ 4586348876009622851U, 4607174111710118367U,
+ 13830546148564894175U, 4586348876009622851U,
+ 4607178180169683960U, 4584498631466405633U,
+ 13807870668321181441U, 4607178180169683960U,
+ 4604345904647073908U, 4604736643460027021U,
+ 13828108680314802829U, 4604345904647073908U,
+ 4606387137437298591U, 4601072712526242277U,
+ 13824444749381018085U, 4606387137437298591U,
+ 4600051662802353687U, 4606598603759044570U,
+ 13829970640613820378U, 4600051662802353687U,
+ 4606951288507767453U, 4597277522845151878U,
+ 13820649559699927686U, 4606951288507767453U,
+ 4602947266358709886U, 4605814408482919348U,
+ 13829186445337695156U, 4602947266358709886U,
+ 4605507406967535927U, 4603406726595779752U,
+ 13826778763450555560U, 4605507406967535927U,
+ 4595109641634432498U, 4607059093103722971U,
+ 13830431129958498779U, 4595109641634432498U,
+ 4607107746899444102U, 4593797652641645341U,
+ 13817169689496421149U, 4607107746899444102U,
+ 4603673059103075106U, 4605309881318010327U,
+ 13828681918172786135U, 4603673059103075106U,
+ 4605984877841711338U, 4602646891659203088U,
+ 13826018928513978896U, 4605984877841711338U,
+ 4598369669086960528U, 4606870719641066940U,
+ 13830242756495842748U, 4598369669086960528U,
+ 4606710311774494716U, 4599427256825614420U,
+ 13822799293680390228U, 4606710311774494716U,
+ 4601672213217083403U, 4606245366082353408U,
+ 13829617402937129216U, 4601672213217083403U,
+ 4604959323120302796U, 4604100215502905499U,
+ 13827472252357681307U, 4604959323120302796U,
+ 4589524267239410099U, 4607161910007591876U,
+ 13830533946862367684U, 4589524267239410099U,
+ 4607153778602162496U, 4590406145430462614U,
+ 13813778182285238422U, 4607153778602162496U,
+ 4604016517974851588U, 4605031521104517324U,
+ 13828403557959293132U, 4604016517974851588U,
+ 4606195668621671667U, 4601869677011524443U,
+ 13825241713866300251U, 4606195668621671667U,
+ 4599217346014614711U, 4606744984357082948U,
+ 13830117021211858756U, 4599217346014614711U,
+ 4606841238740778884U, 4598582729657176439U,
+ 13821954766511952247U, 4606841238740778884U,
+ 4602454542796181607U, 4606039359984203741U,
+ 13829411396838979549U, 4602454542796181607U,
+ 4605241877142478242U, 4603760198400967492U,
+ 13827132235255743300U, 4605241877142478242U,
+ 4593046061348462537U, 4607121277474223905U,
+ 13830493314328999713U, 4593046061348462537U,
+ 4607040195955932526U, 4595545269419264690U,
+ 13818917306274040498U, 4607040195955932526U,
+ 4603316355454250015U, 4605571053506370248U,
+ 13828943090361146056U, 4603316355454250015U,
+ 4605755272910869620U, 4603040651631881451U,
+ 13826412688486657259U, 4605755272910869620U,
+ 4596846128749438754U, 4606975506703684317U,
+ 13830347543558460125U, 4596846128749438754U,
+ 4606558823023444576U, 4600257918160607478U,
+ 13823629955015383286U, 4606558823023444576U,
+ 4600870609507958271U, 4606431930490633905U,
+ 13829803967345409713U, 4600870609507958271U,
+ 4604660425598397818U, 4604425958770613225U,
+ 13827797995625389033U, 4604660425598397818U,
+ 4580962600092897021U, 4607180892816495009U,
+ 13830552929671270817U, 4580962600092897021U,
+ 4607180892816495009U, 4580962600092897021U,
+ 13804334636947672829U, 4607180892816495009U,
+ 4604425958770613225U, 4604660425598397818U,
+ 13828032462453173626U, 4604425958770613225U,
+ 4606431930490633905U, 4600870609507958271U,
+ 13824242646362734079U, 4606431930490633905U,
+ 4600257918160607478U, 4606558823023444576U,
+ 13829930859878220384U, 4600257918160607478U,
+ 4606975506703684317U, 4596846128749438754U,
+ 13820218165604214562U, 4606975506703684317U,
+ 4603040651631881451U, 4605755272910869620U,
+ 13829127309765645428U, 4603040651631881451U,
+ 4605571053506370248U, 4603316355454250015U,
+ 13826688392309025823U, 4605571053506370248U,
+ 4595545269419264690U, 4607040195955932526U,
+ 13830412232810708334U, 4595545269419264690U,
+ 4607121277474223905U, 4593046061348462537U,
+ 13816418098203238345U, 4607121277474223905U,
+ 4603760198400967492U, 4605241877142478242U,
+ 13828613913997254050U, 4603760198400967492U,
+ 4606039359984203741U, 4602454542796181607U,
+ 13825826579650957415U, 4606039359984203741U,
+ 4598582729657176439U, 4606841238740778884U,
+ 13830213275595554692U, 4598582729657176439U,
+ 4606744984357082948U, 4599217346014614711U,
+ 13822589382869390519U, 4606744984357082948U,
+ 4601869677011524443U, 4606195668621671667U,
+ 13829567705476447475U, 4601869677011524443U,
+ 4605031521104517324U, 4604016517974851588U,
+ 13827388554829627396U, 4605031521104517324U,
+ 4590406145430462614U, 4607153778602162496U,
+ 13830525815456938304U, 4590406145430462614U,
+ 4607161910007591876U, 4589524267239410099U,
+ 13812896304094185907U, 4607161910007591876U,
+ 4604100215502905499U, 4604959323120302796U,
+ 13828331359975078604U, 4604100215502905499U,
+ 4606245366082353408U, 4601672213217083403U,
+ 13825044250071859211U, 4606245366082353408U,
+ 4599427256825614420U, 4606710311774494716U,
+ 13830082348629270524U, 4599427256825614420U,
+ 4606870719641066940U, 4598369669086960528U,
+ 13821741705941736336U, 4606870719641066940U,
+ 4602646891659203088U, 4605984877841711338U,
+ 13829356914696487146U, 4602646891659203088U,
+ 4605309881318010327U, 4603673059103075106U,
+ 13827045095957850914U, 4605309881318010327U,
+ 4593797652641645341U, 4607107746899444102U,
+ 13830479783754219910U, 4593797652641645341U,
+ 4607059093103722971U, 4595109641634432498U,
+ 13818481678489208306U, 4607059093103722971U,
+ 4603406726595779752U, 4605507406967535927U,
+ 13828879443822311735U, 4603406726595779752U,
+ 4605814408482919348U, 4602947266358709886U,
+ 13826319303213485694U, 4605814408482919348U,
+ 4597277522845151878U, 4606951288507767453U,
+ 13830323325362543261U, 4597277522845151878U,
+ 4606598603759044570U, 4600051662802353687U,
+ 13823423699657129495U, 4606598603759044570U,
+ 4601072712526242277U, 4606387137437298591U,
+ 13829759174292074399U, 4601072712526242277U,
+ 4604736643460027021U, 4604345904647073908U,
+ 13827717941501849716U, 4604736643460027021U,
+ 4584498631466405633U, 4607178180169683960U,
+ 13830550217024459768U, 4584498631466405633U,
+ 4607174111710118367U, 4586348876009622851U,
+ 13809720912864398659U, 4607174111710118367U,
+ 4604264921241055824U, 4604811873195349477U,
+ 13828183910050125285U, 4604264921241055824U,
+ 4606341107699334546U, 4601273700967202825U,
+ 13824645737821978633U, 4606341107699334546U,
+ 4599844446633109139U, 4606637115963965612U,
+ 13830009152818741420U, 4599844446633109139U,
+ 4606925748668145757U, 4597707695679609371U,
+ 13821079732534385179U, 4606925748668145757U,
+ 4602853162432841185U, 4605872393621214213U,
+ 13829244430475990021U, 4602853162432841185U,
+ 4605442656228245717U, 4603496309891590679U,
+ 13826868346746366487U, 4605442656228245717U,
+ 4594673119063280916U, 4607076652372832968U,
+ 13830448689227608776U, 4594673119063280916U,
+ 4607092871118901179U, 4594235767444503503U,
+ 13817607804299279311U, 4607092871118901179U,
+ 4603585091850767959U, 4605376811039722786U,
+ 13828748847894498594U, 4603585091850767959U,
+ 4605929219593405673U, 4602758354025980442U,
+ 13826130390880756250U, 4605929219593405673U,
+ 4598136582470364665U, 4606898891031025132U,
+ 13830270927885800940U, 4598136582470364665U,
+ 4606674353838411301U, 4599636300858866724U,
+ 13823008337713642532U, 4606674353838411301U,
+ 4601473544562720001U, 4606293848208650998U,
+ 13829665885063426806U, 4601473544562720001U,
+ 4604886103475043762U, 4604183020748362039U,
+ 13827555057603137847U, 4604886103475043762U,
+ 4588115294056142819U, 4607168688050493276U,
+ 13830540724905269084U, 4588115294056142819U,
+ 4607144295058764886U, 4591287158938884897U,
+ 13814659195793660705U, 4607144295058764886U,
+ 4603931940768740167U, 4605102686554936490U,
+ 13828474723409712298U, 4603931940768740167U,
+ 4606144763310860551U, 4602065906208722008U,
+ 13825437943063497816U, 4606144763310860551U,
+ 4599006600037663623U, 4606778366364612594U,
+ 13830150403219388402U, 4599006600037663623U,
+ 4606810452769876110U, 4598795050632330097U,
+ 13822167087487105905U, 4606810452769876110U,
+ 4602260871257280788U, 4606092657816072624U,
+ 13829464694670848432U, 4602260871257280788U,
+ 4605172808754305228U, 4603846496621587377U,
+ 13827218533476363185U, 4605172808754305228U,
+ 4592167175087283203U, 4607133460805585796U,
+ 13830505497660361604U, 4592167175087283203U,
+ 4607019963775302583U, 4595979936813835462U,
+ 13819351973668611270U, 4607019963775302583U,
+ 4603225210076562971U, 4605633586259814045U,
+ 13829005623114589853U, 4603225210076562971U,
+ 4605694995810664660U, 4603133304188877240U,
+ 13826505341043653048U, 4605694995810664660U,
+ 4596413578358834022U, 4606998399608725124U,
+ 13830370436463500932U, 4596413578358834022U,
+ 4606517779747998088U, 4600463181646572228U,
+ 13823835218501348036U, 4606517779747998088U,
+ 4600667422348321968U, 4606475480113671417U,
+ 13829847516968447225U, 4600667422348321968U,
+ 4604583231088591477U, 4604505071555817232U,
+ 13827877108410593040U, 4604583231088591477U,
+ 4573724215515480177U, 4607182249242036882U,
+ 13830554286096812690U, 4573724215515480177U,
+ 4607182376410422530U, 4569220649180767418U,
+ 13792592686035543226U, 4607182376410422530U,
+ 4604524701268679793U, 4604563781218984604U,
+ 13827935818073760412U, 4604524701268679793U,
+ 4606486172460753999U, 4600616459743653188U,
+ 13823988496598428996U, 4606486172460753999U,
+ 4600514338912178239U, 4606507322377452870U,
+ 13829879359232228678U, 4600514338912178239U,
+ 4607003915349878877U, 4596305267720071930U,
+ 13819677304574847738U, 4607003915349878877U,
+ 4603156351203636159U, 4605679749231851918U,
+ 13829051786086627726U, 4603156351203636159U,
+ 4605649044311923410U, 4603202304363743346U,
+ 13826574341218519154U, 4605649044311923410U,
+ 4596088445927168004U, 4607014697483910382U,
+ 13830386734338686190U, 4596088445927168004U,
+ 4607136295912168606U, 4591947271803021404U,
+ 13815319308657797212U, 4607136295912168606U,
+ 4603867938232615808U, 4605155376589456981U,
+ 13828527413444232789U, 4603867938232615808U,
+ 4606105796280968177U, 4602212250118051877U,
+ 13825584286972827685U, 4606105796280968177U,
+ 4598848011564831930U, 4606802552898869248U,
+ 13830174589753645056U, 4598848011564831930U,
+ 4606786509620734768U, 4598953786765296928U,
+ 13822325823620072736U, 4606786509620734768U,
+ 4602114767134999006U, 4606131849150971908U,
+ 13829503886005747716U, 4602114767134999006U,
+ 4605120315324767624U, 4603910660507251362U,
+ 13827282697362027170U, 4605120315324767624U,
+ 4591507261658050721U, 4607141713064252300U,
+ 13830513749919028108U, 4591507261658050721U,
+ 4607170170974224083U, 4587673791460508439U,
+ 13811045828315284247U, 4607170170974224083U,
+ 4604203581176243359U, 4604867640218014515U,
+ 13828239677072790323U, 4604203581176243359U,
+ 4606305777984577632U, 4601423692641949331U,
+ 13824795729496725139U, 4606305777984577632U,
+ 4599688422741010356U, 4606665164148251002U,
+ 13830037201003026810U, 4599688422741010356U,
+ 4606905728766014348U, 4598029484874872834U,
+ 13821401521729648642U, 4606905728766014348U,
+ 4602782121393764535U, 4605915122243179241U,
+ 13829287159097955049U, 4602782121393764535U,
+ 4605393374401988274U, 4603562972219549215U,
+ 13826935009074325023U, 4605393374401988274U,
+ 4594345179472540681U, 4607088942243446236U,
+ 13830460979098222044U, 4594345179472540681U,
+ 4607080832832247697U, 4594563856311064231U,
+ 13817935893165840039U, 4607080832832247697U,
+ 4603518581031047189U, 4605426297151190466U,
+ 13828798334005966274U, 4603518581031047189U,
+ 4605886709123365959U, 4602829525820289164U,
+ 13826201562675064972U, 4605886709123365959U,
+ 4597815040470278984U, 4606919157647773535U,
+ 13830291194502549343U, 4597815040470278984U,
+ 4606646545123403481U, 4599792496117920694U,
+ 13823164532972696502U, 4606646545123403481U,
+ 4601323770373937522U, 4606329407841126011U,
+ 13829701444695901819U, 4601323770373937522U,
+ 4604830524903495634U, 4604244531615310815U,
+ 13827616568470086623U, 4604830524903495634U,
+ 4586790578280679046U, 4607172882816799076U,
+ 13830544919671574884U, 4586790578280679046U,
+ 4607178985458280057U, 4583614727651146525U,
+ 13806986764505922333U, 4607178985458280057U,
+ 4604366005771528720U, 4604717681185626434U,
+ 13828089718040402242U, 4604366005771528720U,
+ 4606398451906509788U, 4601022290077223616U,
+ 13824394326931999424U, 4606398451906509788U,
+ 4600103317933788342U, 4606588777269136769U,
+ 13829960814123912577U, 4600103317933788342U,
+ 4606957467106717424U, 4597169786279785693U,
+ 13820541823134561501U, 4606957467106717424U,
+ 4602970680601913687U, 4605799732098147061U,
+ 13829171768952922869U, 4602970680601913687U,
+ 4605523422498301790U, 4603384207141321914U,
+ 13826756243996097722U, 4605523422498301790U,
+ 4595218635031890910U, 4607054494135176056U,
+ 13830426530989951864U, 4595218635031890910U,
+ 4607111255739239816U, 4593688012422887515U,
+ 13817060049277663323U, 4607111255739239816U,
+ 4603694922063032361U, 4605292980606880364U,
+ 13828665017461656172U, 4603694922063032361U,
+ 4605998608960791335U, 4602598930031891166U,
+ 13825970966886666974U, 4605998608960791335U,
+ 4598423001813699022U, 4606863472012527185U,
+ 13830235508867302993U, 4598423001813699022U,
+ 4606719100629313491U, 4599374859150636784U,
+ 13822746896005412592U, 4606719100629313491U,
+ 4601721693286060937U, 4606233055365547081U,
+ 13829605092220322889U, 4601721693286060937U,
+ 4604977468824438271U, 4604079374282302598U,
+ 13827451411137078406U, 4604977468824438271U,
+ 4589744810590291021U, 4607160003989618959U,
+ 13830532040844394767U, 4589744810590291021U,
+ 4607155938267770208U, 4590185751760970393U,
+ 13813557788615746201U, 4607155938267770208U,
+ 4604037525321326463U, 4605013567986435066U,
+ 13828385604841210874U, 4604037525321326463U,
+ 4606208206518262803U, 4601820425647934753U,
+ 13825192462502710561U, 4606208206518262803U,
+ 4599269903251194481U, 4606736437002195879U,
+ 13830108473856971687U, 4599269903251194481U,
+ 4606848731493011465U, 4598529532600161144U,
+ 13821901569454936952U, 4606848731493011465U,
+ 4602502755147763107U, 4606025850160239809U,
+ 13829397887015015617U, 4602502755147763107U,
+ 4605258978359093269U, 4603738491917026584U,
+ 13827110528771802392U, 4605258978359093269U,
+ 4593265590854265407U, 4607118021058468598U,
+ 13830490057913244406U, 4593265590854265407U,
+ 4607045045516813836U, 4595436449949385485U,
+ 13818808486804161293U, 4607045045516813836U,
+ 4603339021357904144U, 4605555245917486022U,
+ 13828927282772261830U, 4603339021357904144U,
+ 4605770164172969910U, 4603017373458244943U,
+ 13826389410313020751U, 4605770164172969910U,
+ 4596954088216812973U, 4606969576261663845U,
+ 13830341613116439653U, 4596954088216812973U,
+ 4606568886807728474U, 4600206446098256018U,
+ 13823578482953031826U, 4606568886807728474U,
+ 4600921238092511730U, 4606420848538580260U,
+ 13829792885393356068U, 4600921238092511730U,
+ 4604679572075463103U, 4604406033021674239U,
+ 13827778069876450047U, 4604679572075463103U,
+ 4581846703643734566U, 4607180341788068727U,
+ 13830552378642844535U, 4581846703643734566U,
+ 4607181359080094673U, 4579996072175835083U,
+ 13803368109030610891U, 4607181359080094673U,
+ 4604445825685214043U, 4604641218080103285U,
+ 13828013254934879093U, 4604445825685214043U,
+ 4606442934727379583U, 4600819913163773071U,
+ 13824191950018548879U, 4606442934727379583U,
+ 4600309328230211502U, 4606548680329491866U,
+ 13829920717184267674U, 4600309328230211502U,
+ 4606981354314050484U, 4596738097012783531U,
+ 13820110133867559339U, 4606981354314050484U,
+ 4603063884010218172U, 4605740310302420207U,
+ 13829112347157196015U, 4603063884010218172U,
+ 4605586791482848547U, 4603293641160266722U,
+ 13826665678015042530U, 4605586791482848547U,
+ 4595654028864046335U, 4607035262954517034U,
+ 13830407299809292842U, 4595654028864046335U,
+ 4607124449686274900U, 4592826452951465409U,
+ 13816198489806241217U, 4607124449686274900U,
+ 4603781852316960384U, 4605224709411790590U,
+ 13828596746266566398U, 4603781852316960384U,
+ 4606052795787882823U, 4602406247776385022U,
+ 13825778284631160830U, 4606052795787882823U,
+ 4598635880488956483U, 4606833664420673202U,
+ 13830205701275449010U, 4598635880488956483U,
+ 4606753451050079834U, 4599164736579548843U,
+ 13822536773434324651U, 4606753451050079834U,
+ 4601918851211878557U, 4606183055233559255U,
+ 13829555092088335063U, 4601918851211878557U,
+ 4605049409688478101U, 4603995455647851249U,
+ 13827367492502627057U, 4605049409688478101U,
+ 4590626485056654602U, 4607151534426937478U,
+ 13830523571281713286U, 4590626485056654602U,
+ 4607163731439411601U, 4589303678145802340U,
+ 13812675715000578148U, 4607163731439411601U,
+ 4604121000955189926U, 4604941113561600762U,
+ 13828313150416376570U, 4604121000955189926U,
+ 4606257600839867033U, 4601622657843474729U,
+ 13824994694698250537U, 4606257600839867033U,
+ 4599479600326345459U, 4606701442584137310U,
+ 13830073479438913118U, 4599479600326345459U,
+ 4606877885424248132U, 4598316292140394014U,
+ 13821688328995169822U, 4606877885424248132U,
+ 4602686793990243041U, 4605971073215153165U,
+ 13829343110069928973U, 4602686793990243041U,
+ 4605326714874986465U, 4603651144395358093U,
+ 13827023181250133901U, 4605326714874986465U,
+ 4593907249284540294U, 4607104153983298999U,
+ 13830476190838074807U, 4593907249284540294U,
+ 4607063608453868552U, 4595000592312171144U,
+ 13818372629166946952U, 4607063608453868552U,
+ 4603429196809300824U, 4605491322423429598U,
+ 13828863359278205406U, 4603429196809300824U,
+ 4605829012964735987U, 4602923807199184054U,
+ 13826295844053959862U, 4605829012964735987U,
+ 4597385183080791534U, 4606945027305114062U,
+ 13830317064159889870U, 4597385183080791534U,
+ 4606608350964852124U, 4599999947619525579U,
+ 13823371984474301387U, 4606608350964852124U,
+ 4601123065313358619U, 4606375745674388705U,
+ 13829747782529164513U, 4601123065313358619U,
+ 4604755543975806820U, 4604325745441780828U,
+ 13827697782296556636U, 4604755543975806820U,
+ 4585023436363055487U, 4607177290141793710U,
+ 13830549326996569518U, 4585023436363055487U,
+ 4607175255902437396U, 4585907115494236537U,
+ 13809279152349012345U, 4607175255902437396U,
+ 4604285253548209224U, 4604793159020491611U,
+ 13828165195875267419U, 4604285253548209224U,
+ 4606352730697093817U, 4601223560006786057U,
+ 13824595596861561865U, 4606352730697093817U,
+ 4599896339047301634U, 4606627607157935956U,
+ 13829999644012711764U, 4599896339047301634U,
+ 4606932257325205256U, 4597600270510262682U,
+ 13820972307365038490U, 4606932257325205256U,
+ 4602876755014813164U, 4605858005670328613U,
+ 13829230042525104421U, 4602876755014813164U,
+ 4605458946901419122U, 4603473988668005304U,
+ 13826846025522781112U, 4605458946901419122U,
+ 4594782329999411347U, 4607072388129742377U,
+ 13830444424984518185U, 4594782329999411347U,
+ 4607096716058023245U, 4594126307716900071U,
+ 13817498344571675879U, 4607096716058023245U,
+ 4603607160562208225U, 4605360179893335444U,
+ 13828732216748111252U, 4603607160562208225U,
+ 4605943243960030558U, 4602734543519989142U,
+ 13826106580374764950U, 4605943243960030558U,
+ 4598209407597805010U, 4606891971185517504U,
+ 13830264008040293312U, 4598209407597805010U,
+ 4606683463531482757U, 4599584122834874440U,
+ 13822956159689650248U, 4606683463531482757U,
+ 4601523323048804569U, 4606281842017099424U,
+ 13829653878871875232U, 4601523323048804569U,
+ 4604904503566677638U, 4604162403772767740U,
+ 13827534440627543548U, 4604904503566677638U,
+ 4588556721781247689U, 4607167120476811757U,
+ 13830539157331587565U, 4588556721781247689U,
+ 4607146792632922887U, 4591066993883984169U,
+ 13814439030738759977U, 4607146792632922887U,
+ 4603953166845776383U, 4605084992581147553U,
+ 13828457029435923361U, 4603953166845776383U,
+ 4606157602458368090U, 4602016966272225497U,
+ 13825389003127001305U, 4606157602458368090U,
+ 4599059363095165615U, 4606770142132396069U,
+ 13830142178987171877U, 4599059363095165615U,
+ 4606818271362779153U, 4598742041476147134U,
+ 13822114078330922942U, 4606818271362779153U,
+ 4602309411551204896U, 4606079444829232727U,
+ 13829451481684008535U, 4602309411551204896U,
+ 4605190175055178825U, 4603825001630339212U,
+ 13827197038485115020U, 4605190175055178825U,
+ 4592387007752762956U, 4607130541380624519U,
+ 13830502578235400327U, 4592387007752762956U,
+ 4607025146816593591U, 4595871363584150300U,
+ 13819243400438926108U, 4607025146816593591U,
+ 4603248068256948438U, 4605618058006716661U,
+ 13828990094861492469U, 4603248068256948438U,
+ 4605710171610479304U, 4603110210506737381U,
+ 13826482247361513189U, 4605710171610479304U,
+ 4596521820799644122U, 4606992800820440327U,
+ 13830364837675216135U, 4596521820799644122U,
+ 4606528158595189433U, 4600411960456200676U,
+ 13823783997310976484U, 4606528158595189433U,
+ 4600718319105833937U, 4606464709641375231U,
+ 13829836746496151039U, 4600718319105833937U,
+ 4604602620643553229U, 4604485382263976838U,
+ 13827857419118752646U, 4604602620643553229U,
+ 4576459225186735875U, 4607182037296057423U,
+ 13830554074150833231U, 4576459225186735875U,
+ 4607182037296057423U, 4576459225186735875U,
+ 13799831262041511683U, 4607182037296057423U,
+ 4604485382263976838U, 4604602620643553229U,
+ 13827974657498329037U, 4604485382263976838U,
+ 4606464709641375231U, 4600718319105833937U,
+ 13824090355960609745U, 4606464709641375231U,
+ 4600411960456200676U, 4606528158595189433U,
+ 13829900195449965241U, 4600411960456200676U,
+ 4606992800820440327U, 4596521820799644122U,
+ 13819893857654419930U, 4606992800820440327U,
+ 4603110210506737381U, 4605710171610479304U,
+ 13829082208465255112U, 4603110210506737381U,
+ 4605618058006716661U, 4603248068256948438U,
+ 13826620105111724246U, 4605618058006716661U,
+ 4595871363584150300U, 4607025146816593591U,
+ 13830397183671369399U, 4595871363584150300U,
+ 4607130541380624519U, 4592387007752762956U,
+ 13815759044607538764U, 4607130541380624519U,
+ 4603825001630339212U, 4605190175055178825U,
+ 13828562211909954633U, 4603825001630339212U,
+ 4606079444829232727U, 4602309411551204896U,
+ 13825681448405980704U, 4606079444829232727U,
+ 4598742041476147134U, 4606818271362779153U,
+ 13830190308217554961U, 4598742041476147134U,
+ 4606770142132396069U, 4599059363095165615U,
+ 13822431399949941423U, 4606770142132396069U,
+ 4602016966272225497U, 4606157602458368090U,
+ 13829529639313143898U, 4602016966272225497U,
+ 4605084992581147553U, 4603953166845776383U,
+ 13827325203700552191U, 4605084992581147553U,
+ 4591066993883984169U, 4607146792632922887U,
+ 13830518829487698695U, 4591066993883984169U,
+ 4607167120476811757U, 4588556721781247689U,
+ 13811928758636023497U, 4607167120476811757U,
+ 4604162403772767740U, 4604904503566677638U,
+ 13828276540421453446U, 4604162403772767740U,
+ 4606281842017099424U, 4601523323048804569U,
+ 13824895359903580377U, 4606281842017099424U,
+ 4599584122834874440U, 4606683463531482757U,
+ 13830055500386258565U, 4599584122834874440U,
+ 4606891971185517504U, 4598209407597805010U,
+ 13821581444452580818U, 4606891971185517504U,
+ 4602734543519989142U, 4605943243960030558U,
+ 13829315280814806366U, 4602734543519989142U,
+ 4605360179893335444U, 4603607160562208225U,
+ 13826979197416984033U, 4605360179893335444U,
+ 4594126307716900071U, 4607096716058023245U,
+ 13830468752912799053U, 4594126307716900071U,
+ 4607072388129742377U, 4594782329999411347U,
+ 13818154366854187155U, 4607072388129742377U,
+ 4603473988668005304U, 4605458946901419122U,
+ 13828830983756194930U, 4603473988668005304U,
+ 4605858005670328613U, 4602876755014813164U,
+ 13826248791869588972U, 4605858005670328613U,
+ 4597600270510262682U, 4606932257325205256U,
+ 13830304294179981064U, 4597600270510262682U,
+ 4606627607157935956U, 4599896339047301634U,
+ 13823268375902077442U, 4606627607157935956U,
+ 4601223560006786057U, 4606352730697093817U,
+ 13829724767551869625U, 4601223560006786057U,
+ 4604793159020491611U, 4604285253548209224U,
+ 13827657290402985032U, 4604793159020491611U,
+ 4585907115494236537U, 4607175255902437396U,
+ 13830547292757213204U, 4585907115494236537U,
+ 4607177290141793710U, 4585023436363055487U,
+ 13808395473217831295U, 4607177290141793710U,
+ 4604325745441780828U, 4604755543975806820U,
+ 13828127580830582628U, 4604325745441780828U,
+ 4606375745674388705U, 4601123065313358619U,
+ 13824495102168134427U, 4606375745674388705U,
+ 4599999947619525579U, 4606608350964852124U,
+ 13829980387819627932U, 4599999947619525579U,
+ 4606945027305114062U, 4597385183080791534U,
+ 13820757219935567342U, 4606945027305114062U,
+ 4602923807199184054U, 4605829012964735987U,
+ 13829201049819511795U, 4602923807199184054U,
+ 4605491322423429598U, 4603429196809300824U,
+ 13826801233664076632U, 4605491322423429598U,
+ 4595000592312171144U, 4607063608453868552U,
+ 13830435645308644360U, 4595000592312171144U,
+ 4607104153983298999U, 4593907249284540294U,
+ 13817279286139316102U, 4607104153983298999U,
+ 4603651144395358093U, 4605326714874986465U,
+ 13828698751729762273U, 4603651144395358093U,
+ 4605971073215153165U, 4602686793990243041U,
+ 13826058830845018849U, 4605971073215153165U,
+ 4598316292140394014U, 4606877885424248132U,
+ 13830249922279023940U, 4598316292140394014U,
+ 4606701442584137310U, 4599479600326345459U,
+ 13822851637181121267U, 4606701442584137310U,
+ 4601622657843474729U, 4606257600839867033U,
+ 13829629637694642841U, 4601622657843474729U,
+ 4604941113561600762U, 4604121000955189926U,
+ 13827493037809965734U, 4604941113561600762U,
+ 4589303678145802340U, 4607163731439411601U,
+ 13830535768294187409U, 4589303678145802340U,
+ 4607151534426937478U, 4590626485056654602U,
+ 13813998521911430410U, 4607151534426937478U,
+ 4603995455647851249U, 4605049409688478101U,
+ 13828421446543253909U, 4603995455647851249U,
+ 4606183055233559255U, 4601918851211878557U,
+ 13825290888066654365U, 4606183055233559255U,
+ 4599164736579548843U, 4606753451050079834U,
+ 13830125487904855642U, 4599164736579548843U,
+ 4606833664420673202U, 4598635880488956483U,
+ 13822007917343732291U, 4606833664420673202U,
+ 4602406247776385022U, 4606052795787882823U,
+ 13829424832642658631U, 4602406247776385022U,
+ 4605224709411790590U, 4603781852316960384U,
+ 13827153889171736192U, 4605224709411790590U,
+ 4592826452951465409U, 4607124449686274900U,
+ 13830496486541050708U, 4592826452951465409U,
+ 4607035262954517034U, 4595654028864046335U,
+ 13819026065718822143U, 4607035262954517034U,
+ 4603293641160266722U, 4605586791482848547U,
+ 13828958828337624355U, 4603293641160266722U,
+ 4605740310302420207U, 4603063884010218172U,
+ 13826435920864993980U, 4605740310302420207U,
+ 4596738097012783531U, 4606981354314050484U,
+ 13830353391168826292U, 4596738097012783531U,
+ 4606548680329491866U, 4600309328230211502U,
+ 13823681365084987310U, 4606548680329491866U,
+ 4600819913163773071U, 4606442934727379583U,
+ 13829814971582155391U, 4600819913163773071U,
+ 4604641218080103285U, 4604445825685214043U,
+ 13827817862539989851U, 4604641218080103285U,
+ 4579996072175835083U, 4607181359080094673U,
+ 13830553395934870481U, 4579996072175835083U,
+ 4607180341788068727U, 4581846703643734566U,
+ 13805218740498510374U, 4607180341788068727U,
+ 4604406033021674239U, 4604679572075463103U,
+ 13828051608930238911U, 4604406033021674239U,
+ 4606420848538580260U, 4600921238092511730U,
+ 13824293274947287538U, 4606420848538580260U,
+ 4600206446098256018U, 4606568886807728474U,
+ 13829940923662504282U, 4600206446098256018U,
+ 4606969576261663845U, 4596954088216812973U,
+ 13820326125071588781U, 4606969576261663845U,
+ 4603017373458244943U, 4605770164172969910U,
+ 13829142201027745718U, 4603017373458244943U,
+ 4605555245917486022U, 4603339021357904144U,
+ 13826711058212679952U, 4605555245917486022U,
+ 4595436449949385485U, 4607045045516813836U,
+ 13830417082371589644U, 4595436449949385485U,
+ 4607118021058468598U, 4593265590854265407U,
+ 13816637627709041215U, 4607118021058468598U,
+ 4603738491917026584U, 4605258978359093269U,
+ 13828631015213869077U, 4603738491917026584U,
+ 4606025850160239809U, 4602502755147763107U,
+ 13825874792002538915U, 4606025850160239809U,
+ 4598529532600161144U, 4606848731493011465U,
+ 13830220768347787273U, 4598529532600161144U,
+ 4606736437002195879U, 4599269903251194481U,
+ 13822641940105970289U, 4606736437002195879U,
+ 4601820425647934753U, 4606208206518262803U,
+ 13829580243373038611U, 4601820425647934753U,
+ 4605013567986435066U, 4604037525321326463U,
+ 13827409562176102271U, 4605013567986435066U,
+ 4590185751760970393U, 4607155938267770208U,
+ 13830527975122546016U, 4590185751760970393U,
+ 4607160003989618959U, 4589744810590291021U,
+ 13813116847445066829U, 4607160003989618959U,
+ 4604079374282302598U, 4604977468824438271U,
+ 13828349505679214079U, 4604079374282302598U,
+ 4606233055365547081U, 4601721693286060937U,
+ 13825093730140836745U, 4606233055365547081U,
+ 4599374859150636784U, 4606719100629313491U,
+ 13830091137484089299U, 4599374859150636784U,
+ 4606863472012527185U, 4598423001813699022U,
+ 13821795038668474830U, 4606863472012527185U,
+ 4602598930031891166U, 4605998608960791335U,
+ 13829370645815567143U, 4602598930031891166U,
+ 4605292980606880364U, 4603694922063032361U,
+ 13827066958917808169U, 4605292980606880364U,
+ 4593688012422887515U, 4607111255739239816U,
+ 13830483292594015624U, 4593688012422887515U,
+ 4607054494135176056U, 4595218635031890910U,
+ 13818590671886666718U, 4607054494135176056U,
+ 4603384207141321914U, 4605523422498301790U,
+ 13828895459353077598U, 4603384207141321914U,
+ 4605799732098147061U, 4602970680601913687U,
+ 13826342717456689495U, 4605799732098147061U,
+ 4597169786279785693U, 4606957467106717424U,
+ 13830329503961493232U, 4597169786279785693U,
+ 4606588777269136769U, 4600103317933788342U,
+ 13823475354788564150U, 4606588777269136769U,
+ 4601022290077223616U, 4606398451906509788U,
+ 13829770488761285596U, 4601022290077223616U,
+ 4604717681185626434U, 4604366005771528720U,
+ 13827738042626304528U, 4604717681185626434U,
+ 4583614727651146525U, 4607178985458280057U,
+ 13830551022313055865U, 4583614727651146525U,
+ 4607172882816799076U, 4586790578280679046U,
+ 13810162615135454854U, 4607172882816799076U,
+ 4604244531615310815U, 4604830524903495634U,
+ 13828202561758271442U, 4604244531615310815U,
+ 4606329407841126011U, 4601323770373937522U,
+ 13824695807228713330U, 4606329407841126011U,
+ 4599792496117920694U, 4606646545123403481U,
+ 13830018581978179289U, 4599792496117920694U,
+ 4606919157647773535U, 4597815040470278984U,
+ 13821187077325054792U, 4606919157647773535U,
+ 4602829525820289164U, 4605886709123365959U,
+ 13829258745978141767U, 4602829525820289164U,
+ 4605426297151190466U, 4603518581031047189U,
+ 13826890617885822997U, 4605426297151190466U,
+ 4594563856311064231U, 4607080832832247697U,
+ 13830452869687023505U, 4594563856311064231U,
+ 4607088942243446236U, 4594345179472540681U,
+ 13817717216327316489U, 4607088942243446236U,
+ 4603562972219549215U, 4605393374401988274U,
+ 13828765411256764082U, 4603562972219549215U,
+ 4605915122243179241U, 4602782121393764535U,
+ 13826154158248540343U, 4605915122243179241U,
+ 4598029484874872834U, 4606905728766014348U,
+ 13830277765620790156U, 4598029484874872834U,
+ 4606665164148251002U, 4599688422741010356U,
+ 13823060459595786164U, 4606665164148251002U,
+ 4601423692641949331U, 4606305777984577632U,
+ 13829677814839353440U, 4601423692641949331U,
+ 4604867640218014515U, 4604203581176243359U,
+ 13827575618031019167U, 4604867640218014515U,
+ 4587673791460508439U, 4607170170974224083U,
+ 13830542207828999891U, 4587673791460508439U,
+ 4607141713064252300U, 4591507261658050721U,
+ 13814879298512826529U, 4607141713064252300U,
+ 4603910660507251362U, 4605120315324767624U,
+ 13828492352179543432U, 4603910660507251362U,
+ 4606131849150971908U, 4602114767134999006U,
+ 13825486803989774814U, 4606131849150971908U,
+ 4598953786765296928U, 4606786509620734768U,
+ 13830158546475510576U, 4598953786765296928U,
+ 4606802552898869248U, 4598848011564831930U,
+ 13822220048419607738U, 4606802552898869248U,
+ 4602212250118051877U, 4606105796280968177U,
+ 13829477833135743985U, 4602212250118051877U,
+ 4605155376589456981U, 4603867938232615808U,
+ 13827239975087391616U, 4605155376589456981U,
+ 4591947271803021404U, 4607136295912168606U,
+ 13830508332766944414U, 4591947271803021404U,
+ 4607014697483910382U, 4596088445927168004U,
+ 13819460482781943812U, 4607014697483910382U,
+ 4603202304363743346U, 4605649044311923410U,
+ 13829021081166699218U, 4603202304363743346U,
+ 4605679749231851918U, 4603156351203636159U,
+ 13826528388058411967U, 4605679749231851918U,
+ 4596305267720071930U, 4607003915349878877U,
+ 13830375952204654685U, 4596305267720071930U,
+ 4606507322377452870U, 4600514338912178239U,
+ 13823886375766954047U, 4606507322377452870U,
+ 4600616459743653188U, 4606486172460753999U,
+ 13829858209315529807U, 4600616459743653188U,
+ 4604563781218984604U, 4604524701268679793U,
+ 13827896738123455601U, 4604563781218984604U,
+ 4569220649180767418U, 4607182376410422530U,
+ 13830554413265198338U, 4569220649180767418U
+};
+
+const fpr fpr_p2_tab[] = {
+ 4611686018427387904U,
+ 4607182418800017408U,
+ 4602678819172646912U,
+ 4598175219545276416U,
+ 4593671619917905920U,
+ 4589168020290535424U,
+ 4584664420663164928U,
+ 4580160821035794432U,
+ 4575657221408423936U,
+ 4571153621781053440U,
+ 4566650022153682944U
+};
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/fpr.h b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/fpr.h
new file mode 100644
index 000000000..a1c122275
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/fpr.h
@@ -0,0 +1,253 @@
+#ifndef PQCLEAN_FALCON1024_CLEAN_FPR_H
+#define PQCLEAN_FALCON1024_CLEAN_FPR_H
+
+/*
+ * Floating-point operations.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+/* ====================================================================== */
+/*
+ * Custom floating-point implementation with integer arithmetics. We
+ * use IEEE-754 "binary64" format, with some simplifications:
+ *
+ * - Top bit is s = 1 for negative, 0 for positive.
+ *
+ * - Exponent e uses the next 11 bits (bits 52 to 62, inclusive).
+ *
+ * - Mantissa m uses the 52 low bits.
+ *
+ * Encoded value is, in general: (-1)^s * 2^(e-1023) * (1 + m*2^(-52))
+ * i.e. the mantissa really is a 53-bit number (less than 2.0, but not
+ * less than 1.0), but the top bit (equal to 1 by definition) is omitted
+ * in the encoding.
+ *
+ * In IEEE-754, there are some special values:
+ *
+ * - If e = 2047, then the value is either an infinite (m = 0) or
+ * a NaN (m != 0).
+ *
+ * - If e = 0, then the value is either a zero (m = 0) or a subnormal,
+ * aka "denormalized number" (m != 0).
+ *
+ * Of these, we only need the zeros. The caller is responsible for not
+ * providing operands that would lead to infinites, NaNs or subnormals.
+ * If inputs are such that values go out of range, then indeterminate
+ * values are returned (it would still be deterministic, but no specific
+ * value may be relied upon).
+ *
+ * At the C level, the three parts are stored in a 64-bit unsigned
+ * word.
+ *
+ * One may note that a property of the IEEE-754 format is that order
+ * is preserved for positive values: if two positive floating-point
+ * values x and y are such that x < y, then their respective encodings
+ * as _signed_ 64-bit integers i64(x) and i64(y) will be such that
+ * i64(x) < i64(y). For negative values, order is reversed: if x < 0,
+ * y < 0, and x < y, then ia64(x) > ia64(y).
+ *
+ * IMPORTANT ASSUMPTIONS:
+ * ======================
+ *
+ * For proper computations, and constant-time behaviour, we assume the
+ * following:
+ *
+ * - 32x32->64 multiplication (unsigned) has an execution time that
+ * is independent of its operands. This is true of most modern
+ * x86 and ARM cores. Notable exceptions are the ARM Cortex M0, M0+
+ * and M3 (in the M0 and M0+, this is done in software, so it depends
+ * on that routine), and the PowerPC cores from the G3/G4 lines.
+ * For more info, see: https://www.bearssl.org/ctmul.html
+ *
+ * - Left-shifts and right-shifts of 32-bit values have an execution
+ * time which does not depend on the shifted value nor on the
+ * shift count. An historical exception is the Pentium IV, but most
+ * modern CPU have barrel shifters. Some small microcontrollers
+ * might have varying-time shifts (not the ARM Cortex M*, though).
+ *
+ * - Right-shift of a signed negative value performs a sign extension.
+ * As per the C standard, this operation returns an
+ * implementation-defined result (this is NOT an "undefined
+ * behaviour"). On most/all systems, an arithmetic shift is
+ * performed, because this is what makes most sense.
+ */
+
+/*
+ * Normally we should declare the 'fpr' type to be a struct or union
+ * around the internal 64-bit value; however, we want to use the
+ * direct 64-bit integer type to enable a lighter call convention on
+ * ARM platforms. This means that direct (invalid) use of operators
+ * such as '*' or '+' will not be caught by the compiler. We rely on
+ * the "normal" (non-emulated) code to detect such instances.
+ */
+typedef uint64_t fpr;
+
+/*
+ * For computations, we split values into an integral mantissa in the
+ * 2^54..2^55 range, and an (adjusted) exponent. The lowest bit is
+ * "sticky" (it is set to 1 if any of the bits below it is 1); when
+ * re-encoding, the low two bits are dropped, but may induce an
+ * increment in the value for proper rounding.
+ */
+
+/*
+ * Right-shift a 64-bit unsigned value by a possibly secret shift count.
+ * We assumed that the underlying architecture had a barrel shifter for
+ * 32-bit shifts, but for 64-bit shifts on a 32-bit system, this will
+ * typically invoke a software routine that is not necessarily
+ * constant-time; hence the function below.
+ *
+ * Shift count n MUST be in the 0..63 range.
+ */
+#define fpr_ursh PQCLEAN_FALCON1024_CLEAN_fpr_ursh
+uint64_t fpr_ursh(uint64_t x, int n);
+
+/*
+ * Right-shift a 64-bit signed value by a possibly secret shift count
+ * (see fpr_ursh() for the rationale).
+ *
+ * Shift count n MUST be in the 0..63 range.
+ */
+#define fpr_irsh PQCLEAN_FALCON1024_CLEAN_fpr_irsh
+int64_t fpr_irsh(int64_t x, int n);
+
+/*
+ * Left-shift a 64-bit unsigned value by a possibly secret shift count
+ * (see fpr_ursh() for the rationale).
+ *
+ * Shift count n MUST be in the 0..63 range.
+ */
+#define fpr_ulsh PQCLEAN_FALCON1024_CLEAN_fpr_ulsh
+uint64_t fpr_ulsh(uint64_t x, int n);
+
+/*
+ * Expectations:
+ * s = 0 or 1
+ * exponent e is "arbitrary" and unbiased
+ * 2^54 <= m < 2^55
+ * Numerical value is (-1)^2 * m * 2^e
+ *
+ * Exponents which are too low lead to value zero. If the exponent is
+ * too large, the returned value is indeterminate.
+ *
+ * If m = 0, then a zero is returned (using the provided sign).
+ * If e < -1076, then a zero is returned (regardless of the value of m).
+ * If e >= -1076 and e != 0, m must be within the expected range
+ * (2^54 to 2^55-1).
+ */
+#define FPR PQCLEAN_FALCON1024_CLEAN_FPR
+fpr FPR(int s, int e, uint64_t m);
+
+
+#define fpr_scaled PQCLEAN_FALCON1024_CLEAN_fpr_scaled
+fpr fpr_scaled(int64_t i, int sc);
+
+#define fpr_of PQCLEAN_FALCON1024_CLEAN_fpr_of
+fpr fpr_of(int64_t i);
+
+static const fpr fpr_q = 4667981563525332992;
+static const fpr fpr_inverse_of_q = 4545632735260551042;
+static const fpr fpr_inv_2sqrsigma0 = 4594603506513722306;
+static const fpr fpr_inv_sigma = 4573359825155195350;
+static const fpr fpr_sigma_min_9 = 4608495221497168882;
+static const fpr fpr_sigma_min_10 = 4608586345619182117;
+static const fpr fpr_log2 = 4604418534313441775;
+static const fpr fpr_inv_log2 = 4609176140021203710;
+static const fpr fpr_bnorm_max = 4670353323383631276;
+static const fpr fpr_zero = 0;
+static const fpr fpr_one = 4607182418800017408;
+static const fpr fpr_two = 4611686018427387904;
+static const fpr fpr_onehalf = 4602678819172646912;
+static const fpr fpr_invsqrt2 = 4604544271217802189;
+static const fpr fpr_invsqrt8 = 4600040671590431693;
+static const fpr fpr_ptwo31 = 4746794007248502784;
+static const fpr fpr_ptwo31m1 = 4746794007244308480;
+static const fpr fpr_mtwo31m1 = 13970166044099084288U;
+static const fpr fpr_ptwo63m1 = 4890909195324358656;
+static const fpr fpr_mtwo63m1 = 14114281232179134464U;
+static const fpr fpr_ptwo63 = 4890909195324358656;
+
+#define fpr_rint PQCLEAN_FALCON1024_CLEAN_fpr_rint
+int64_t fpr_rint(fpr x);
+
+#define fpr_floor PQCLEAN_FALCON1024_CLEAN_fpr_floor
+int64_t fpr_floor(fpr x);
+
+#define fpr_trunc PQCLEAN_FALCON1024_CLEAN_fpr_trunc
+int64_t fpr_trunc(fpr x);
+
+#define fpr_add PQCLEAN_FALCON1024_CLEAN_fpr_add
+fpr fpr_add(fpr x, fpr y);
+
+#define fpr_sub PQCLEAN_FALCON1024_CLEAN_fpr_sub
+fpr fpr_sub(fpr x, fpr y);
+
+#define fpr_neg PQCLEAN_FALCON1024_CLEAN_fpr_neg
+fpr fpr_neg(fpr x);
+
+#define fpr_half PQCLEAN_FALCON1024_CLEAN_fpr_half
+fpr fpr_half(fpr x);
+
+#define fpr_double PQCLEAN_FALCON1024_CLEAN_fpr_double
+fpr fpr_double(fpr x);
+
+#define fpr_mul PQCLEAN_FALCON1024_CLEAN_fpr_mul
+fpr fpr_mul(fpr x, fpr y);
+
+#define fpr_sqr PQCLEAN_FALCON1024_CLEAN_fpr_sqr
+fpr fpr_sqr(fpr x);
+
+#define fpr_div PQCLEAN_FALCON1024_CLEAN_fpr_div
+fpr fpr_div(fpr x, fpr y);
+
+#define fpr_inv PQCLEAN_FALCON1024_CLEAN_fpr_inv
+fpr fpr_inv(fpr x);
+
+#define fpr_sqrt PQCLEAN_FALCON1024_CLEAN_fpr_sqrt
+fpr fpr_sqrt(fpr x);
+
+#define fpr_lt PQCLEAN_FALCON1024_CLEAN_fpr_lt
+int fpr_lt(fpr x, fpr y);
+
+/*
+ * Compute exp(x) for x such that |x| <= ln 2. We want a precision of 50
+ * bits or so.
+ */
+#define fpr_expm_p63 PQCLEAN_FALCON1024_CLEAN_fpr_expm_p63
+uint64_t fpr_expm_p63(fpr x, fpr ccs);
+
+#define fpr_gm_tab PQCLEAN_FALCON1024_CLEAN_fpr_gm_tab
+extern const fpr fpr_gm_tab[];
+
+#define fpr_p2_tab PQCLEAN_FALCON1024_CLEAN_fpr_p2_tab
+extern const fpr fpr_p2_tab[];
+
+/* ====================================================================== */
+#endif
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/inner.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/inner.c
new file mode 100755
index 000000000..f5c269eda
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/inner.c
@@ -0,0 +1,70 @@
+#include "inner.h"
+
+/*
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ */
+
+unsigned set_fpu_cw(unsigned x) {
+ return x;
+}
+
+
+uint64_t prng_get_u64(prng *p) {
+ size_t u;
+
+ /*
+ * If there are less than 9 bytes in the buffer, we refill it.
+ * This means that we may drop the last few bytes, but this allows
+ * for faster extraction code. Also, it means that we never leave
+ * an empty buffer.
+ */
+ u = p->ptr;
+ if (u >= (sizeof p->buf.d) - 9) {
+ PQCLEAN_FALCON1024_CLEAN_prng_refill(p);
+ u = 0;
+ }
+ p->ptr = u + 8;
+
+ return (uint64_t)p->buf.d[u + 0]
+ | ((uint64_t)p->buf.d[u + 1] << 8)
+ | ((uint64_t)p->buf.d[u + 2] << 16)
+ | ((uint64_t)p->buf.d[u + 3] << 24)
+ | ((uint64_t)p->buf.d[u + 4] << 32)
+ | ((uint64_t)p->buf.d[u + 5] << 40)
+ | ((uint64_t)p->buf.d[u + 6] << 48)
+ | ((uint64_t)p->buf.d[u + 7] << 56);
+}
+
+
+unsigned prng_get_u8(prng *p) {
+ unsigned v;
+
+ v = p->buf.d[p->ptr ++];
+ if (p->ptr == sizeof p->buf.d) {
+ PQCLEAN_FALCON1024_CLEAN_prng_refill(p);
+ }
+ return v;
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/inner.h b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/inner.h
new file mode 100644
index 000000000..886f51a67
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/inner.h
@@ -0,0 +1,794 @@
+#ifndef PQCLEAN_FALCON1024_CLEAN_INNER_H
+#define PQCLEAN_FALCON1024_CLEAN_INNER_H
+
+
+/*
+ * Internal functions for Falcon. This is not the API intended to be
+ * used by applications; instead, this internal API provides all the
+ * primitives on which wrappers build to provide external APIs.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+/*
+ * IMPORTANT API RULES
+ * -------------------
+ *
+ * This API has some non-trivial usage rules:
+ *
+ *
+ * - All public functions (i.e. the non-static ones) must be referenced
+ * with the PQCLEAN_FALCON1024_CLEAN_ macro (e.g. PQCLEAN_FALCON1024_CLEAN_verify_raw for the verify_raw()
+ * function). That macro adds a prefix to the name, which is
+ * configurable with the FALCON_PREFIX macro. This allows compiling
+ * the code into a specific "namespace" and potentially including
+ * several versions of this code into a single application (e.g. to
+ * have an AVX2 and a non-AVX2 variants and select the one to use at
+ * runtime based on availability of AVX2 opcodes).
+ *
+ * - Functions that need temporary buffers expects them as a final
+ * tmp[] array of type uint8_t*, with a size which is documented for
+ * each function. However, most have some alignment requirements,
+ * because they will use the array to store 16-bit, 32-bit or 64-bit
+ * values (e.g. uint64_t or double). The caller must ensure proper
+ * alignment. What happens on unaligned access depends on the
+ * underlying architecture, ranging from a slight time penalty
+ * to immediate termination of the process.
+ *
+ * - Some functions rely on specific rounding rules and precision for
+ * floating-point numbers. On some systems (in particular 32-bit x86
+ * with the 387 FPU), this requires setting an hardware control
+ * word. The caller MUST use set_fpu_cw() to ensure proper precision:
+ *
+ * oldcw = set_fpu_cw(2);
+ * PQCLEAN_FALCON1024_CLEAN_sign_dyn(...);
+ * set_fpu_cw(oldcw);
+ *
+ * On systems where the native floating-point precision is already
+ * proper, or integer-based emulation is used, the set_fpu_cw()
+ * function does nothing, so it can be called systematically.
+ */
+#include "fips202.h"
+#include "fpr.h"
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+
+
+
+/*
+ * Some computations with floating-point elements, in particular
+ * rounding to the nearest integer, rely on operations using _exactly_
+ * the precision of IEEE-754 binary64 type (i.e. 52 bits). On 32-bit
+ * x86, the 387 FPU may be used (depending on the target OS) and, in
+ * that case, may use more precision bits (i.e. 64 bits, for an 80-bit
+ * total type length); to prevent miscomputations, we define an explicit
+ * function that modifies the precision in the FPU control word.
+ *
+ * set_fpu_cw() sets the precision to the provided value, and returns
+ * the previously set precision; callers are supposed to restore the
+ * previous precision on exit. The correct (52-bit) precision is
+ * configured with the value "2". On unsupported compilers, or on
+ * targets other than 32-bit x86, or when the native 'double' type is
+ * not used, the set_fpu_cw() function does nothing at all.
+ */
+#define set_fpu_cw PQCLEAN_FALCON1024_CLEAN_set_fpu_cw
+unsigned set_fpu_cw(unsigned x);
+
+
+/* ==================================================================== */
+/*
+ * SHAKE256 implementation (shake.c).
+ *
+ * API is defined to be easily replaced with the fips202.h API defined
+ * as part of PQClean.
+ */
+
+
+
+#define inner_shake256_context shake256incctx
+#define inner_shake256_init(sc) shake256_inc_init(sc)
+#define inner_shake256_inject(sc, in, len) shake256_inc_absorb(sc, in, len)
+#define inner_shake256_flip(sc) shake256_inc_finalize(sc)
+#define inner_shake256_extract(sc, out, len) shake256_inc_squeeze(out, len, sc)
+#define inner_shake256_ctx_release(sc) shake256_inc_ctx_release(sc)
+
+
+/* ==================================================================== */
+/*
+ * Encoding/decoding functions (codec.c).
+ *
+ * Encoding functions take as parameters an output buffer (out) with
+ * a given maximum length (max_out_len); returned value is the actual
+ * number of bytes which have been written. If the output buffer is
+ * not large enough, then 0 is returned (some bytes may have been
+ * written to the buffer). If 'out' is NULL, then 'max_out_len' is
+ * ignored; instead, the function computes and returns the actual
+ * required output length (in bytes).
+ *
+ * Decoding functions take as parameters an input buffer (in) with
+ * its maximum length (max_in_len); returned value is the actual number
+ * of bytes that have been read from the buffer. If the provided length
+ * is too short, then 0 is returned.
+ *
+ * Values to encode or decode are vectors of integers, with N = 2^logn
+ * elements.
+ *
+ * Three encoding formats are defined:
+ *
+ * - modq: sequence of values modulo 12289, each encoded over exactly
+ * 14 bits. The encoder and decoder verify that integers are within
+ * the valid range (0..12288). Values are arrays of uint16.
+ *
+ * - trim: sequence of signed integers, a specified number of bits
+ * each. The number of bits is provided as parameter and includes
+ * the sign bit. Each integer x must be such that |x| < 2^(bits-1)
+ * (which means that the -2^(bits-1) value is forbidden); encode and
+ * decode functions check that property. Values are arrays of
+ * int16_t or int8_t, corresponding to names 'trim_i16' and
+ * 'trim_i8', respectively.
+ *
+ * - comp: variable-length encoding for signed integers; each integer
+ * uses a minimum of 9 bits, possibly more. This is normally used
+ * only for signatures.
+ *
+ */
+
+size_t PQCLEAN_FALCON1024_CLEAN_modq_encode(void *out, size_t max_out_len,
+ const uint16_t *x, unsigned logn);
+size_t PQCLEAN_FALCON1024_CLEAN_trim_i16_encode(void *out, size_t max_out_len,
+ const int16_t *x, unsigned logn, unsigned bits);
+size_t PQCLEAN_FALCON1024_CLEAN_trim_i8_encode(void *out, size_t max_out_len,
+ const int8_t *x, unsigned logn, unsigned bits);
+size_t PQCLEAN_FALCON1024_CLEAN_comp_encode(void *out, size_t max_out_len,
+ const int16_t *x, unsigned logn);
+
+size_t PQCLEAN_FALCON1024_CLEAN_modq_decode(uint16_t *x, unsigned logn,
+ const void *in, size_t max_in_len);
+size_t PQCLEAN_FALCON1024_CLEAN_trim_i16_decode(int16_t *x, unsigned logn, unsigned bits,
+ const void *in, size_t max_in_len);
+size_t PQCLEAN_FALCON1024_CLEAN_trim_i8_decode(int8_t *x, unsigned logn, unsigned bits,
+ const void *in, size_t max_in_len);
+size_t PQCLEAN_FALCON1024_CLEAN_comp_decode(int16_t *x, unsigned logn,
+ const void *in, size_t max_in_len);
+
+/*
+ * Number of bits for key elements, indexed by logn (1 to 10). This
+ * is at most 8 bits for all degrees, but some degrees may have shorter
+ * elements.
+ */
+extern const uint8_t PQCLEAN_FALCON1024_CLEAN_max_fg_bits[];
+extern const uint8_t PQCLEAN_FALCON1024_CLEAN_max_FG_bits[];
+
+/*
+ * Maximum size, in bits, of elements in a signature, indexed by logn
+ * (1 to 10). The size includes the sign bit.
+ */
+extern const uint8_t PQCLEAN_FALCON1024_CLEAN_max_sig_bits[];
+
+/* ==================================================================== */
+/*
+ * Support functions used for both signature generation and signature
+ * verification (common.c).
+ */
+
+/*
+ * From a SHAKE256 context (must be already flipped), produce a new
+ * point. This is the non-constant-time version, which may leak enough
+ * information to serve as a stop condition on a brute force attack on
+ * the hashed message (provided that the nonce value is known).
+ */
+void PQCLEAN_FALCON1024_CLEAN_hash_to_point_vartime(inner_shake256_context *sc,
+ uint16_t *x, unsigned logn);
+
+/*
+ * From a SHAKE256 context (must be already flipped), produce a new
+ * point. The temporary buffer (tmp) must have room for 2*2^logn bytes.
+ * This function is constant-time but is typically more expensive than
+ * PQCLEAN_FALCON1024_CLEAN_hash_to_point_vartime().
+ *
+ * tmp[] must have 16-bit alignment.
+ */
+void PQCLEAN_FALCON1024_CLEAN_hash_to_point_ct(inner_shake256_context *sc,
+ uint16_t *x, unsigned logn, uint8_t *tmp);
+
+/*
+ * Tell whether a given vector (2N coordinates, in two halves) is
+ * acceptable as a signature. This compares the appropriate norm of the
+ * vector with the acceptance bound. Returned value is 1 on success
+ * (vector is short enough to be acceptable), 0 otherwise.
+ */
+int PQCLEAN_FALCON1024_CLEAN_is_short(const int16_t *s1, const int16_t *s2, unsigned logn);
+
+/*
+ * Tell whether a given vector (2N coordinates, in two halves) is
+ * acceptable as a signature. Instead of the first half s1, this
+ * function receives the "saturated squared norm" of s1, i.e. the
+ * sum of the squares of the coordinates of s1 (saturated at 2^32-1
+ * if the sum exceeds 2^31-1).
+ *
+ * Returned value is 1 on success (vector is short enough to be
+ * acceptable), 0 otherwise.
+ */
+int PQCLEAN_FALCON1024_CLEAN_is_short_half(uint32_t sqn, const int16_t *s2, unsigned logn);
+
+/* ==================================================================== */
+/*
+ * Signature verification functions (vrfy.c).
+ */
+
+/*
+ * Convert a public key to NTT + Montgomery format. Conversion is done
+ * in place.
+ */
+void PQCLEAN_FALCON1024_CLEAN_to_ntt_monty(uint16_t *h, unsigned logn);
+
+/*
+ * Internal signature verification code:
+ * c0[] contains the hashed nonce+message
+ * s2[] is the decoded signature
+ * h[] contains the public key, in NTT + Montgomery format
+ * logn is the degree log
+ * tmp[] temporary, must have at least 2*2^logn bytes
+ * Returned value is 1 on success, 0 on error.
+ *
+ * tmp[] must have 16-bit alignment.
+ */
+int PQCLEAN_FALCON1024_CLEAN_verify_raw(const uint16_t *c0, const int16_t *s2,
+ const uint16_t *h, unsigned logn, uint8_t *tmp);
+
+/*
+ * Compute the public key h[], given the private key elements f[] and
+ * g[]. This computes h = g/f mod phi mod q, where phi is the polynomial
+ * modulus. This function returns 1 on success, 0 on error (an error is
+ * reported if f is not invertible mod phi mod q).
+ *
+ * The tmp[] array must have room for at least 2*2^logn elements.
+ * tmp[] must have 16-bit alignment.
+ */
+int PQCLEAN_FALCON1024_CLEAN_compute_public(uint16_t *h,
+ const int8_t *f, const int8_t *g, unsigned logn, uint8_t *tmp);
+
+/*
+ * Recompute the fourth private key element. Private key consists in
+ * four polynomials with small coefficients f, g, F and G, which are
+ * such that fG - gF = q mod phi; furthermore, f is invertible modulo
+ * phi and modulo q. This function recomputes G from f, g and F.
+ *
+ * The tmp[] array must have room for at least 4*2^logn bytes.
+ *
+ * Returned value is 1 in success, 0 on error (f not invertible).
+ * tmp[] must have 16-bit alignment.
+ */
+int PQCLEAN_FALCON1024_CLEAN_complete_private(int8_t *G,
+ const int8_t *f, const int8_t *g, const int8_t *F,
+ unsigned logn, uint8_t *tmp);
+
+/*
+ * Test whether a given polynomial is invertible modulo phi and q.
+ * Polynomial coefficients are small integers.
+ *
+ * tmp[] must have 16-bit alignment.
+ */
+int PQCLEAN_FALCON1024_CLEAN_is_invertible(
+ const int16_t *s2, unsigned logn, uint8_t *tmp);
+
+/*
+ * Count the number of elements of value zero in the NTT representation
+ * of the given polynomial: this is the number of primitive 2n-th roots
+ * of unity (modulo q = 12289) that are roots of the provided polynomial
+ * (taken modulo q).
+ *
+ * tmp[] must have 16-bit alignment.
+ */
+int PQCLEAN_FALCON1024_CLEAN_count_nttzero(const int16_t *sig, unsigned logn, uint8_t *tmp);
+
+/*
+ * Internal signature verification with public key recovery:
+ * h[] receives the public key (NOT in NTT/Montgomery format)
+ * c0[] contains the hashed nonce+message
+ * s1[] is the first signature half
+ * s2[] is the second signature half
+ * logn is the degree log
+ * tmp[] temporary, must have at least 2*2^logn bytes
+ * Returned value is 1 on success, 0 on error. Success is returned if
+ * the signature is a short enough vector; in that case, the public
+ * key has been written to h[]. However, the caller must still
+ * verify that h[] is the correct value (e.g. with regards to a known
+ * hash of the public key).
+ *
+ * h[] may not overlap with any of the other arrays.
+ *
+ * tmp[] must have 16-bit alignment.
+ */
+int PQCLEAN_FALCON1024_CLEAN_verify_recover(uint16_t *h,
+ const uint16_t *c0, const int16_t *s1, const int16_t *s2,
+ unsigned logn, uint8_t *tmp);
+
+/* ==================================================================== */
+/*
+ * Implementation of floating-point real numbers (fpr.h, fpr.c).
+ */
+
+/*
+ * Real numbers are implemented by an extra header file, included below.
+ * This is meant to support pluggable implementations. The default
+ * implementation relies on the C type 'double'.
+ *
+ * The included file must define the following types, functions and
+ * constants:
+ *
+ * fpr
+ * type for a real number
+ *
+ * fpr fpr_of(int64_t i)
+ * cast an integer into a real number; source must be in the
+ * -(2^63-1)..+(2^63-1) range
+ *
+ * fpr fpr_scaled(int64_t i, int sc)
+ * compute i*2^sc as a real number; source 'i' must be in the
+ * -(2^63-1)..+(2^63-1) range
+ *
+ * fpr fpr_ldexp(fpr x, int e)
+ * compute x*2^e
+ *
+ * int64_t fpr_rint(fpr x)
+ * round x to the nearest integer; x must be in the -(2^63-1)
+ * to +(2^63-1) range
+ *
+ * int64_t fpr_trunc(fpr x)
+ * round to an integer; this rounds towards zero; value must
+ * be in the -(2^63-1) to +(2^63-1) range
+ *
+ * fpr fpr_add(fpr x, fpr y)
+ * compute x + y
+ *
+ * fpr fpr_sub(fpr x, fpr y)
+ * compute x - y
+ *
+ * fpr fpr_neg(fpr x)
+ * compute -x
+ *
+ * fpr fpr_half(fpr x)
+ * compute x/2
+ *
+ * fpr fpr_double(fpr x)
+ * compute x*2
+ *
+ * fpr fpr_mul(fpr x, fpr y)
+ * compute x * y
+ *
+ * fpr fpr_sqr(fpr x)
+ * compute x * x
+ *
+ * fpr fpr_inv(fpr x)
+ * compute 1/x
+ *
+ * fpr fpr_div(fpr x, fpr y)
+ * compute x/y
+ *
+ * fpr fpr_sqrt(fpr x)
+ * compute the square root of x
+ *
+ * int fpr_lt(fpr x, fpr y)
+ * return 1 if x < y, 0 otherwise
+ *
+ * uint64_t fpr_expm_p63(fpr x)
+ * return exp(x), assuming that 0 <= x < log(2). Returned value
+ * is scaled to 63 bits (i.e. it really returns 2^63*exp(-x),
+ * rounded to the nearest integer). Computation should have a
+ * precision of at least 45 bits.
+ *
+ * const fpr fpr_gm_tab[]
+ * array of constants for FFT / iFFT
+ *
+ * const fpr fpr_p2_tab[]
+ * precomputed powers of 2 (by index, 0 to 10)
+ *
+ * Constants of type 'fpr':
+ *
+ * fpr fpr_q 12289
+ * fpr fpr_inverse_of_q 1/12289
+ * fpr fpr_inv_2sqrsigma0 1/(2*(1.8205^2))
+ * fpr fpr_inv_sigma 1/(1.55*sqrt(12289))
+ * fpr fpr_sigma_min_9 1.291500756233514568549480827642
+ * fpr fpr_sigma_min_10 1.311734375905083682667395805765
+ * fpr fpr_log2 log(2)
+ * fpr fpr_inv_log2 1/log(2)
+ * fpr fpr_bnorm_max 16822.4121
+ * fpr fpr_zero 0
+ * fpr fpr_one 1
+ * fpr fpr_two 2
+ * fpr fpr_onehalf 0.5
+ * fpr fpr_ptwo31 2^31
+ * fpr fpr_ptwo31m1 2^31-1
+ * fpr fpr_mtwo31m1 -(2^31-1)
+ * fpr fpr_ptwo63m1 2^63-1
+ * fpr fpr_mtwo63m1 -(2^63-1)
+ * fpr fpr_ptwo63 2^63
+ */
+
+/* ==================================================================== */
+/*
+ * RNG (rng.c).
+ *
+ * A PRNG based on ChaCha20 is implemented; it is seeded from a SHAKE256
+ * context (flipped) and is used for bulk pseudorandom generation.
+ * A system-dependent seed generator is also provided.
+ */
+
+/*
+ * Obtain a random seed from the system RNG.
+ *
+ * Returned value is 1 on success, 0 on error.
+ */
+int PQCLEAN_FALCON1024_CLEAN_get_seed(void *seed, size_t seed_len);
+
+/*
+ * Structure for a PRNG. This includes a large buffer so that values
+ * get generated in advance. The 'state' is used to keep the current
+ * PRNG algorithm state (contents depend on the selected algorithm).
+ *
+ * The unions with 'dummy_u64' are there to ensure proper alignment for
+ * 64-bit direct access.
+ */
+typedef struct {
+ union {
+ uint8_t d[512]; /* MUST be 512, exactly */
+ uint64_t dummy_u64;
+ } buf;
+ size_t ptr;
+ union {
+ uint8_t d[256];
+ uint64_t dummy_u64;
+ } state;
+ int type;
+} prng;
+
+/*
+ * Instantiate a PRNG. That PRNG will feed over the provided SHAKE256
+ * context (in "flipped" state) to obtain its initial state.
+ */
+void PQCLEAN_FALCON1024_CLEAN_prng_init(prng *p, inner_shake256_context *src);
+
+/*
+ * Refill the PRNG buffer. This is normally invoked automatically, and
+ * is declared here only so that prng_get_u64() may be inlined.
+ */
+void PQCLEAN_FALCON1024_CLEAN_prng_refill(prng *p);
+
+/*
+ * Get some bytes from a PRNG.
+ */
+void PQCLEAN_FALCON1024_CLEAN_prng_get_bytes(prng *p, void *dst, size_t len);
+
+/*
+ * Get a 64-bit random value from a PRNG.
+ */
+#define prng_get_u64 PQCLEAN_FALCON1024_CLEAN_prng_get_u64
+uint64_t prng_get_u64(prng *p);
+
+/*
+ * Get an 8-bit random value from a PRNG.
+ */
+#define prng_get_u8 PQCLEAN_FALCON1024_CLEAN_prng_get_u8
+unsigned prng_get_u8(prng *p);
+
+/* ==================================================================== */
+/*
+ * FFT (falcon-fft.c).
+ *
+ * A real polynomial is represented as an array of N 'fpr' elements.
+ * The FFT representation of a real polynomial contains N/2 complex
+ * elements; each is stored as two real numbers, for the real and
+ * imaginary parts, respectively. See falcon-fft.c for details on the
+ * internal representation.
+ */
+
+/*
+ * Compute FFT in-place: the source array should contain a real
+ * polynomial (N coefficients); its storage area is reused to store
+ * the FFT representation of that polynomial (N/2 complex numbers).
+ *
+ * 'logn' MUST lie between 1 and 10 (inclusive).
+ */
+void PQCLEAN_FALCON1024_CLEAN_FFT(fpr *f, unsigned logn);
+
+/*
+ * Compute the inverse FFT in-place: the source array should contain the
+ * FFT representation of a real polynomial (N/2 elements); the resulting
+ * real polynomial (N coefficients of type 'fpr') is written over the
+ * array.
+ *
+ * 'logn' MUST lie between 1 and 10 (inclusive).
+ */
+void PQCLEAN_FALCON1024_CLEAN_iFFT(fpr *f, unsigned logn);
+
+/*
+ * Add polynomial b to polynomial a. a and b MUST NOT overlap. This
+ * function works in both normal and FFT representations.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_add(fpr *a, const fpr *b, unsigned logn);
+
+/*
+ * Subtract polynomial b from polynomial a. a and b MUST NOT overlap. This
+ * function works in both normal and FFT representations.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_sub(fpr *a, const fpr *b, unsigned logn);
+
+/*
+ * Negate polynomial a. This function works in both normal and FFT
+ * representations.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_neg(fpr *a, unsigned logn);
+
+/*
+ * Compute adjoint of polynomial a. This function works only in FFT
+ * representation.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_adj_fft(fpr *a, unsigned logn);
+
+/*
+ * Multiply polynomial a with polynomial b. a and b MUST NOT overlap.
+ * This function works only in FFT representation.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(fpr *a, const fpr *b, unsigned logn);
+
+/*
+ * Multiply polynomial a with the adjoint of polynomial b. a and b MUST NOT
+ * overlap. This function works only in FFT representation.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_muladj_fft(fpr *a, const fpr *b, unsigned logn);
+
+/*
+ * Multiply polynomial with its own adjoint. This function works only in FFT
+ * representation.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_mulselfadj_fft(fpr *a, unsigned logn);
+
+/*
+ * Multiply polynomial with a real constant. This function works in both
+ * normal and FFT representations.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_mulconst(fpr *a, fpr x, unsigned logn);
+
+/*
+ * Divide polynomial a by polynomial b, modulo X^N+1 (FFT representation).
+ * a and b MUST NOT overlap.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_div_fft(fpr *a, const fpr *b, unsigned logn);
+
+/*
+ * Given f and g (in FFT representation), compute 1/(f*adj(f)+g*adj(g))
+ * (also in FFT representation). Since the result is auto-adjoint, all its
+ * coordinates in FFT representation are real; as such, only the first N/2
+ * values of d[] are filled (the imaginary parts are skipped).
+ *
+ * Array d MUST NOT overlap with either a or b.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_invnorm2_fft(fpr *d,
+ const fpr *a, const fpr *b, unsigned logn);
+
+/*
+ * Given F, G, f and g (in FFT representation), compute F*adj(f)+G*adj(g)
+ * (also in FFT representation). Destination d MUST NOT overlap with
+ * any of the source arrays.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_add_muladj_fft(fpr *d,
+ const fpr *F, const fpr *G,
+ const fpr *f, const fpr *g, unsigned logn);
+
+/*
+ * Multiply polynomial a by polynomial b, where b is autoadjoint. Both
+ * a and b are in FFT representation. Since b is autoadjoint, all its
+ * FFT coefficients are real, and the array b contains only N/2 elements.
+ * a and b MUST NOT overlap.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_mul_autoadj_fft(fpr *a,
+ const fpr *b, unsigned logn);
+
+/*
+ * Divide polynomial a by polynomial b, where b is autoadjoint. Both
+ * a and b are in FFT representation. Since b is autoadjoint, all its
+ * FFT coefficients are real, and the array b contains only N/2 elements.
+ * a and b MUST NOT overlap.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_div_autoadj_fft(fpr *a,
+ const fpr *b, unsigned logn);
+
+/*
+ * Perform an LDL decomposition of an auto-adjoint matrix G, in FFT
+ * representation. On input, g00, g01 and g11 are provided (where the
+ * matrix G = [[g00, g01], [adj(g01), g11]]). On output, the d00, l10
+ * and d11 values are written in g00, g01 and g11, respectively
+ * (with D = [[d00, 0], [0, d11]] and L = [[1, 0], [l10, 1]]).
+ * (In fact, d00 = g00, so the g00 operand is left unmodified.)
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_LDL_fft(const fpr *g00,
+ fpr *g01, fpr *g11, unsigned logn);
+
+/*
+ * Perform an LDL decomposition of an auto-adjoint matrix G, in FFT
+ * representation. This is identical to poly_LDL_fft() except that
+ * g00, g01 and g11 are unmodified; the outputs d11 and l10 are written
+ * in two other separate buffers provided as extra parameters.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_LDLmv_fft(fpr *d11, fpr *l10,
+ const fpr *g00, const fpr *g01,
+ const fpr *g11, unsigned logn);
+
+/*
+ * Apply "split" operation on a polynomial in FFT representation:
+ * f = f0(x^2) + x*f1(x^2), for half-size polynomials f0 and f1
+ * (polynomials modulo X^(N/2)+1). f0, f1 and f MUST NOT overlap.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_split_fft(fpr *f0, fpr *f1,
+ const fpr *f, unsigned logn);
+
+/*
+ * Apply "merge" operation on two polynomials in FFT representation:
+ * given f0 and f1, polynomials modulo X^(N/2)+1, this function computes
+ * f = f0(x^2) + x*f1(x^2), in FFT representation modulo X^N+1.
+ * f MUST NOT overlap with either f0 or f1.
+ */
+void PQCLEAN_FALCON1024_CLEAN_poly_merge_fft(fpr *f,
+ const fpr *f0, const fpr *f1, unsigned logn);
+
+/* ==================================================================== */
+/*
+ * Key pair generation.
+ */
+
+/*
+ * Required sizes of the temporary buffer (in bytes).
+ *
+ * This size is 28*2^logn bytes, except for degrees 2 and 4 (logn = 1
+ * or 2) where it is slightly greater.
+ */
+#define FALCON_KEYGEN_TEMP_1 136
+#define FALCON_KEYGEN_TEMP_2 272
+#define FALCON_KEYGEN_TEMP_3 224
+#define FALCON_KEYGEN_TEMP_4 448
+#define FALCON_KEYGEN_TEMP_5 896
+#define FALCON_KEYGEN_TEMP_6 1792
+#define FALCON_KEYGEN_TEMP_7 3584
+#define FALCON_KEYGEN_TEMP_8 7168
+#define FALCON_KEYGEN_TEMP_9 14336
+#define FALCON_KEYGEN_TEMP_10 28672
+
+/*
+ * Generate a new key pair. Randomness is extracted from the provided
+ * SHAKE256 context, which must have already been seeded and flipped.
+ * The tmp[] array must have suitable size (see FALCON_KEYGEN_TEMP_*
+ * macros) and be aligned for the uint32_t, uint64_t and fpr types.
+ *
+ * The private key elements are written in f, g, F and G, and the
+ * public key is written in h. Either or both of G and h may be NULL,
+ * in which case the corresponding element is not returned (they can
+ * be recomputed from f, g and F).
+ *
+ * tmp[] must have 64-bit alignment.
+ * This function uses floating-point rounding (see set_fpu_cw()).
+ */
+void PQCLEAN_FALCON1024_CLEAN_keygen(inner_shake256_context *rng,
+ int8_t *f, int8_t *g, int8_t *F, int8_t *G, uint16_t *h,
+ unsigned logn, uint8_t *tmp);
+
+/* ==================================================================== */
+/*
+ * Signature generation.
+ */
+
+/*
+ * Expand a private key into the B0 matrix in FFT representation and
+ * the LDL tree. All the values are written in 'expanded_key', for
+ * a total of (8*logn+40)*2^logn bytes.
+ *
+ * The tmp[] array must have room for at least 48*2^logn bytes.
+ *
+ * tmp[] must have 64-bit alignment.
+ * This function uses floating-point rounding (see set_fpu_cw()).
+ */
+void PQCLEAN_FALCON1024_CLEAN_expand_privkey(fpr *expanded_key,
+ const int8_t *f, const int8_t *g, const int8_t *F, const int8_t *G,
+ unsigned logn, uint8_t *tmp);
+
+/*
+ * Compute a signature over the provided hashed message (hm); the
+ * signature value is one short vector. This function uses an
+ * expanded key (as generated by PQCLEAN_FALCON1024_CLEAN_expand_privkey()).
+ *
+ * The sig[] and hm[] buffers may overlap.
+ *
+ * On successful output, the start of the tmp[] buffer contains the s1
+ * vector (as int16_t elements).
+ *
+ * The minimal size (in bytes) of tmp[] is 48*2^logn bytes.
+ *
+ * tmp[] must have 64-bit alignment.
+ * This function uses floating-point rounding (see set_fpu_cw()).
+ */
+void PQCLEAN_FALCON1024_CLEAN_sign_tree(int16_t *sig, inner_shake256_context *rng,
+ const fpr *expanded_key,
+ const uint16_t *hm, unsigned logn, uint8_t *tmp);
+
+/*
+ * Compute a signature over the provided hashed message (hm); the
+ * signature value is one short vector. This function uses a raw
+ * key and dynamically recompute the B0 matrix and LDL tree; this
+ * saves RAM since there is no needed for an expanded key, but
+ * increases the signature cost.
+ *
+ * The sig[] and hm[] buffers may overlap.
+ *
+ * On successful output, the start of the tmp[] buffer contains the s1
+ * vector (as int16_t elements).
+ *
+ * The minimal size (in bytes) of tmp[] is 72*2^logn bytes.
+ *
+ * tmp[] must have 64-bit alignment.
+ * This function uses floating-point rounding (see set_fpu_cw()).
+ */
+void PQCLEAN_FALCON1024_CLEAN_sign_dyn(int16_t *sig, inner_shake256_context *rng,
+ const int8_t *f, const int8_t *g,
+ const int8_t *F, const int8_t *G,
+ const uint16_t *hm, unsigned logn, uint8_t *tmp);
+
+/*
+ * Internal sampler engine. Exported for tests.
+ *
+ * sampler_context wraps around a source of random numbers (PRNG) and
+ * the sigma_min value (nominally dependent on the degree).
+ *
+ * sampler() takes as parameters:
+ * ctx pointer to the sampler_context structure
+ * mu center for the distribution
+ * isigma inverse of the distribution standard deviation
+ * It returns an integer sampled along the Gaussian distribution centered
+ * on mu and of standard deviation sigma = 1/isigma.
+ *
+ * gaussian0_sampler() takes as parameter a pointer to a PRNG, and
+ * returns an integer sampled along a half-Gaussian with standard
+ * deviation sigma0 = 1.8205 (center is 0, returned value is
+ * nonnegative).
+ */
+
+typedef struct {
+    prng p;          /* PRNG supplying randomness to the sampler */
+    fpr sigma_min;   /* minimum sigma value (nominally dependent on the degree) */
+} sampler_context;
+
+int PQCLEAN_FALCON1024_CLEAN_sampler(void *ctx, fpr mu, fpr isigma);
+
+int PQCLEAN_FALCON1024_CLEAN_gaussian0_sampler(prng *p);
+
+/* ==================================================================== */
+
+#endif
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/keygen.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/keygen.c
new file mode 100644
index 000000000..2d47412d0
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/keygen.c
@@ -0,0 +1,4231 @@
+#include "inner.h"
+
+/*
+ * Falcon key pair generation.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+#define MKN(logn) ((size_t)1 << (logn))
+
+/* ==================================================================== */
+/*
+ * Modular arithmetic.
+ *
+ * We implement a few functions for computing modulo a small integer p.
+ *
+ * All functions require that 2^30 < p < 2^31. Moreover, operands must
+ * be in the 0..p-1 range.
+ *
+ * Modular addition and subtraction work for all such p.
+ *
+ * Montgomery multiplication requires that p is odd, and must be provided
+ * with an additional value p0i = -1/p mod 2^31. See below for some basics
+ * on Montgomery multiplication.
+ *
+ * Division computes an inverse modulo p by an exponentiation (with
+ * exponent p-2): this works only if p is prime. Multiplication
+ * requirements also apply, i.e. p must be odd and p0i must be provided.
+ *
+ * The NTT and inverse NTT need all of the above, and also that
+ * p = 1 mod 2048.
+ *
+ * -----------------------------------------------------------------------
+ *
+ * We use Montgomery representation with 31-bit values:
+ *
+ * Let R = 2^31 mod p. When 2^30 < p < 2^31, R = 2^31 - p.
+ * Montgomery representation of an integer x modulo p is x*R mod p.
+ *
+ * Montgomery multiplication computes (x*y)/R mod p for
+ * operands x and y. Therefore:
+ *
+ * - if operands are x*R and y*R (Montgomery representations of x and
+ * y), then Montgomery multiplication computes (x*R*y*R)/R = (x*y)*R
+ * mod p, which is the Montgomery representation of the product x*y;
+ *
+ * - if operands are x*R and y (or x and y*R), then Montgomery
+ * multiplication returns x*y mod p: mixed-representation
+ * multiplications yield results in normal representation.
+ *
+ * To convert to Montgomery representation, we multiply by R, which is done
+ * by Montgomery-multiplying by R^2. Stand-alone conversion back from
+ * Montgomery representation is Montgomery-multiplication by 1.
+ */
+
+/*
+ * Precomputed small primes. Each element contains the following:
+ *
+ * p The prime itself.
+ *
+ * g A primitive root of phi = X^N+1 (in field Z_p).
+ *
+ * s The inverse of the product of all previous primes in the array,
+ * computed modulo p and in Montgomery representation.
+ *
+ * All primes are such that p = 1 mod 2048, and are lower than 2^31. They
+ * are listed in decreasing order.
+ */
+
+typedef struct {
+    uint32_t p;   /* the prime itself (p = 1 mod 2048, p < 2^31) */
+    uint32_t g;   /* a primitive root of phi = X^N+1 in Z_p */
+    uint32_t s;   /* inverse of the product of all previous primes in the
+                     array, modulo p, in Montgomery representation */
+} small_prime;
+
+static const small_prime PRIMES[] = {
+ { 2147473409, 383167813, 10239 },
+ { 2147389441, 211808905, 471403745 },
+ { 2147387393, 37672282, 1329335065 },
+ { 2147377153, 1977035326, 968223422 },
+ { 2147358721, 1067163706, 132460015 },
+ { 2147352577, 1606082042, 598693809 },
+ { 2147346433, 2033915641, 1056257184 },
+ { 2147338241, 1653770625, 421286710 },
+ { 2147309569, 631200819, 1111201074 },
+ { 2147297281, 2038364663, 1042003613 },
+ { 2147295233, 1962540515, 19440033 },
+ { 2147239937, 2100082663, 353296760 },
+ { 2147235841, 1991153006, 1703918027 },
+ { 2147217409, 516405114, 1258919613 },
+ { 2147205121, 409347988, 1089726929 },
+ { 2147196929, 927788991, 1946238668 },
+ { 2147178497, 1136922411, 1347028164 },
+ { 2147100673, 868626236, 701164723 },
+ { 2147082241, 1897279176, 617820870 },
+ { 2147074049, 1888819123, 158382189 },
+ { 2147051521, 25006327, 522758543 },
+ { 2147043329, 327546255, 37227845 },
+ { 2147039233, 766324424, 1133356428 },
+ { 2146988033, 1862817362, 73861329 },
+ { 2146963457, 404622040, 653019435 },
+ { 2146959361, 1936581214, 995143093 },
+ { 2146938881, 1559770096, 634921513 },
+ { 2146908161, 422623708, 1985060172 },
+ { 2146885633, 1751189170, 298238186 },
+ { 2146871297, 578919515, 291810829 },
+ { 2146846721, 1114060353, 915902322 },
+ { 2146834433, 2069565474, 47859524 },
+ { 2146818049, 1552824584, 646281055 },
+ { 2146775041, 1906267847, 1597832891 },
+ { 2146756609, 1847414714, 1228090888 },
+ { 2146744321, 1818792070, 1176377637 },
+ { 2146738177, 1118066398, 1054971214 },
+ { 2146736129, 52057278, 933422153 },
+ { 2146713601, 592259376, 1406621510 },
+ { 2146695169, 263161877, 1514178701 },
+ { 2146656257, 685363115, 384505091 },
+ { 2146650113, 927727032, 537575289 },
+ { 2146646017, 52575506, 1799464037 },
+ { 2146643969, 1276803876, 1348954416 },
+ { 2146603009, 814028633, 1521547704 },
+ { 2146572289, 1846678872, 1310832121 },
+ { 2146547713, 919368090, 1019041349 },
+ { 2146508801, 671847612, 38582496 },
+ { 2146492417, 283911680, 532424562 },
+ { 2146490369, 1780044827, 896447978 },
+ { 2146459649, 327980850, 1327906900 },
+ { 2146447361, 1310561493, 958645253 },
+ { 2146441217, 412148926, 287271128 },
+ { 2146437121, 293186449, 2009822534 },
+ { 2146430977, 179034356, 1359155584 },
+ { 2146418689, 1517345488, 1790248672 },
+ { 2146406401, 1615820390, 1584833571 },
+ { 2146404353, 826651445, 607120498 },
+ { 2146379777, 3816988, 1897049071 },
+ { 2146363393, 1221409784, 1986921567 },
+ { 2146355201, 1388081168, 849968120 },
+ { 2146336769, 1803473237, 1655544036 },
+ { 2146312193, 1023484977, 273671831 },
+ { 2146293761, 1074591448, 467406983 },
+ { 2146283521, 831604668, 1523950494 },
+ { 2146203649, 712865423, 1170834574 },
+ { 2146154497, 1764991362, 1064856763 },
+ { 2146142209, 627386213, 1406840151 },
+ { 2146127873, 1638674429, 2088393537 },
+ { 2146099201, 1516001018, 690673370 },
+ { 2146093057, 1294931393, 315136610 },
+ { 2146091009, 1942399533, 973539425 },
+ { 2146078721, 1843461814, 2132275436 },
+ { 2146060289, 1098740778, 360423481 },
+ { 2146048001, 1617213232, 1951981294 },
+ { 2146041857, 1805783169, 2075683489 },
+ { 2146019329, 272027909, 1753219918 },
+ { 2145986561, 1206530344, 2034028118 },
+ { 2145976321, 1243769360, 1173377644 },
+ { 2145964033, 887200839, 1281344586 },
+ { 2145906689, 1651026455, 906178216 },
+ { 2145875969, 1673238256, 1043521212 },
+ { 2145871873, 1226591210, 1399796492 },
+ { 2145841153, 1465353397, 1324527802 },
+ { 2145832961, 1150638905, 554084759 },
+ { 2145816577, 221601706, 427340863 },
+ { 2145785857, 608896761, 316590738 },
+ { 2145755137, 1712054942, 1684294304 },
+ { 2145742849, 1302302867, 724873116 },
+ { 2145728513, 516717693, 431671476 },
+ { 2145699841, 524575579, 1619722537 },
+ { 2145691649, 1925625239, 982974435 },
+ { 2145687553, 463795662, 1293154300 },
+ { 2145673217, 771716636, 881778029 },
+ { 2145630209, 1509556977, 837364988 },
+ { 2145595393, 229091856, 851648427 },
+ { 2145587201, 1796903241, 635342424 },
+ { 2145525761, 715310882, 1677228081 },
+ { 2145495041, 1040930522, 200685896 },
+ { 2145466369, 949804237, 1809146322 },
+ { 2145445889, 1673903706, 95316881 },
+ { 2145390593, 806941852, 1428671135 },
+ { 2145372161, 1402525292, 159350694 },
+ { 2145361921, 2124760298, 1589134749 },
+ { 2145359873, 1217503067, 1561543010 },
+ { 2145355777, 338341402, 83865711 },
+ { 2145343489, 1381532164, 641430002 },
+ { 2145325057, 1883895478, 1528469895 },
+ { 2145318913, 1335370424, 65809740 },
+ { 2145312769, 2000008042, 1919775760 },
+ { 2145300481, 961450962, 1229540578 },
+ { 2145282049, 910466767, 1964062701 },
+ { 2145232897, 816527501, 450152063 },
+ { 2145218561, 1435128058, 1794509700 },
+ { 2145187841, 33505311, 1272467582 },
+ { 2145181697, 269767433, 1380363849 },
+ { 2145175553, 56386299, 1316870546 },
+ { 2145079297, 2106880293, 1391797340 },
+ { 2145021953, 1347906152, 720510798 },
+ { 2145015809, 206769262, 1651459955 },
+ { 2145003521, 1885513236, 1393381284 },
+ { 2144960513, 1810381315, 31937275 },
+ { 2144944129, 1306487838, 2019419520 },
+ { 2144935937, 37304730, 1841489054 },
+ { 2144894977, 1601434616, 157985831 },
+ { 2144888833, 98749330, 2128592228 },
+ { 2144880641, 1772327002, 2076128344 },
+ { 2144864257, 1404514762, 2029969964 },
+ { 2144827393, 801236594, 406627220 },
+ { 2144806913, 349217443, 1501080290 },
+ { 2144796673, 1542656776, 2084736519 },
+ { 2144778241, 1210734884, 1746416203 },
+ { 2144759809, 1146598851, 716464489 },
+ { 2144757761, 286328400, 1823728177 },
+ { 2144729089, 1347555695, 1836644881 },
+ { 2144727041, 1795703790, 520296412 },
+ { 2144696321, 1302475157, 852964281 },
+ { 2144667649, 1075877614, 504992927 },
+ { 2144573441, 198765808, 1617144982 },
+ { 2144555009, 321528767, 155821259 },
+ { 2144550913, 814139516, 1819937644 },
+ { 2144536577, 571143206, 962942255 },
+ { 2144524289, 1746733766, 2471321 },
+ { 2144512001, 1821415077, 124190939 },
+ { 2144468993, 917871546, 1260072806 },
+ { 2144458753, 378417981, 1569240563 },
+ { 2144421889, 175229668, 1825620763 },
+ { 2144409601, 1699216963, 351648117 },
+ { 2144370689, 1071885991, 958186029 },
+ { 2144348161, 1763151227, 540353574 },
+ { 2144335873, 1060214804, 919598847 },
+ { 2144329729, 663515846, 1448552668 },
+ { 2144327681, 1057776305, 590222840 },
+ { 2144309249, 1705149168, 1459294624 },
+ { 2144296961, 325823721, 1649016934 },
+ { 2144290817, 738775789, 447427206 },
+ { 2144243713, 962347618, 893050215 },
+ { 2144237569, 1655257077, 900860862 },
+ { 2144161793, 242206694, 1567868672 },
+ { 2144155649, 769415308, 1247993134 },
+ { 2144137217, 320492023, 515841070 },
+ { 2144120833, 1639388522, 770877302 },
+ { 2144071681, 1761785233, 964296120 },
+ { 2144065537, 419817825, 204564472 },
+ { 2144028673, 666050597, 2091019760 },
+ { 2144010241, 1413657615, 1518702610 },
+ { 2143952897, 1238327946, 475672271 },
+ { 2143940609, 307063413, 1176750846 },
+ { 2143918081, 2062905559, 786785803 },
+ { 2143899649, 1338112849, 1562292083 },
+ { 2143891457, 68149545, 87166451 },
+ { 2143885313, 921750778, 394460854 },
+ { 2143854593, 719766593, 133877196 },
+ { 2143836161, 1149399850, 1861591875 },
+ { 2143762433, 1848739366, 1335934145 },
+ { 2143756289, 1326674710, 102999236 },
+ { 2143713281, 808061791, 1156900308 },
+ { 2143690753, 388399459, 1926468019 },
+ { 2143670273, 1427891374, 1756689401 },
+ { 2143666177, 1912173949, 986629565 },
+ { 2143645697, 2041160111, 371842865 },
+ { 2143641601, 1279906897, 2023974350 },
+ { 2143635457, 720473174, 1389027526 },
+ { 2143621121, 1298309455, 1732632006 },
+ { 2143598593, 1548762216, 1825417506 },
+ { 2143567873, 620475784, 1073787233 },
+ { 2143561729, 1932954575, 949167309 },
+ { 2143553537, 354315656, 1652037534 },
+ { 2143541249, 577424288, 1097027618 },
+ { 2143531009, 357862822, 478640055 },
+ { 2143522817, 2017706025, 1550531668 },
+ { 2143506433, 2078127419, 1824320165 },
+ { 2143488001, 613475285, 1604011510 },
+ { 2143469569, 1466594987, 502095196 },
+ { 2143426561, 1115430331, 1044637111 },
+ { 2143383553, 9778045, 1902463734 },
+ { 2143377409, 1557401276, 2056861771 },
+ { 2143363073, 652036455, 1965915971 },
+ { 2143260673, 1464581171, 1523257541 },
+ { 2143246337, 1876119649, 764541916 },
+ { 2143209473, 1614992673, 1920672844 },
+ { 2143203329, 981052047, 2049774209 },
+ { 2143160321, 1847355533, 728535665 },
+ { 2143129601, 965558457, 603052992 },
+ { 2143123457, 2140817191, 8348679 },
+ { 2143100929, 1547263683, 694209023 },
+ { 2143092737, 643459066, 1979934533 },
+ { 2143082497, 188603778, 2026175670 },
+ { 2143062017, 1657329695, 377451099 },
+ { 2143051777, 114967950, 979255473 },
+ { 2143025153, 1698431342, 1449196896 },
+ { 2143006721, 1862741675, 1739650365 },
+ { 2142996481, 756660457, 996160050 },
+ { 2142976001, 927864010, 1166847574 },
+ { 2142965761, 905070557, 661974566 },
+ { 2142916609, 40932754, 1787161127 },
+ { 2142892033, 1987985648, 675335382 },
+ { 2142885889, 797497211, 1323096997 },
+ { 2142871553, 2068025830, 1411877159 },
+ { 2142861313, 1217177090, 1438410687 },
+ { 2142830593, 409906375, 1767860634 },
+ { 2142803969, 1197788993, 359782919 },
+ { 2142785537, 643817365, 513932862 },
+ { 2142779393, 1717046338, 218943121 },
+ { 2142724097, 89336830, 416687049 },
+ { 2142707713, 5944581, 1356813523 },
+ { 2142658561, 887942135, 2074011722 },
+ { 2142638081, 151851972, 1647339939 },
+ { 2142564353, 1691505537, 1483107336 },
+ { 2142533633, 1989920200, 1135938817 },
+ { 2142529537, 959263126, 1531961857 },
+ { 2142527489, 453251129, 1725566162 },
+ { 2142502913, 1536028102, 182053257 },
+ { 2142498817, 570138730, 701443447 },
+ { 2142416897, 326965800, 411931819 },
+ { 2142363649, 1675665410, 1517191733 },
+ { 2142351361, 968529566, 1575712703 },
+ { 2142330881, 1384953238, 1769087884 },
+ { 2142314497, 1977173242, 1833745524 },
+ { 2142289921, 95082313, 1714775493 },
+ { 2142283777, 109377615, 1070584533 },
+ { 2142277633, 16960510, 702157145 },
+ { 2142263297, 553850819, 431364395 },
+ { 2142208001, 241466367, 2053967982 },
+ { 2142164993, 1795661326, 1031836848 },
+ { 2142097409, 1212530046, 712772031 },
+ { 2142087169, 1763869720, 822276067 },
+ { 2142078977, 644065713, 1765268066 },
+ { 2142074881, 112671944, 643204925 },
+ { 2142044161, 1387785471, 1297890174 },
+ { 2142025729, 783885537, 1000425730 },
+ { 2142011393, 905662232, 1679401033 },
+ { 2141974529, 799788433, 468119557 },
+ { 2141943809, 1932544124, 449305555 },
+ { 2141933569, 1527403256, 841867925 },
+ { 2141931521, 1247076451, 743823916 },
+ { 2141902849, 1199660531, 401687910 },
+ { 2141890561, 150132350, 1720336972 },
+ { 2141857793, 1287438162, 663880489 },
+ { 2141833217, 618017731, 1819208266 },
+ { 2141820929, 999578638, 1403090096 },
+ { 2141786113, 81834325, 1523542501 },
+ { 2141771777, 120001928, 463556492 },
+ { 2141759489, 122455485, 2124928282 },
+ { 2141749249, 141986041, 940339153 },
+ { 2141685761, 889088734, 477141499 },
+ { 2141673473, 324212681, 1122558298 },
+ { 2141669377, 1175806187, 1373818177 },
+ { 2141655041, 1113654822, 296887082 },
+ { 2141587457, 991103258, 1585913875 },
+ { 2141583361, 1401451409, 1802457360 },
+ { 2141575169, 1571977166, 712760980 },
+ { 2141546497, 1107849376, 1250270109 },
+ { 2141515777, 196544219, 356001130 },
+ { 2141495297, 1733571506, 1060744866 },
+ { 2141483009, 321552363, 1168297026 },
+ { 2141458433, 505818251, 733225819 },
+ { 2141360129, 1026840098, 948342276 },
+ { 2141325313, 945133744, 2129965998 },
+ { 2141317121, 1871100260, 1843844634 },
+ { 2141286401, 1790639498, 1750465696 },
+ { 2141267969, 1376858592, 186160720 },
+ { 2141255681, 2129698296, 1876677959 },
+ { 2141243393, 2138900688, 1340009628 },
+ { 2141214721, 1933049835, 1087819477 },
+ { 2141212673, 1898664939, 1786328049 },
+ { 2141202433, 990234828, 940682169 },
+ { 2141175809, 1406392421, 993089586 },
+ { 2141165569, 1263518371, 289019479 },
+ { 2141073409, 1485624211, 507864514 },
+ { 2141052929, 1885134788, 311252465 },
+ { 2141040641, 1285021247, 280941862 },
+ { 2141028353, 1527610374, 375035110 },
+ { 2141011969, 1400626168, 164696620 },
+ { 2140999681, 632959608, 966175067 },
+ { 2140997633, 2045628978, 1290889438 },
+ { 2140993537, 1412755491, 375366253 },
+ { 2140942337, 719477232, 785367828 },
+ { 2140925953, 45224252, 836552317 },
+ { 2140917761, 1157376588, 1001839569 },
+ { 2140887041, 278480752, 2098732796 },
+ { 2140837889, 1663139953, 924094810 },
+ { 2140788737, 802501511, 2045368990 },
+ { 2140766209, 1820083885, 1800295504 },
+ { 2140764161, 1169561905, 2106792035 },
+ { 2140696577, 127781498, 1885987531 },
+ { 2140684289, 16014477, 1098116827 },
+ { 2140653569, 665960598, 1796728247 },
+ { 2140594177, 1043085491, 377310938 },
+ { 2140579841, 1732838211, 1504505945 },
+ { 2140569601, 302071939, 358291016 },
+ { 2140567553, 192393733, 1909137143 },
+ { 2140557313, 406595731, 1175330270 },
+ { 2140549121, 1748850918, 525007007 },
+ { 2140477441, 499436566, 1031159814 },
+ { 2140469249, 1886004401, 1029951320 },
+ { 2140426241, 1483168100, 1676273461 },
+ { 2140420097, 1779917297, 846024476 },
+ { 2140413953, 522948893, 1816354149 },
+ { 2140383233, 1931364473, 1296921241 },
+ { 2140366849, 1917356555, 147196204 },
+ { 2140354561, 16466177, 1349052107 },
+ { 2140348417, 1875366972, 1860485634 },
+ { 2140323841, 456498717, 1790256483 },
+ { 2140321793, 1629493973, 150031888 },
+ { 2140315649, 1904063898, 395510935 },
+ { 2140280833, 1784104328, 831417909 },
+ { 2140250113, 256087139, 697349101 },
+ { 2140229633, 388553070, 243875754 },
+ { 2140223489, 747459608, 1396270850 },
+ { 2140200961, 507423743, 1895572209 },
+ { 2140162049, 580106016, 2045297469 },
+ { 2140149761, 712426444, 785217995 },
+ { 2140137473, 1441607584, 536866543 },
+ { 2140119041, 346538902, 1740434653 },
+ { 2140090369, 282642885, 21051094 },
+ { 2140076033, 1407456228, 319910029 },
+ { 2140047361, 1619330500, 1488632070 },
+ { 2140041217, 2089408064, 2012026134 },
+ { 2140008449, 1705524800, 1613440760 },
+ { 2139924481, 1846208233, 1280649481 },
+ { 2139906049, 989438755, 1185646076 },
+ { 2139867137, 1522314850, 372783595 },
+ { 2139842561, 1681587377, 216848235 },
+ { 2139826177, 2066284988, 1784999464 },
+ { 2139824129, 480888214, 1513323027 },
+ { 2139789313, 847937200, 858192859 },
+ { 2139783169, 1642000434, 1583261448 },
+ { 2139770881, 940699589, 179702100 },
+ { 2139768833, 315623242, 964612676 },
+ { 2139666433, 331649203, 764666914 },
+ { 2139641857, 2118730799, 1313764644 },
+ { 2139635713, 519149027, 519212449 },
+ { 2139598849, 1526413634, 1769667104 },
+ { 2139574273, 551148610, 820739925 },
+ { 2139568129, 1386800242, 472447405 },
+ { 2139549697, 813760130, 1412328531 },
+ { 2139537409, 1615286260, 1609362979 },
+ { 2139475969, 1352559299, 1696720421 },
+ { 2139455489, 1048691649, 1584935400 },
+ { 2139432961, 836025845, 950121150 },
+ { 2139424769, 1558281165, 1635486858 },
+ { 2139406337, 1728402143, 1674423301 },
+ { 2139396097, 1727715782, 1483470544 },
+ { 2139383809, 1092853491, 1741699084 },
+ { 2139369473, 690776899, 1242798709 },
+ { 2139351041, 1768782380, 2120712049 },
+ { 2139334657, 1739968247, 1427249225 },
+ { 2139332609, 1547189119, 623011170 },
+ { 2139310081, 1346827917, 1605466350 },
+ { 2139303937, 369317948, 828392831 },
+ { 2139301889, 1560417239, 1788073219 },
+ { 2139283457, 1303121623, 595079358 },
+ { 2139248641, 1354555286, 573424177 },
+ { 2139240449, 60974056, 885781403 },
+ { 2139222017, 355573421, 1221054839 },
+ { 2139215873, 566477826, 1724006500 },
+ { 2139150337, 871437673, 1609133294 },
+ { 2139144193, 1478130914, 1137491905 },
+ { 2139117569, 1854880922, 964728507 },
+ { 2139076609, 202405335, 756508944 },
+ { 2139062273, 1399715741, 884826059 },
+ { 2139045889, 1051045798, 1202295476 },
+ { 2139033601, 1707715206, 632234634 },
+ { 2139006977, 2035853139, 231626690 },
+ { 2138951681, 183867876, 838350879 },
+ { 2138945537, 1403254661, 404460202 },
+ { 2138920961, 310865011, 1282911681 },
+ { 2138910721, 1328496553, 103472415 },
+ { 2138904577, 78831681, 993513549 },
+ { 2138902529, 1319697451, 1055904361 },
+ { 2138816513, 384338872, 1706202469 },
+ { 2138810369, 1084868275, 405677177 },
+ { 2138787841, 401181788, 1964773901 },
+ { 2138775553, 1850532988, 1247087473 },
+ { 2138767361, 874261901, 1576073565 },
+ { 2138757121, 1187474742, 993541415 },
+ { 2138748929, 1782458888, 1043206483 },
+ { 2138744833, 1221500487, 800141243 },
+ { 2138738689, 413465368, 1450660558 },
+ { 2138695681, 739045140, 342611472 },
+ { 2138658817, 1355845756, 672674190 },
+ { 2138644481, 608379162, 1538874380 },
+ { 2138632193, 1444914034, 686911254 },
+ { 2138607617, 484707818, 1435142134 },
+ { 2138591233, 539460669, 1290458549 },
+ { 2138572801, 2093538990, 2011138646 },
+ { 2138552321, 1149786988, 1076414907 },
+ { 2138546177, 840688206, 2108985273 },
+ { 2138533889, 209669619, 198172413 },
+ { 2138523649, 1975879426, 1277003968 },
+ { 2138490881, 1351891144, 1976858109 },
+ { 2138460161, 1817321013, 1979278293 },
+ { 2138429441, 1950077177, 203441928 },
+ { 2138400769, 908970113, 628395069 },
+ { 2138398721, 219890864, 758486760 },
+ { 2138376193, 1306654379, 977554090 },
+ { 2138351617, 298822498, 2004708503 },
+ { 2138337281, 441457816, 1049002108 },
+ { 2138320897, 1517731724, 1442269609 },
+ { 2138290177, 1355911197, 1647139103 },
+ { 2138234881, 531313247, 1746591962 },
+ { 2138214401, 1899410930, 781416444 },
+ { 2138202113, 1813477173, 1622508515 },
+ { 2138191873, 1086458299, 1025408615 },
+ { 2138183681, 1998800427, 827063290 },
+ { 2138173441, 1921308898, 749670117 },
+ { 2138103809, 1620902804, 2126787647 },
+ { 2138099713, 828647069, 1892961817 },
+ { 2138085377, 179405355, 1525506535 },
+ { 2138060801, 615683235, 1259580138 },
+ { 2138044417, 2030277840, 1731266562 },
+ { 2138042369, 2087222316, 1627902259 },
+ { 2138032129, 126388712, 1108640984 },
+ { 2138011649, 715026550, 1017980050 },
+ { 2137993217, 1693714349, 1351778704 },
+ { 2137888769, 1289762259, 1053090405 },
+ { 2137853953, 199991890, 1254192789 },
+ { 2137833473, 941421685, 896995556 },
+ { 2137817089, 750416446, 1251031181 },
+ { 2137792513, 798075119, 368077456 },
+ { 2137786369, 878543495, 1035375025 },
+ { 2137767937, 9351178, 1156563902 },
+ { 2137755649, 1382297614, 1686559583 },
+ { 2137724929, 1345472850, 1681096331 },
+ { 2137704449, 834666929, 630551727 },
+ { 2137673729, 1646165729, 1892091571 },
+ { 2137620481, 778943821, 48456461 },
+ { 2137618433, 1730837875, 1713336725 },
+ { 2137581569, 805610339, 1378891359 },
+ { 2137538561, 204342388, 1950165220 },
+ { 2137526273, 1947629754, 1500789441 },
+ { 2137516033, 719902645, 1499525372 },
+ { 2137491457, 230451261, 556382829 },
+ { 2137440257, 979573541, 412760291 },
+ { 2137374721, 927841248, 1954137185 },
+ { 2137362433, 1243778559, 861024672 },
+ { 2137313281, 1341338501, 980638386 },
+ { 2137311233, 937415182, 1793212117 },
+ { 2137255937, 795331324, 1410253405 },
+ { 2137243649, 150756339, 1966999887 },
+ { 2137182209, 163346914, 1939301431 },
+ { 2137171969, 1952552395, 758913141 },
+ { 2137159681, 570788721, 218668666 },
+ { 2137147393, 1896656810, 2045670345 },
+ { 2137141249, 358493842, 518199643 },
+ { 2137139201, 1505023029, 674695848 },
+ { 2137133057, 27911103, 830956306 },
+ { 2137122817, 439771337, 1555268614 },
+ { 2137116673, 790988579, 1871449599 },
+ { 2137110529, 432109234, 811805080 },
+ { 2137102337, 1357900653, 1184997641 },
+ { 2137098241, 515119035, 1715693095 },
+ { 2137090049, 408575203, 2085660657 },
+ { 2137085953, 2097793407, 1349626963 },
+ { 2137055233, 1556739954, 1449960883 },
+ { 2137030657, 1545758650, 1369303716 },
+ { 2136987649, 332602570, 103875114 },
+ { 2136969217, 1499989506, 1662964115 },
+ { 2136924161, 857040753, 4738842 },
+ { 2136895489, 1948872712, 570436091 },
+ { 2136893441, 58969960, 1568349634 },
+ { 2136887297, 2127193379, 273612548 },
+ { 2136850433, 111208983, 1181257116 },
+ { 2136809473, 1627275942, 1680317971 },
+ { 2136764417, 1574888217, 14011331 },
+ { 2136741889, 14011055, 1129154251 },
+ { 2136727553, 35862563, 1838555253 },
+ { 2136721409, 310235666, 1363928244 },
+ { 2136698881, 1612429202, 1560383828 },
+ { 2136649729, 1138540131, 800014364 },
+ { 2136606721, 602323503, 1433096652 },
+ { 2136563713, 182209265, 1919611038 },
+ { 2136555521, 324156477, 165591039 },
+ { 2136549377, 195513113, 217165345 },
+ { 2136526849, 1050768046, 939647887 },
+ { 2136508417, 1886286237, 1619926572 },
+ { 2136477697, 609647664, 35065157 },
+ { 2136471553, 679352216, 1452259468 },
+ { 2136457217, 128630031, 824816521 },
+ { 2136422401, 19787464, 1526049830 },
+ { 2136420353, 698316836, 1530623527 },
+ { 2136371201, 1651862373, 1804812805 },
+ { 2136334337, 326596005, 336977082 },
+ { 2136322049, 63253370, 1904972151 },
+ { 2136297473, 312176076, 172182411 },
+ { 2136248321, 381261841, 369032670 },
+ { 2136242177, 358688773, 1640007994 },
+ { 2136229889, 512677188, 75585225 },
+ { 2136219649, 2095003250, 1970086149 },
+ { 2136207361, 1909650722, 537760675 },
+ { 2136176641, 1334616195, 1533487619 },
+ { 2136158209, 2096285632, 1793285210 },
+ { 2136143873, 1897347517, 293843959 },
+ { 2136133633, 923586222, 1022655978 },
+ { 2136096769, 1464868191, 1515074410 },
+ { 2136094721, 2020679520, 2061636104 },
+ { 2136076289, 290798503, 1814726809 },
+ { 2136041473, 156415894, 1250757633 },
+ { 2135996417, 297459940, 1132158924 },
+ { 2135955457, 538755304, 1688831340 },
+ { 0, 0, 0 }
+};
+
/*
 * Reduce a small signed integer modulo a small prime p. The source
 * value x MUST satisfy -p < x < p.
 */
static inline uint32_t
modp_set(int32_t x, uint32_t p) {
    uint32_t r, neg_mask;

    r = (uint32_t)x;
    neg_mask = -(r >> 31);     /* all-ones iff x < 0 */
    r += p & neg_mask;         /* fold negative values into [0, p) */
    return r;
}
+
/*
 * Normalize a modular integer around 0: map x in [0, p) to the
 * signed representative in (-p/2, p/2].
 */
static inline int32_t
modp_norm(uint32_t x, uint32_t p) {
    uint32_t half, m;

    half = (p + 1) >> 1;              /* ceil(p / 2) */
    m = ((x - half) >> 31) - 1;       /* all-ones iff x >= half */
    return (int32_t)(x - (p & m));    /* subtract p only in that case */
}
+
/*
 * Compute -1/p mod 2^31. This works for all odd integers p that fit
 * on 31 bits.
 */
static uint32_t
modp_ninv31(uint32_t p) {
    uint32_t y;
    int i;

    /*
     * Newton/Hensel iteration: starting from an inverse of p that is
     * correct on the 2 low bits, each step doubles the number of
     * correct low bits, so four steps reach (at least) 32 bits.
     */
    y = 2 - p;
    for (i = 0; i < 4; i ++) {
        y *= 2 - p * y;
    }
    /* y = 1/p mod 2^32; negate and truncate to 31 bits. */
    return (uint32_t)0x7FFFFFFF & -y;
}
+
/*
 * Compute R = 2^31 mod p.
 */
static inline uint32_t
modp_R(uint32_t p) {
    /*
     * With 2^30 < p < 2^31, reducing 2^31 modulo p takes exactly one
     * subtraction.
     */
    return ((uint32_t)1 << 31) - p;
}
+
/*
 * Addition modulo p (operands must already be in [0, p)).
 */
static inline uint32_t
modp_add(uint32_t a, uint32_t b, uint32_t p) {
    uint32_t r;

    /*
     * Compute a + b - p (order rearranged; identical modulo 2^32);
     * on borrow, add p back. Branchless, hence constant-time.
     */
    r = a - p + b;
    r += p & -(r >> 31);
    return r;
}
+
/*
 * Subtraction modulo p (operands must already be in [0, p)).
 */
static inline uint32_t
modp_sub(uint32_t a, uint32_t b, uint32_t p) {
    uint32_t r, borrow_mask;

    r = a - b;
    borrow_mask = -(r >> 31);   /* all-ones iff a < b */
    return r + (p & borrow_mask);
}
+
+/*
+ * Halving modulo p.
+ */
+/* unused
+static inline uint32_t
+modp_half(uint32_t a, uint32_t p)
+{
+ a += p & -(a & 1);
+ return a >> 1;
+}
+*/
+
/*
 * Montgomery multiplication modulo p: returns a*b/(2^31) mod p.
 * The 'p0i' value is -1/p mod 2^31. It is required that p is an
 * odd integer.
 */
static inline uint32_t
modp_montymul(uint32_t a, uint32_t b, uint32_t p, uint32_t p0i) {
    uint64_t prod, red;
    uint32_t r;

    prod = (uint64_t)a * (uint64_t)b;
    /*
     * Montgomery reduction: add the multiple of p that clears the
     * 31 low bits of the product, then divide by 2^31.
     */
    red = ((prod * p0i) & (uint64_t)0x7FFFFFFF) * p;
    r = (uint32_t)((prod + red) >> 31) - p;
    /* Result was in [0, 2p); one conditional add brings it to [0, p). */
    r += p & -(r >> 31);
    return r;
}
+
/*
 * Compute R2 = 2^62 mod p.
 */
static uint32_t
modp_R2(uint32_t p, uint32_t p0i) {
    uint32_t z;
    int i;

    /*
     * Start from 2^32 mod p: R (the value 1 in Montgomery
     * representation) doubled once.
     */
    z = modp_add(modp_R(p), modp_R(p), p);

    /*
     * Each Montgomery squaring doubles the exponent of the underlying
     * value; five of them turn 2 (Montgomery) into 2^32 (Montgomery),
     * i.e. 2^63 mod p.
     */
    for (i = 0; i < 5; i ++) {
        z = modp_montymul(z, z, p, p0i);
    }

    /*
     * Halve the value mod p (p is odd) to obtain 2^62 mod p.
     */
    z = (z + (p & -(z & 1))) >> 1;
    return z;
}
+
/*
 * Compute 2^(31*x) modulo p. This works for integers x up to 2^11.
 * p must be prime such that 2^30 < p < 2^31; p0i must be equal to
 * -1/p mod 2^31; R2 must be equal to 2^62 mod p.
 */
static inline uint32_t
modp_Rx(unsigned x, uint32_t p, uint32_t p0i, uint32_t R2) {
    uint32_t acc, sq;
    unsigned e;

    /*
     * 2^(31*x) = (2^31)*(2^(31*(x-1))); i.e. we want the Montgomery
     * representation of (2^31)^e mod p, where e = x-1.
     * R2 is 2^31 in Montgomery representation.
     */
    e = x - 1;
    acc = modp_R(p);   /* 1 in Montgomery representation */
    sq = R2;           /* successive squares of 2^31 (Montgomery) */
    while (e != 0) {
        if ((e & 1) != 0) {
            acc = modp_montymul(acc, sq, p, p0i);
        }
        sq = modp_montymul(sq, sq, p, p0i);
        e >>= 1;
    }
    return acc;
}
+
/*
 * Division modulo p. If the divisor (b) is 0, then 0 is returned.
 * This function computes proper results only when p is prime.
 * Parameters:
 *   a     dividend
 *   b     divisor
 *   p     odd prime modulus
 *   p0i   -1/p mod 2^31
 *   R     2^31 mod p
 */
static uint32_t
modp_div(uint32_t a, uint32_t b, uint32_t p, uint32_t p0i, uint32_t R) {
    uint32_t res, exp;
    int bit;

    /*
     * Fermat: 1/b = b^(p-2) mod p. Square-and-multiply from the top
     * exponent bit down, with a constant-time select instead of a
     * branch.
     */
    exp = p - 2;
    res = R;   /* 1 in Montgomery representation */
    for (bit = 30; bit >= 0; bit --) {
        uint32_t tmp, keep;

        res = modp_montymul(res, res, p, p0i);
        tmp = modp_montymul(res, b, p, p0i);
        keep = -(uint32_t)((exp >> bit) & 1);
        res ^= (res ^ tmp) & keep;
    }

    /*
     * The loop above assumed that b was in Montgomery representation
     * (b*R); under that assumption it returns 1/b in Montgomery
     * representation, i.e. R/b. Since we gave it plain b, it really
     * returned R/(b/R) = R^2/b.
     *
     * We want a/b: one Montgomery multiplication by 1 strips one R
     * factor, and the final multiplication by a strips the other.
     */
    res = modp_montymul(res, 1, p, p0i);
    return modp_montymul(a, res, p, p0i);
}
+
+/*
+ * Bit-reversal index table.
+ */
static const uint16_t REV10[] = {
    /* REV10[i] = i with its 10 low-order bits reversed (0 <= i < 1024). */
    0, 512, 256, 768, 128, 640, 384, 896, 64, 576, 320, 832,
    192, 704, 448, 960, 32, 544, 288, 800, 160, 672, 416, 928,
    96, 608, 352, 864, 224, 736, 480, 992, 16, 528, 272, 784,
    144, 656, 400, 912, 80, 592, 336, 848, 208, 720, 464, 976,
    48, 560, 304, 816, 176, 688, 432, 944, 112, 624, 368, 880,
    240, 752, 496, 1008, 8, 520, 264, 776, 136, 648, 392, 904,
    72, 584, 328, 840, 200, 712, 456, 968, 40, 552, 296, 808,
    168, 680, 424, 936, 104, 616, 360, 872, 232, 744, 488, 1000,
    24, 536, 280, 792, 152, 664, 408, 920, 88, 600, 344, 856,
    216, 728, 472, 984, 56, 568, 312, 824, 184, 696, 440, 952,
    120, 632, 376, 888, 248, 760, 504, 1016, 4, 516, 260, 772,
    132, 644, 388, 900, 68, 580, 324, 836, 196, 708, 452, 964,
    36, 548, 292, 804, 164, 676, 420, 932, 100, 612, 356, 868,
    228, 740, 484, 996, 20, 532, 276, 788, 148, 660, 404, 916,
    84, 596, 340, 852, 212, 724, 468, 980, 52, 564, 308, 820,
    180, 692, 436, 948, 116, 628, 372, 884, 244, 756, 500, 1012,
    12, 524, 268, 780, 140, 652, 396, 908, 76, 588, 332, 844,
    204, 716, 460, 972, 44, 556, 300, 812, 172, 684, 428, 940,
    108, 620, 364, 876, 236, 748, 492, 1004, 28, 540, 284, 796,
    156, 668, 412, 924, 92, 604, 348, 860, 220, 732, 476, 988,
    60, 572, 316, 828, 188, 700, 444, 956, 124, 636, 380, 892,
    252, 764, 508, 1020, 2, 514, 258, 770, 130, 642, 386, 898,
    66, 578, 322, 834, 194, 706, 450, 962, 34, 546, 290, 802,
    162, 674, 418, 930, 98, 610, 354, 866, 226, 738, 482, 994,
    18, 530, 274, 786, 146, 658, 402, 914, 82, 594, 338, 850,
    210, 722, 466, 978, 50, 562, 306, 818, 178, 690, 434, 946,
    114, 626, 370, 882, 242, 754, 498, 1010, 10, 522, 266, 778,
    138, 650, 394, 906, 74, 586, 330, 842, 202, 714, 458, 970,
    42, 554, 298, 810, 170, 682, 426, 938, 106, 618, 362, 874,
    234, 746, 490, 1002, 26, 538, 282, 794, 154, 666, 410, 922,
    90, 602, 346, 858, 218, 730, 474, 986, 58, 570, 314, 826,
    186, 698, 442, 954, 122, 634, 378, 890, 250, 762, 506, 1018,
    6, 518, 262, 774, 134, 646, 390, 902, 70, 582, 326, 838,
    198, 710, 454, 966, 38, 550, 294, 806, 166, 678, 422, 934,
    102, 614, 358, 870, 230, 742, 486, 998, 22, 534, 278, 790,
    150, 662, 406, 918, 86, 598, 342, 854, 214, 726, 470, 982,
    54, 566, 310, 822, 182, 694, 438, 950, 118, 630, 374, 886,
    246, 758, 502, 1014, 14, 526, 270, 782, 142, 654, 398, 910,
    78, 590, 334, 846, 206, 718, 462, 974, 46, 558, 302, 814,
    174, 686, 430, 942, 110, 622, 366, 878, 238, 750, 494, 1006,
    30, 542, 286, 798, 158, 670, 414, 926, 94, 606, 350, 862,
    222, 734, 478, 990, 62, 574, 318, 830, 190, 702, 446, 958,
    126, 638, 382, 894, 254, 766, 510, 1022, 1, 513, 257, 769,
    129, 641, 385, 897, 65, 577, 321, 833, 193, 705, 449, 961,
    33, 545, 289, 801, 161, 673, 417, 929, 97, 609, 353, 865,
    225, 737, 481, 993, 17, 529, 273, 785, 145, 657, 401, 913,
    81, 593, 337, 849, 209, 721, 465, 977, 49, 561, 305, 817,
    177, 689, 433, 945, 113, 625, 369, 881, 241, 753, 497, 1009,
    9, 521, 265, 777, 137, 649, 393, 905, 73, 585, 329, 841,
    201, 713, 457, 969, 41, 553, 297, 809, 169, 681, 425, 937,
    105, 617, 361, 873, 233, 745, 489, 1001, 25, 537, 281, 793,
    153, 665, 409, 921, 89, 601, 345, 857, 217, 729, 473, 985,
    57, 569, 313, 825, 185, 697, 441, 953, 121, 633, 377, 889,
    249, 761, 505, 1017, 5, 517, 261, 773, 133, 645, 389, 901,
    69, 581, 325, 837, 197, 709, 453, 965, 37, 549, 293, 805,
    165, 677, 421, 933, 101, 613, 357, 869, 229, 741, 485, 997,
    21, 533, 277, 789, 149, 661, 405, 917, 85, 597, 341, 853,
    213, 725, 469, 981, 53, 565, 309, 821, 181, 693, 437, 949,
    117, 629, 373, 885, 245, 757, 501, 1013, 13, 525, 269, 781,
    141, 653, 397, 909, 77, 589, 333, 845, 205, 717, 461, 973,
    45, 557, 301, 813, 173, 685, 429, 941, 109, 621, 365, 877,
    237, 749, 493, 1005, 29, 541, 285, 797, 157, 669, 413, 925,
    93, 605, 349, 861, 221, 733, 477, 989, 61, 573, 317, 829,
    189, 701, 445, 957, 125, 637, 381, 893, 253, 765, 509, 1021,
    3, 515, 259, 771, 131, 643, 387, 899, 67, 579, 323, 835,
    195, 707, 451, 963, 35, 547, 291, 803, 163, 675, 419, 931,
    99, 611, 355, 867, 227, 739, 483, 995, 19, 531, 275, 787,
    147, 659, 403, 915, 83, 595, 339, 851, 211, 723, 467, 979,
    51, 563, 307, 819, 179, 691, 435, 947, 115, 627, 371, 883,
    243, 755, 499, 1011, 11, 523, 267, 779, 139, 651, 395, 907,
    75, 587, 331, 843, 203, 715, 459, 971, 43, 555, 299, 811,
    171, 683, 427, 939, 107, 619, 363, 875, 235, 747, 491, 1003,
    27, 539, 283, 795, 155, 667, 411, 923, 91, 603, 347, 859,
    219, 731, 475, 987, 59, 571, 315, 827, 187, 699, 443, 955,
    123, 635, 379, 891, 251, 763, 507, 1019, 7, 519, 263, 775,
    135, 647, 391, 903, 71, 583, 327, 839, 199, 711, 455, 967,
    39, 551, 295, 807, 167, 679, 423, 935, 103, 615, 359, 871,
    231, 743, 487, 999, 23, 535, 279, 791, 151, 663, 407, 919,
    87, 599, 343, 855, 215, 727, 471, 983, 55, 567, 311, 823,
    183, 695, 439, 951, 119, 631, 375, 887, 247, 759, 503, 1015,
    15, 527, 271, 783, 143, 655, 399, 911, 79, 591, 335, 847,
    207, 719, 463, 975, 47, 559, 303, 815, 175, 687, 431, 943,
    111, 623, 367, 879, 239, 751, 495, 1007, 31, 543, 287, 799,
    159, 671, 415, 927, 95, 607, 351, 863, 223, 735, 479, 991,
    63, 575, 319, 831, 191, 703, 447, 959, 127, 639, 383, 895,
    255, 767, 511, 1023
};
+
+/*
+ * Compute the roots for NTT and inverse NTT (binary case). Input
+ * parameter g is a primitive 2048-th root of 1 modulo p (i.e. g^1024 =
+ * -1 mod p). This fills gm[] and igm[] with powers of g and 1/g:
+ * gm[rev(i)] = g^i mod p
+ * igm[rev(i)] = (1/g)^i mod p
+ * where rev() is the "bit reversal" function over 10 bits. It fills
+ * the arrays only up to N = 2^logn values.
+ *
+ * The values stored in gm[] and igm[] are in Montgomery representation.
+ *
+ * p must be a prime such that p = 1 mod 2048.
+ */
static void
modp_mkgm2(uint32_t *gm, uint32_t *igm, unsigned logn,
           uint32_t g, uint32_t p, uint32_t p0i) {
    size_t u, n;
    unsigned k;
    uint32_t ig, x1, x2, R2;

    n = (size_t)1 << logn;

    /*
     * We want g such that g^(2N) = 1 mod p, but the provided
     * generator has order 2048. We must square it a few times.
     */
    R2 = modp_R2(p, p0i);
    /* Bring g into Montgomery representation. */
    g = modp_montymul(g, R2, p, p0i);
    for (k = logn; k < 10; k ++) {
        g = modp_montymul(g, g, p, p0i);
    }

    /*
     * ig = 1/g in Montgomery representation: dividing R2 = R*R by
     * the Montgomery value g*R yields R/g.
     */
    ig = modp_div(R2, g, p, p0i, modp_R(p));
    k = 10 - logn;
    /* x1 and x2 start at R, i.e. 1 in Montgomery representation. */
    x1 = x2 = modp_R(p);
    for (u = 0; u < n; u ++) {
        size_t v;

        /*
         * Scale the 10-bit reversal table down to logn bits by
         * indexing only multiples of 2^(10-logn).
         */
        v = REV10[u << k];
        gm[v] = x1;    /* g^u, Montgomery representation */
        igm[v] = x2;   /* (1/g)^u, Montgomery representation */
        x1 = modp_montymul(x1, g, p, p0i);
        x2 = modp_montymul(x2, ig, p, p0i);
    }
}
+
/*
 * Compute the NTT over a polynomial (binary case). Polynomial elements
 * are a[0], a[stride], a[2 * stride]...
 *
 * gm[] holds the twiddle factors in bit-reversed order, in Montgomery
 * representation, as produced by modp_mkgm2().
 */
static void
modp_NTT2_ext(uint32_t *a, size_t stride, const uint32_t *gm, unsigned logn,
              uint32_t p, uint32_t p0i) {
    size_t t, m, n;

    if (logn == 0) {
        return;
    }
    n = (size_t)1 << logn;
    t = n;
    /*
     * Iterative Cooley-Tukey: the number of groups m doubles at each
     * pass while the butterfly span t halves.
     */
    for (m = 1; m < n; m <<= 1) {
        size_t ht, u, v1;

        ht = t >> 1;
        for (u = 0, v1 = 0; u < m; u ++, v1 += t) {
            uint32_t s;
            size_t v;
            uint32_t *r1, *r2;

            /* Twiddle factor shared by this group of butterflies. */
            s = gm[m + u];
            r1 = a + v1 * stride;
            r2 = r1 + ht * stride;
            for (v = 0; v < ht; v ++, r1 += stride, r2 += stride) {
                uint32_t x, y;

                /* Butterfly: (x, y) -> (x + y*s, x - y*s) mod p. */
                x = *r1;
                y = modp_montymul(*r2, s, p, p0i);
                *r1 = modp_add(x, y, p);
                *r2 = modp_sub(x, y, p);
            }
        }
        t = ht;
    }
}
+
/*
 * Compute the inverse NTT over a polynomial (binary case). Polynomial
 * elements are a[0], a[stride], a[2 * stride]... igm[] holds the
 * inverse twiddle factors (Montgomery representation, bit-reversed
 * order), as produced by modp_mkgm2().
 */
static void
modp_iNTT2_ext(uint32_t *a, size_t stride, const uint32_t *igm, unsigned logn,
               uint32_t p, uint32_t p0i) {
    size_t t, m, n, k;
    uint32_t ni;
    uint32_t *r;

    if (logn == 0) {
        return;
    }
    n = (size_t)1 << logn;
    t = 1;
    /* Gentleman-Sande style passes: span t doubles while m halves. */
    for (m = n; m > 1; m >>= 1) {
        size_t hm, dt, u, v1;

        hm = m >> 1;
        dt = t << 1;
        for (u = 0, v1 = 0; u < hm; u ++, v1 += dt) {
            uint32_t s;
            size_t v;
            uint32_t *r1, *r2;

            s = igm[hm + u];
            r1 = a + v1 * stride;
            r2 = r1 + t * stride;
            for (v = 0; v < t; v ++, r1 += stride, r2 += stride) {
                uint32_t x, y;

                /* Inverse butterfly: (x, y) -> (x + y, (x - y)*s). */
                x = *r1;
                y = *r2;
                *r1 = modp_add(x, y, p);
                /* (Stray double semicolon removed here.) */
                *r2 = modp_montymul(
                          modp_sub(x, y, p), s, p, p0i);
            }
        }
        t = dt;
    }

    /*
     * We need 1/n in Montgomery representation, i.e. R/n. Since
     * 1 <= logn <= 10, R/n is an integer; moreover, R/n <= 2^30 < p,
     * thus a simple shift will do.
     */
    ni = (uint32_t)1 << (31 - logn);
    for (k = 0, r = a; k < n; k ++, r += stride) {
        *r = modp_montymul(*r, ni, p, p0i);
    }
}
+
+/*
+ * Simplified macros for NTT and iNTT (binary case) when the elements
+ * are consecutive in RAM.
+ */
+#define modp_NTT2(a, gm, logn, p, p0i) modp_NTT2_ext(a, 1, gm, logn, p, p0i)
+#define modp_iNTT2(a, igm, logn, p, p0i) modp_iNTT2_ext(a, 1, igm, logn, p, p0i)
+
+/*
+ * Given polynomial f in NTT representation modulo p, compute f' of degree
+ * less than N/2 such that f' = f0^2 - X*f1^2, where f0 and f1 are
+ * polynomials of degree less than N/2 such that f = f0(X^2) + X*f1(X^2).
+ *
+ * The new polynomial is written "in place" over the first N/2 elements
+ * of f.
+ *
+ * If applied logn times successively on a given polynomial, the resulting
+ * degree-0 polynomial is the resultant of f and X^N+1 modulo p.
+ *
+ * This function applies only to the binary case; it is invoked from
+ * solve_NTRU_binary_depth1().
+ */
static void
modp_poly_rec_res(uint32_t *f, unsigned logn,
                  uint32_t p, uint32_t p0i, uint32_t R2) {
    size_t hn, u;

    hn = (size_t)1 << (logn - 1);
    for (u = 0; u < hn; u ++) {
        uint32_t w0, w1;

        /*
         * In NTT order, entries 2u and 2u+1 correspond to a pair of
         * opposite roots, so their product is the evaluation of
         * f0^2 - X*f1^2 at the squared root.
         */
        w0 = f[(u << 1) + 0];
        w1 = f[(u << 1) + 1];
        /*
         * Two Montgomery multiplications with an extra R2 factor
         * compute the plain product w0*w1 mod p: the two 1/R factors
         * cancel against R2 = R^2.
         */
        f[u] = modp_montymul(modp_montymul(w0, w1, p, p0i), R2, p, p0i);
    }
}
+
+/* ==================================================================== */
+/*
+ * Custom bignum implementation.
+ *
+ * This is a very reduced set of functionalities. We need to do the
+ * following operations:
+ *
+ * - Rebuild the resultant and the polynomial coefficients from their
+ * values modulo small primes (of length 31 bits each).
+ *
+ * - Compute an extended GCD between the two computed resultants.
+ *
+ * - Extract top bits and add scaled values during the successive steps
+ * of Babai rounding.
+ *
+ * When rebuilding values using CRT, we must also recompute the product
+ * of the small prime factors. We always do it one small factor at a
+ * time, so the "complicated" operations can be done modulo the small
+ * prime with the modp_* functions. CRT coefficients (inverses) are
+ * precomputed.
+ *
+ * All values are positive until the last step: when the polynomial
+ * coefficients have been rebuilt, we normalize them around 0. But then,
+ * only additions and subtractions on the upper few bits are needed
+ * afterwards.
+ *
+ * We keep big integers as arrays of 31-bit words (in uint32_t values);
+ * the top bit of each uint32_t is kept equal to 0. Using 31-bit words
+ * makes it easier to keep track of carries. When negative values are
+ * used, two's complement is used.
+ */
+
/*
 * Subtract integer b from integer a. Both integers are supposed to have
 * the same size. The carry (0 or 1) is returned. Source arrays a and b
 * MUST be distinct.
 *
 * The operation is performed as described above if ctl = 1. If
 * ctl = 0, the value a[] is unmodified, but all memory accesses are
 * still performed, and the carry is computed and returned.
 */
static uint32_t
zint_sub(uint32_t *a, const uint32_t *b, size_t len,
         uint32_t ctl) {
    uint32_t borrow, keep_mask;
    size_t i;

    borrow = 0;
    keep_mask = -ctl;
    for (i = 0; i < len; i ++) {
        uint32_t cur, diff;

        cur = a[i];
        diff = cur - b[i] - borrow;
        borrow = diff >> 31;
        /*
         * Commit the 31-bit result word only when ctl = 1; the borrow
         * chain itself is always computed (constant-time behaviour).
         */
        cur ^= ((diff & 0x7FFFFFFF) ^ cur) & keep_mask;
        a[i] = cur;
    }
    return borrow;
}
+
/*
 * Multiply the provided big integer m with a small value x.
 * This function assumes that x < 2^31. The carry word is returned.
 */
static uint32_t
zint_mul_small(uint32_t *m, size_t mlen, uint32_t x) {
    uint32_t carry;
    size_t i;

    carry = 0;
    for (i = 0; i < mlen; i ++) {
        uint64_t t;

        /*
         * 31-bit word times x plus incoming carry fits in 64 bits;
         * keep the low 31 bits, propagate the rest.
         */
        t = (uint64_t)m[i] * (uint64_t)x + carry;
        m[i] = (uint32_t)t & 0x7FFFFFFF;
        carry = (uint32_t)(t >> 31);
    }
    return carry;
}
+
/*
 * Reduce a big integer d modulo a small integer p.
 * Rules:
 *  d is unsigned
 *  p is prime
 *  2^30 < p < 2^31
 *  p0i = -(1/p) mod 2^31
 *  R2 = 2^62 mod p
 */
static uint32_t
zint_mod_small_unsigned(const uint32_t *d, size_t dlen,
                        uint32_t p, uint32_t p0i, uint32_t R2) {
    uint32_t acc;
    size_t i;

    /*
     * Horner evaluation in base 2^31, from the most significant word
     * down: acc <- acc*2^31 + d[i] (mod p).
     */
    acc = 0;
    for (i = dlen; i > 0; i --) {
        uint32_t w;

        /*
         * Montgomery multiplication by R2 = 2^62 is a plain
         * multiplication by 2^31 modulo p.
         */
        acc = modp_montymul(acc, R2, p, p0i);
        /* Bring the 31-bit word into [0, p) before the modular add. */
        w = d[i - 1] - p;
        w += p & -(w >> 31);
        acc = modp_add(acc, w, p);
    }
    return acc;
}
+
/*
 * Similar to zint_mod_small_unsigned(), except that d may be signed.
 * Extra parameter is Rx = 2^(31*dlen) mod p.
 */
static uint32_t
zint_mod_small_signed(const uint32_t *d, size_t dlen,
                      uint32_t p, uint32_t p0i, uint32_t R2, uint32_t Rx) {
    uint32_t r;

    if (dlen == 0) {
        return 0;
    }
    /*
     * Interpret d as unsigned first. If the sign bit (bit 30 of the
     * top word) is set, the two's complement value equals the
     * unsigned value minus 2^(31*dlen), so subtract Rx modulo p.
     */
    r = zint_mod_small_unsigned(d, dlen, p, p0i, R2);
    return modp_sub(r, Rx & -(d[dlen - 1] >> 30), p);
}
+
/*
 * Add y*s to x. x and y initially have length 'len' words; the new x
 * has length 'len+1' words. 's' must fit on 31 bits. x[] and y[] must
 * not overlap.
 */
static void
zint_add_mul_small(uint32_t *x,
                   const uint32_t *y, size_t len, uint32_t s) {
    uint32_t carry;
    size_t i;

    carry = 0;
    for (i = 0; i < len; i ++) {
        uint64_t t;

        /* y[i]*s + x[i] + carry fits in 64 bits (31-bit operands). */
        t = (uint64_t)y[i] * (uint64_t)s + (uint64_t)x[i] + (uint64_t)carry;
        x[i] = (uint32_t)t & 0x7FFFFFFF;
        carry = (uint32_t)(t >> 31);
    }
    /* The extra output word receives the final carry. */
    x[len] = carry;
}
+
+/*
+ * Normalize a modular integer around 0: if x > p/2, then x is replaced
+ * with x - p (signed encoding with two's complement); otherwise, x is
+ * untouched. The two integers x and p are encoded over the same length.
+ */
static void
zint_norm_zero(uint32_t *x, const uint32_t *p, size_t len) {
    size_t u;
    uint32_t r, bb;

    /*
     * Compare x with p/2. We use the shifted version of p, and p
     * is odd, so we really compare with (p-1)/2; we want to perform
     * the subtraction if and only if x > (p-1)/2.
     */
    r = 0;
    bb = 0;   /* bit shifted out of the word above (p is scanned high to low) */
    u = len;
    while (u -- > 0) {
        uint32_t wx, wp, cc;

        /*
         * Get the two words to compare in wx and wp (both over
         * 31 bits exactly).
         */
        wx = x[u];
        wp = (p[u] >> 1) | (bb << 30);
        bb = p[u] & 1;

        /*
         * We set cc to -1, 0 or 1, depending on whether wp is
         * lower than, equal to, or greater than wx.
         */
        cc = wp - wx;
        cc = ((-cc) >> 31) | -(cc >> 31);

        /*
         * If r != 0 then it is either 1 or -1, and we keep its
         * value. Otherwise, if r = 0, then we replace it with cc.
         * Words are scanned from most significant down, so the first
         * differing word decides the comparison.
         */
        r |= cc & ((r & 1) - 1);
    }

    /*
     * At this point, r = -1, 0 or 1, depending on whether (p-1)/2
     * is lower than, equal to, or greater than x. We thus want to
     * do the subtraction only if r = -1; r >> 31 is then exactly
     * the ctl value expected by zint_sub().
     */
    zint_sub(x, p, len, r >> 31);
}
+
+/*
+ * Rebuild integers from their RNS representation. There are 'num'
+ * integers, and each consists in 'xlen' words. 'xx' points at that
+ * first word of the first integer; subsequent integers are accessed
+ * by adding 'xstride' repeatedly.
+ *
+ * The words of an integer are the RNS representation of that integer,
+ * using the provided 'primes' are moduli. This function replaces
+ * each integer with its multi-word value (little-endian order).
+ *
+ * If "normalize_signed" is non-zero, then the returned value is
+ * normalized to the -m/2..m/2 interval (where m is the product of all
+ * small prime moduli); two's complement is used for negative values.
+ */
static void
zint_rebuild_CRT(uint32_t *xx, size_t xlen, size_t xstride,
                 size_t num, const small_prime *primes, int normalize_signed,
                 uint32_t *tmp) {
    size_t u;
    uint32_t *x;

    /* tmp[] accumulates the product of the primes processed so far. */
    tmp[0] = primes[0].p;
    for (u = 1; u < xlen; u ++) {
        /*
         * At the entry of each loop iteration:
         *  - the first u words of each array have been
         *    reassembled;
         *  - the first u words of tmp[] contains the
         *    product of the prime moduli processed so far.
         *
         * We call 'q' the product of all previous primes.
         */
        uint32_t p, p0i, s, R2;
        size_t v;

        p = primes[u].p;
        /*
         * NOTE(review): s appears to be a precomputed CRT coefficient
         * (related to 1/q mod p) from the primes table -- confirm
         * against the small_prime table generator.
         */
        s = primes[u].s;
        p0i = modp_ninv31(p);
        R2 = modp_R2(p, p0i);

        for (v = 0, x = xx; v < num; v ++, x += xstride) {
            uint32_t xp, xq, xr;
            /*
             * xp = the integer x modulo the prime p for this
             * iteration
             * xq = (x mod q) mod p
             */
            xp = x[u];
            xq = zint_mod_small_unsigned(x, u, p, p0i, R2);

            /*
             * New value is (x mod q) + q * (s * (xp - xq) mod p)
             */
            xr = modp_montymul(s, modp_sub(xp, xq, p), p, p0i);
            zint_add_mul_small(x, tmp, u, xr);
        }

        /*
         * Update product of primes in tmp[].
         */
        tmp[u] = zint_mul_small(tmp, u, p);
    }

    /*
     * Normalize the reconstructed values around 0.
     */
    if (normalize_signed) {
        for (u = 0, x = xx; u < num; u ++, x += xstride) {
            zint_norm_zero(x, tmp, xlen);
        }
    }
}
+
/*
 * Negate a big integer conditionally: value a is replaced with -a if
 * and only if ctl = 1. Control value ctl must be 0 or 1.
 */
static void
zint_negate(uint32_t *a, size_t len, uint32_t ctl) {
    uint32_t carry, flip_mask;
    size_t i;

    /*
     * Two's complement negation: flip all 31 data bits of each word
     * (XOR with 0x7FFFFFFF) and add 1. When ctl = 0 the mask and the
     * initial carry are both zero, leaving the value unchanged.
     */
    carry = ctl;
    flip_mask = -ctl >> 1;
    for (i = 0; i < len; i ++) {
        uint32_t w;

        w = (a[i] ^ flip_mask) + carry;
        carry = w >> 31;
        a[i] = w & 0x7FFFFFFF;
    }
}
+
+/*
+ * Replace a with (a*xa+b*xb)/(2^31) and b with (a*ya+b*yb)/(2^31).
+ * The low bits are dropped (the caller should compute the coefficients
+ * such that these dropped bits are all zeros). If either or both
+ * yields a negative value, then the value is negated.
+ *
+ * Returned value is:
+ * 0 both values were positive
+ * 1 new a had to be negated
+ * 2 new b had to be negated
+ * 3 both new a and new b had to be negated
+ *
+ * Coefficients xa, xb, ya and yb may use the full signed 32-bit range.
+ */
static uint32_t
zint_co_reduce(uint32_t *a, uint32_t *b, size_t len,
               int64_t xa, int64_t xb, int64_t ya, int64_t yb) {
    size_t u;
    int64_t cca, ccb;
    uint32_t nega, negb;

    cca = 0;   /* signed running carry for the new a */
    ccb = 0;   /* signed running carry for the new b */
    for (u = 0; u < len; u ++) {
        uint32_t wa, wb;
        uint64_t za, zb;

        /*
         * Accumulate the linear combinations in 64 bits; signed
         * coefficients wrap modulo 2^64 (two's complement), which
         * keeps the low bits exact.
         */
        wa = a[u];
        wb = b[u];
        za = wa * (uint64_t)xa + wb * (uint64_t)xb + (uint64_t)cca;
        zb = wa * (uint64_t)ya + wb * (uint64_t)yb + (uint64_t)ccb;
        /*
         * Store one word position down: this implements the division
         * by 2^31 (the dropped low words are expected to be zero).
         */
        if (u > 0) {
            a[u - 1] = (uint32_t)za & 0x7FFFFFFF;
            b[u - 1] = (uint32_t)zb & 0x7FFFFFFF;
        }
        /*
         * Arithmetic (sign-preserving) shift of the signed
         * interpretation of the accumulator; accessing a uint64_t
         * through the corresponding signed type is permitted aliasing.
         */
        cca = *(int64_t *)&za >> 31;
        ccb = *(int64_t *)&zb >> 31;
    }
    a[len - 1] = (uint32_t)cca;
    b[len - 1] = (uint32_t)ccb;

    /* Negate any result whose final signed carry is negative. */
    nega = (uint32_t)((uint64_t)cca >> 63);
    negb = (uint32_t)((uint64_t)ccb >> 63);
    zint_negate(a, len, nega);
    zint_negate(b, len, negb);
    return nega | (negb << 1);
}
+
/*
 * Finish modular reduction. Rules on input parameters:
 *
 *   if neg = 1, then -m <= a < 0
 *   if neg = 0, then 0 <= a < 2*m
 *
 * If neg = 0, then the top word of a[] is allowed to use 32 bits.
 *
 * Modulus m must be odd.
 */
static void
zint_finish_mod(uint32_t *a, size_t len, const uint32_t *m, uint32_t neg) {
    uint32_t borrow, word_xor, word_and;
    size_t i;

    /*
     * First pass: constant-time comparison of a (taken as
     * nonnegative) with m. After the loop, borrow = 1 iff a < m.
     * If the top word uses 32 bits, subtracting m still yields a
     * value below 2^31 since a < 2*m.
     */
    borrow = 0;
    for (i = 0; i < len; i ++) {
        borrow = (a[i] - m[i] - borrow) >> 31;
    }

    /*
     * Second pass:
     *   neg = 1              -> add m (i.e. subtract -m)
     *   neg = 0, borrow = 0  -> subtract m
     *   neg = 0, borrow = 1  -> subtract 0 (leave a unchanged)
     *
     * word_xor turns each word of m into a word of -m when neg = 1;
     * word_and forces the subtracted word to 0 in the "do nothing"
     * case; the initial borrow-in of 'neg' completes the two's
     * complement of m.
     */
    word_xor = -neg >> 1;
    word_and = -(neg | (1 - borrow));
    borrow = neg;
    for (i = 0; i < len; i ++) {
        uint32_t w;

        w = a[i] - ((m[i] ^ word_xor) & word_and) - borrow;
        a[i] = w & 0x7FFFFFFF;
        borrow = w >> 31;
    }
}
+
+/*
+ * Replace a with (a*xa+b*xb)/(2^31) mod m, and b with
+ * (a*ya+b*yb)/(2^31) mod m. Modulus m must be odd; m0i = -1/m[0] mod 2^31.
+ */
static void
zint_co_reduce_mod(uint32_t *a, uint32_t *b, const uint32_t *m, size_t len,
                   uint32_t m0i, int64_t xa, int64_t xb, int64_t ya, int64_t yb) {
    size_t u;
    int64_t cca, ccb;
    uint32_t fa, fb;

    /*
     * These are actually four combined Montgomery multiplications.
     */
    cca = 0;
    ccb = 0;
    /*
     * fa and fb are the Montgomery folding factors: the multiples of
     * m that make the low 31 bits of each accumulated combination
     * vanish (m0i = -1/m[0] mod 2^31).
     */
    fa = ((a[0] * (uint32_t)xa + b[0] * (uint32_t)xb) * m0i) & 0x7FFFFFFF;
    fb = ((a[0] * (uint32_t)ya + b[0] * (uint32_t)yb) * m0i) & 0x7FFFFFFF;
    for (u = 0; u < len; u ++) {
        uint32_t wa, wb;
        uint64_t za, zb;

        wa = a[u];
        wb = b[u];
        za = wa * (uint64_t)xa + wb * (uint64_t)xb
             + m[u] * (uint64_t)fa + (uint64_t)cca;
        zb = wa * (uint64_t)ya + wb * (uint64_t)yb
             + m[u] * (uint64_t)fb + (uint64_t)ccb;
        /* Store one word down: this is the division by 2^31. */
        if (u > 0) {
            a[u - 1] = (uint32_t)za & 0x7FFFFFFF;
            b[u - 1] = (uint32_t)zb & 0x7FFFFFFF;
        }
        /*
         * Arithmetic right shift of the signed interpretation
         * (signed/unsigned aliasing of the same type is permitted).
         */
        cca = *(int64_t *)&za >> 31;
        ccb = *(int64_t *)&zb >> 31;
    }
    a[len - 1] = (uint32_t)cca;
    b[len - 1] = (uint32_t)ccb;

    /*
     * At this point:
     *   -m <= a < 2*m
     *   -m <= b < 2*m
     * (this is a case of Montgomery reduction)
     * The top words of 'a' and 'b' may have a 32-th bit set.
     * We want to add or subtract the modulus, as required; the sign
     * bit of the final carry supplies the 'neg' flag.
     */
    zint_finish_mod(a, len, m, (uint32_t)((uint64_t)cca >> 63));
    zint_finish_mod(b, len, m, (uint32_t)((uint64_t)ccb >> 63));
}
+
+/*
+ * Compute a GCD between two positive big integers x and y. The two
+ * integers must be odd. Returned value is 1 if the GCD is 1, 0
+ * otherwise. When 1 is returned, arrays u and v are filled with values
+ * such that:
+ * 0 <= u <= y
+ * 0 <= v <= x
+ * x*u - y*v = 1
+ * x[] and y[] are unmodified. Both input values must have the same
+ * encoded length. Temporary array must be large enough to accommodate 4
+ * extra values of that length. Arrays u, v and tmp may not overlap with
+ * each other, or with either x or y.
+ */
+static int
+zint_bezout(uint32_t *u, uint32_t *v,
+ const uint32_t *x, const uint32_t *y,
+ size_t len, uint32_t *tmp) {
+ /*
+ * Algorithm is an extended binary GCD. We maintain 6 values
+ * a, b, u0, u1, v0 and v1 with the following invariants:
+ *
+ * a = x*u0 - y*v0
+ * b = x*u1 - y*v1
+ * 0 <= a <= x
+ * 0 <= b <= y
+ * 0 <= u0 < y
+ * 0 <= v0 < x
+ * 0 <= u1 <= y
+ * 0 <= v1 < x
+ *
+ * Initial values are:
+ *
+ * a = x u0 = 1 v0 = 0
+ * b = y u1 = y v1 = x-1
+ *
+ * Each iteration reduces either a or b, and maintains the
+ * invariants. Algorithm stops when a = b, at which point their
+ * common value is GCD(a,b) and (u0,v0) (or (u1,v1)) contains
+ * the values (u,v) we want to return.
+ *
+ * The formal definition of the algorithm is a sequence of steps:
+ *
+ * - If a is even, then:
+ * a <- a/2
+ * u0 <- u0/2 mod y
+ * v0 <- v0/2 mod x
+ *
+ * - Otherwise, if b is even, then:
+ * b <- b/2
+ * u1 <- u1/2 mod y
+ * v1 <- v1/2 mod x
+ *
+ * - Otherwise, if a > b, then:
+ * a <- (a-b)/2
+ * u0 <- (u0-u1)/2 mod y
+ * v0 <- (v0-v1)/2 mod x
+ *
+ * - Otherwise:
+ * b <- (b-a)/2
+ * u1 <- (u1-u0)/2 mod y
+ * v1 <- (v1-v0)/2 mod y
+ *
+ * We can show that the operations above preserve the invariants:
+ *
+ * - If a is even, then u0 and v0 are either both even or both
+ * odd (since a = x*u0 - y*v0, and x and y are both odd).
+ * If u0 and v0 are both even, then (u0,v0) <- (u0/2,v0/2).
+ * Otherwise, (u0,v0) <- ((u0+y)/2,(v0+x)/2). Either way,
+ * the a = x*u0 - y*v0 invariant is preserved.
+ *
+ * - The same holds for the case where b is even.
+ *
+ * - If a and b are odd, and a > b, then:
+ *
+ * a-b = x*(u0-u1) - y*(v0-v1)
+ *
+ * In that situation, if u0 < u1, then x*(u0-u1) < 0, but
+ * a-b > 0; therefore, it must be that v0 < v1, and the
+ * first part of the update is: (u0,v0) <- (u0-u1+y,v0-v1+x),
+ * which preserves the invariants. Otherwise, if u0 > u1,
+ * then u0-u1 >= 1, thus x*(u0-u1) >= x. But a <= x and
+ * b >= 0, hence a-b <= x. It follows that, in that case,
+ * v0-v1 >= 0. The first part of the update is then:
+ * (u0,v0) <- (u0-u1,v0-v1), which again preserves the
+ * invariants.
+ *
+ * Either way, once the subtraction is done, the new value of
+ * a, which is the difference of two odd values, is even,
+ * and the remaining of this step is a subcase of the
+ * first algorithm case (i.e. when a is even).
+ *
+ * - If a and b are odd, and b > a, then the a similar
+ * argument holds.
+ *
+ * The values a and b start at x and y, respectively. Since x
+ * and y are odd, their GCD is odd, and it is easily seen that
+ * all steps conserve the GCD (GCD(a-b,b) = GCD(a, b);
+ * GCD(a/2,b) = GCD(a,b) if GCD(a,b) is odd). Moreover, either a
+ * or b is reduced by at least one bit at each iteration, so
+ * the algorithm necessarily converges on the case a = b, at
+ * which point the common value is the GCD.
+ *
+ * In the algorithm expressed above, when a = b, the fourth case
+ * applies, and sets b = 0. Since a contains the GCD of x and y,
+ * which are both odd, a must be odd, and subsequent iterations
+ * (if any) will simply divide b by 2 repeatedly, which has no
+ * consequence. Thus, the algorithm can run for more iterations
+ * than necessary; the final GCD will be in a, and the (u,v)
+ * coefficients will be (u0,v0).
+ *
+ *
+ * The presentation above is bit-by-bit. It can be sped up by
+ * noticing that all decisions are taken based on the low bits
+ * and high bits of a and b. We can extract the two top words
+ * and low word of each of a and b, and compute reduction
+ * parameters pa, pb, qa and qb such that the new values for
+ * a and b are:
+ * a' = (a*pa + b*pb) / (2^31)
+ * b' = (a*qa + b*qb) / (2^31)
+ * the two divisions being exact. The coefficients are obtained
+ * just from the extracted words, and may be slightly off, requiring
+ * an optional correction: if a' < 0, then we replace pa with -pa
+ * and pb with -pb. Each such step will reduce the total length
+ * (sum of lengths of a and b) by at least 30 bits at each
+ * iteration.
+ */
+ uint32_t *u0, *u1, *v0, *v1, *a, *b;
+ uint32_t x0i, y0i;
+ uint32_t num, rc;
+ size_t j;
+
+ if (len == 0) {
+ return 0;
+ }
+
+ /*
+ * u0 and v0 are the u and v result buffers; the four other
+ * values (u1, v1, a and b) are taken from tmp[].
+ */
+ u0 = u;
+ v0 = v;
+ u1 = tmp;
+ v1 = u1 + len;
+ a = v1 + len;
+ b = a + len;
+
+ /*
+ * We'll need the Montgomery reduction coefficients.
+ */
+ x0i = modp_ninv31(x[0]);
+ y0i = modp_ninv31(y[0]);
+
+ /*
+ * Initialize a, b, u0, u1, v0 and v1.
+ * a = x u0 = 1 v0 = 0
+ * b = y u1 = y v1 = x-1
+ * Note that x is odd, so computing x-1 is easy.
+ */
+ memcpy(a, x, len * sizeof * x);
+ memcpy(b, y, len * sizeof * y);
+ u0[0] = 1;
+ memset(u0 + 1, 0, (len - 1) * sizeof * u0);
+ memset(v0, 0, len * sizeof * v0);
+ memcpy(u1, y, len * sizeof * u1);
+ memcpy(v1, x, len * sizeof * v1);
+ v1[0] --;
+
+ /*
+ * Each input operand may be as large as 31*len bits, and we
+ * reduce the total length by at least 30 bits at each iteration.
+ */
+ for (num = 62 * (uint32_t)len + 30; num >= 30; num -= 30) {
+ uint32_t c0, c1;
+ uint32_t a0, a1, b0, b1;
+ uint64_t a_hi, b_hi;
+ uint32_t a_lo, b_lo;
+ int64_t pa, pb, qa, qb;
+ int i;
+ uint32_t r;
+
+ /*
+ * Extract the top words of a and b. If j is the highest
+ * index >= 1 such that a[j] != 0 or b[j] != 0, then we
+ * want (a[j] << 31) + a[j-1] and (b[j] << 31) + b[j-1].
+ * If a and b are down to one word each, then we use
+ * a[0] and b[0].
+ */
+ c0 = (uint32_t) -1;
+ c1 = (uint32_t) -1;
+ a0 = 0;
+ a1 = 0;
+ b0 = 0;
+ b1 = 0;
+ j = len;
+ while (j -- > 0) {
+ uint32_t aw, bw;
+
+ aw = a[j];
+ bw = b[j];
+ a0 ^= (a0 ^ aw) & c0;
+ a1 ^= (a1 ^ aw) & c1;
+ b0 ^= (b0 ^ bw) & c0;
+ b1 ^= (b1 ^ bw) & c1;
+ c1 = c0;
+ c0 &= (((aw | bw) + 0x7FFFFFFF) >> 31) - (uint32_t)1;
+ }
+
+ /*
+ * If c1 = 0, then we grabbed two words for a and b.
+ * If c1 != 0 but c0 = 0, then we grabbed one word. It
+ * is not possible that c1 != 0 and c0 != 0, because that
+ * would mean that both integers are zero.
+ */
+ a1 |= a0 & c1;
+ a0 &= ~c1;
+ b1 |= b0 & c1;
+ b0 &= ~c1;
+ a_hi = ((uint64_t)a0 << 31) + a1;
+ b_hi = ((uint64_t)b0 << 31) + b1;
+ a_lo = a[0];
+ b_lo = b[0];
+
+ /*
+ * Compute reduction factors:
+ *
+ * a' = a*pa + b*pb
+ * b' = a*qa + b*qb
+ *
+ * such that a' and b' are both multiple of 2^31, but are
+ * only marginally larger than a and b.
+ */
+ pa = 1;
+ pb = 0;
+ qa = 0;
+ qb = 1;
+ for (i = 0; i < 31; i ++) {
+ /*
+ * At each iteration:
+ *
+ * a <- (a-b)/2 if: a is odd, b is odd, a_hi > b_hi
+ * b <- (b-a)/2 if: a is odd, b is odd, a_hi <= b_hi
+ * a <- a/2 if: a is even
+ * b <- b/2 if: a is odd, b is even
+ *
+ * We multiply a_lo and b_lo by 2 at each
+ * iteration, thus a division by 2 really is a
+ * non-multiplication by 2.
+ */
+ uint32_t rt, oa, ob, cAB, cBA, cA;
+ uint64_t rz;
+
+ /*
+ * rt = 1 if a_hi > b_hi, 0 otherwise.
+ */
+ rz = b_hi - a_hi;
+ rt = (uint32_t)((rz ^ ((a_hi ^ b_hi)
+ & (a_hi ^ rz))) >> 63);
+
+ /*
+ * cAB = 1 if b must be subtracted from a
+ * cBA = 1 if a must be subtracted from b
+ * cA = 1 if a must be divided by 2
+ *
+ * Rules:
+ *
+ * cAB and cBA cannot both be 1.
+ * If a is not divided by 2, b is.
+ */
+ oa = (a_lo >> i) & 1;
+ ob = (b_lo >> i) & 1;
+ cAB = oa & ob & rt;
+ cBA = oa & ob & ~rt;
+ cA = cAB | (oa ^ 1);
+
+ /*
+ * Conditional subtractions.
+ */
+ a_lo -= b_lo & -cAB;
+ a_hi -= b_hi & -(uint64_t)cAB;
+ pa -= qa & -(int64_t)cAB;
+ pb -= qb & -(int64_t)cAB;
+ b_lo -= a_lo & -cBA;
+ b_hi -= a_hi & -(uint64_t)cBA;
+ qa -= pa & -(int64_t)cBA;
+ qb -= pb & -(int64_t)cBA;
+
+ /*
+ * Shifting.
+ */
+ a_lo += a_lo & (cA - 1);
+ pa += pa & ((int64_t)cA - 1);
+ pb += pb & ((int64_t)cA - 1);
+ a_hi ^= (a_hi ^ (a_hi >> 1)) & -(uint64_t)cA;
+ b_lo += b_lo & -cA;
+ qa += qa & -(int64_t)cA;
+ qb += qb & -(int64_t)cA;
+ b_hi ^= (b_hi ^ (b_hi >> 1)) & ((uint64_t)cA - 1);
+ }
+
+ /*
+ * Apply the computed parameters to our values. We
+ * may have to correct pa and pb depending on the
+ * returned value of zint_co_reduce() (when a and/or b
+ * had to be negated).
+ */
+ r = zint_co_reduce(a, b, len, pa, pb, qa, qb);
+ pa -= (pa + pa) & -(int64_t)(r & 1);
+ pb -= (pb + pb) & -(int64_t)(r & 1);
+ qa -= (qa + qa) & -(int64_t)(r >> 1);
+ qb -= (qb + qb) & -(int64_t)(r >> 1);
+ zint_co_reduce_mod(u0, u1, y, len, y0i, pa, pb, qa, qb);
+ zint_co_reduce_mod(v0, v1, x, len, x0i, pa, pb, qa, qb);
+ }
+
+ /*
+ * At that point, array a[] should contain the GCD, and the
+ * results (u,v) should already be set. We check that the GCD
+ * is indeed 1. We also check that the two operands x and y
+ * are odd.
+ */
+ rc = a[0] ^ 1;
+ for (j = 1; j < len; j ++) {
+ rc |= a[j];
+ }
+ return (int)((1 - ((rc | -rc) >> 31)) & x[0] & y[0]);
+}
+
/*
 * Add k*y*2^sc to x. The result is assumed to fit in the array of
 * size xlen (truncation is applied if necessary).
 * Scale factor 'sc' is provided as sch and scl, such that:
 *   sch = sc / 31
 *   scl = sc % 31
 * xlen MUST NOT be lower than ylen.
 *
 * x[] and y[] are both signed integers, using two's complement for
 * negative values.
 */
static void
zint_add_scaled_mul_small(uint32_t *x, size_t xlen,
                          const uint32_t *y, size_t ylen, int32_t k,
                          uint32_t sch, uint32_t scl) {
    size_t u;
    uint32_t ysign, tw;
    int32_t cc;

    if (ylen == 0) {
        return;
    }

    /*
     * ysign is an all-ones word if y is negative (top bit of its
     * most significant 31-bit limb is set), zero otherwise; it is
     * used to sign-extend y past its ylen limbs.
     */
    ysign = -(y[ylen - 1] >> 30) >> 1;
    tw = 0;
    cc = 0;
    for (u = sch; u < xlen; u ++) {
        size_t v;
        uint32_t wy, wys, ccu;
        uint64_t z;

        /*
         * Get the next word of y (scaled). tw carries the bits
         * shifted out of the previous limb by scl.
         */
        v = u - sch;
        if (v < ylen) {
            wy = y[v];
        } else {
            wy = ysign;
        }
        wys = ((wy << scl) & 0x7FFFFFFF) | tw;
        tw = wy >> (31 - scl);

        /*
         * The expression below does not overflow.
         */
        z = (uint64_t)((int64_t)wys * (int64_t)k + (int64_t)x[u] + cc);
        x[u] = (uint32_t)z & 0x7FFFFFFF;

        /*
         * Right-shifting the signed value z would yield
         * implementation-defined results (arithmetic shift is
         * not guaranteed). However, we can cast to unsigned,
         * and get the next carry as an unsigned word. We can
         * then convert it back to signed by using the guaranteed
         * fact that 'int32_t' uses two's complement with no
         * trap representation or padding bit, and with a layout
         * compatible with that of 'uint32_t'.
         */
        ccu = (uint32_t)(z >> 31);
        cc = *(int32_t *)&ccu;
    }
}
+
/*
 * Subtract y*2^sc from x. The result is assumed to fit in the array of
 * size xlen (truncation is applied if necessary).
 * Scale factor 'sc' is provided as sch and scl, such that:
 *   sch = sc / 31
 *   scl = sc % 31
 * xlen MUST NOT be lower than ylen.
 *
 * x[] and y[] are both signed integers over 31-bit limbs, using two's
 * complement for negative values.
 */
static void
zint_sub_scaled(uint32_t *x, size_t xlen,
                const uint32_t *y, size_t ylen, uint32_t sch, uint32_t scl) {
    uint32_t sign_fill, shift_in, borrow;
    size_t i;

    if (ylen == 0) {
        return;
    }

    /*
     * sign_fill is all-ones if y is negative (top bit of its last
     * 31-bit limb set), zero otherwise; it sign-extends y past its
     * last limb.
     */
    sign_fill = -(y[ylen - 1] >> 30) >> 1;
    shift_in = 0;
    borrow = 0;
    for (i = sch; i < xlen; i ++) {
        size_t src;
        uint32_t yw, shifted, diff;

        /*
         * Next limb of y, scaled by 2^scl; shift_in carries the
         * bits shifted out of the previous limb.
         */
        src = i - sch;
        yw = (src < ylen) ? y[src] : sign_fill;
        shifted = ((yw << scl) & 0x7FFFFFFF) | shift_in;
        shift_in = yw >> (31 - scl);

        /*
         * Limb-wise subtraction with borrow propagation; limbs
         * hold 31 bits, so the borrow is bit 31 of the raw result.
         */
        diff = x[i] - shifted - borrow;
        x[i] = diff & 0x7FFFFFFF;
        borrow = diff >> 31;
    }
}
+
/*
 * Convert a one-word signed big integer (31-bit limb, two's complement)
 * into a plain int32_t value.
 */
static inline int32_t
zint_one_to_plain(const uint32_t *x) {
    uint32_t w;
    int32_t r;

    /*
     * Replicate the 31-bit sign (bit 30) into bit 31, then
     * reinterpret the two's complement pattern as a signed value
     * (memcpy avoids any aliasing concern).
     */
    w = x[0];
    w |= (w & 0x40000000) << 1;
    memcpy(&r, &w, sizeof r);
    return r;
}
+
+/* ==================================================================== */
+
/*
 * Convert a polynomial to floating-point values.
 *
 * Each coefficient has length flen words, and starts fstride words after
 * the previous.
 *
 * IEEE-754 binary64 values can represent values in a finite range,
 * roughly 2^(-1023) to 2^(+1023); thus, if coefficients are too large,
 * they should be "trimmed" by pointing not to the lowest word of each,
 * but upper.
 */
static void
poly_big_to_fp(fpr *d, const uint32_t *f, size_t flen, size_t fstride,
               unsigned logn) {
    size_t n, u;

    n = MKN(logn);
    if (flen == 0) {
        /* Zero-length big integers are all zero. */
        for (u = 0; u < n; u ++) {
            d[u] = fpr_zero;
        }
        return;
    }
    for (u = 0; u < n; u ++, f += fstride) {
        size_t v;
        uint32_t neg, cc, xm;
        fpr x, fsc;

        /*
         * Get sign of the integer; if it is negative, then we
         * will load its absolute value instead, and negate the
         * result. neg is all-ones when negative, zero otherwise.
         */
        neg = -(f[flen - 1] >> 30);
        xm = neg >> 1;
        cc = neg & 1;
        x = fpr_zero;
        fsc = fpr_one;
        for (v = 0; v < flen; v ++, fsc = fpr_mul(fsc, fpr_ptwo31)) {
            uint32_t w;

            /*
             * (f[v] ^ xm) + cc performs a limb-wise two's
             * complement negation when the value is negative
             * (xm = 0x7FFFFFFF, cc starts at 1), and is the
             * identity otherwise.
             */
            w = (f[v] ^ xm) + cc;
            cc = w >> 31;
            w &= 0x7FFFFFFF;
            /* Re-apply the overall sign to this 31-bit limb. */
            w -= (w << 1) & neg;
            x = fpr_add(x, fpr_mul(fpr_of(*(int32_t *)&w), fsc));
        }
        d[u] = x;
    }
}
+
/*
 * Convert a polynomial to small integers. Source values are supposed
 * to be one-word integers, signed over 31 bits. Returned value is 0
 * if any of the coefficients exceeds the provided limit (in absolute
 * value), or 1 on success.
 *
 * This is not constant-time; this is not a problem here, because on
 * any failure, the NTRU-solving process will be deemed to have failed
 * and the (f,g) polynomials will be discarded.
 */
static int
poly_big_to_small(int8_t *d, const uint32_t *s, int lim, unsigned logn) {
    size_t i, count;

    count = MKN(logn);
    for (i = 0; i < count; i ++) {
        int32_t w;

        w = zint_one_to_plain(s + i);
        if (w < -lim || w > lim) {
            return 0;
        }
        d[i] = (int8_t)w;
    }
    return 1;
}
+
/*
 * Subtract k*f from F, where F, f and k are polynomials modulo X^N+1.
 * Coefficients of polynomial k are small integers (signed values in the
 * -2^31..2^31 range) scaled by 2^sc. Value sc is provided as sch = sc / 31
 * and scl = sc % 31.
 *
 * This function implements the basic quadratic multiplication algorithm,
 * which is efficient in space (no extra buffer needed) but slow at
 * high degree.
 */
static void
poly_sub_scaled(uint32_t *F, size_t Flen, size_t Fstride,
                const uint32_t *f, size_t flen, size_t fstride,
                const int32_t *k, uint32_t sch, uint32_t scl, unsigned logn) {
    size_t n, u;

    n = MKN(logn);
    for (u = 0; u < n; u ++) {
        int32_t kf;
        size_t v;
        uint32_t *x;
        const uint32_t *y;

        /* Accumulate -k[u] * f into F, one coefficient at a time. */
        kf = -k[u];
        x = F + u * Fstride;
        y = f;
        for (v = 0; v < n; v ++) {
            zint_add_scaled_mul_small(
                x, Flen, y, flen, kf, sch, scl);
            if (u + v == n - 1) {
                /*
                 * Passing the last coefficient wraps around to
                 * degree 0 with a sign flip, since X^N = -1
                 * modulo X^N+1.
                 */
                x = F;
                kf = -kf;
            } else {
                x += Fstride;
            }
            y += fstride;
        }
    }
}
+
/*
 * Subtract k*f from F. Coefficients of polynomial k are small integers
 * (signed values in the -2^31..2^31 range) scaled by 2^sc. This function
 * assumes that the degree is large, and integers relatively small.
 * The value sc is provided as sch = sc / 31 and scl = sc % 31.
 */
static void
poly_sub_scaled_ntt(uint32_t *F, size_t Flen, size_t Fstride,
                    const uint32_t *f, size_t flen, size_t fstride,
                    const int32_t *k, uint32_t sch, uint32_t scl, unsigned logn,
                    uint32_t *tmp) {
    uint32_t *gm, *igm, *fk, *t1, *x;
    const uint32_t *y;
    size_t n, u, tlen;
    const small_prime *primes;

    n = MKN(logn);
    tlen = flen + 1;
    /*
     * Layout in tmp[]: gm (n words) | igm (n words) | fk (n*tlen words)
     * | t1 (scratch).
     */
    gm = tmp;
    igm = gm + MKN(logn);
    fk = igm + MKN(logn);
    t1 = fk + n * tlen;

    primes = PRIMES;

    /*
     * Compute k*f in fk[], in RNS notation: for each of the tlen small
     * primes, NTT-multiply k by f modulo that prime.
     */
    for (u = 0; u < tlen; u ++) {
        uint32_t p, p0i, R2, Rx;
        size_t v;

        p = primes[u].p;
        p0i = modp_ninv31(p);
        R2 = modp_R2(p, p0i);
        Rx = modp_Rx((unsigned)flen, p, p0i, R2);
        modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i);

        /* k modulo p, in NTT representation. */
        for (v = 0; v < n; v ++) {
            t1[v] = modp_set(k[v], p);
        }
        modp_NTT2(t1, gm, logn, p, p0i);
        /* f modulo p (reduced from its flen-word representation). */
        for (v = 0, y = f, x = fk + u;
             v < n; v ++, y += fstride, x += tlen) {
            *x = zint_mod_small_signed(y, flen, p, p0i, R2, Rx);
        }
        modp_NTT2_ext(fk + u, tlen, gm, logn, p, p0i);
        /* Pointwise product (with Montgomery adjustment by R2). */
        for (v = 0, x = fk + u; v < n; v ++, x += tlen) {
            *x = modp_montymul(
                modp_montymul(t1[v], *x, p, p0i), R2, p, p0i);
        }
        modp_iNTT2_ext(fk + u, tlen, igm, logn, p, p0i);
    }

    /*
     * Rebuild k*f.
     */
    zint_rebuild_CRT(fk, tlen, tlen, n, primes, 1, t1);

    /*
     * Subtract k*f, scaled, from F.
     */
    for (u = 0, x = F, y = fk; u < n; u ++, x += Fstride, y += tlen) {
        zint_sub_scaled(x, Flen, y, tlen, sch, scl);
    }
}
+
+/* ==================================================================== */
+
+
+#define RNG_CONTEXT inner_shake256_context
+
+/*
+ * Get a random 8-byte integer from a SHAKE-based RNG. This function
+ * ensures consistent interpretation of the SHAKE output so that
+ * the same values will be obtained over different platforms, in case
+ * a known seed is used.
+ */
+static inline uint64_t
+get_rng_u64(inner_shake256_context *rng) {
+ /*
+ * We enforce little-endian representation.
+ */
+
+ uint8_t tmp[8];
+
+ inner_shake256_extract(rng, tmp, sizeof tmp);
+ return (uint64_t)tmp[0]
+ | ((uint64_t)tmp[1] << 8)
+ | ((uint64_t)tmp[2] << 16)
+ | ((uint64_t)tmp[3] << 24)
+ | ((uint64_t)tmp[4] << 32)
+ | ((uint64_t)tmp[5] << 40)
+ | ((uint64_t)tmp[6] << 48)
+ | ((uint64_t)tmp[7] << 56);
+}
+
/*
 * Table below incarnates a discrete Gaussian distribution:
 *    D(x) = exp(-(x^2)/(2*sigma^2))
 * where sigma = 1.17*sqrt(q/(2*N)), q = 12289, and N = 1024.
 * Element 0 of the table is P(x = 0).
 * For k > 0, element k is P(x >= k+1 | x > 0).
 * Probabilities are scaled up by 2^63.
 * (27 entries, ending at probability 0.)
 */
static const uint64_t gauss_1024_12289[] = {
    1283868770400643928u, 6416574995475331444u, 4078260278032692663u,
    2353523259288686585u, 1227179971273316331u, 575931623374121527u,
    242543240509105209u, 91437049221049666u, 30799446349977173u,
    9255276791179340u, 2478152334826140u, 590642893610164u,
    125206034929641u, 23590435911403u, 3948334035941u,
    586753615614u, 77391054539u, 9056793210u,
    940121950u, 86539696u, 7062824u,
    510971u, 32764u, 1862u,
    94u, 4u, 0u
};
+
/*
 * Generate a random value with a Gaussian distribution centered on 0.
 * The RNG must be ready for extraction (already flipped).
 *
 * Distribution has standard deviation 1.17*sqrt(q/(2*N)). The
 * precomputed table is for N = 1024. Since the sum of two independent
 * values of standard deviation sigma has standard deviation
 * sigma*sqrt(2), then we can just generate more values and add them
 * together for lower dimensions.
 */
static int
mkgauss(RNG_CONTEXT *rng, unsigned logn) {
    unsigned u, g;
    int val;

    /* Number of N = 1024 samples to sum: 2^(10-logn). */
    g = 1U << (10 - logn);
    val = 0;
    for (u = 0; u < g; u ++) {
        /*
         * Each iteration generates one value with the
         * Gaussian distribution for N = 1024.
         *
         * We use two random 64-bit values. First value
         * decides on whether the generated value is 0, and,
         * if not, the sign of the value. Second random 64-bit
         * word is used to generate the non-zero value.
         *
         * For constant-time code we have to read the complete
         * table. This has negligible cost, compared with the
         * remainder of the keygen process (solving the NTRU
         * equation).
         */
        uint64_t r;
        uint32_t f, v, k, neg;

        /*
         * First value:
         *  - flag 'neg' is randomly selected to be 0 or 1.
         *  - flag 'f' is set to 1 if the generated value is zero,
         *    or set to 0 otherwise.
         */
        r = get_rng_u64(rng);
        neg = (uint32_t)(r >> 63);
        r &= ~((uint64_t)1 << 63);
        f = (uint32_t)((r - gauss_1024_12289[0]) >> 63);

        /*
         * We produce a new random 63-bit integer r, and go over
         * the array, starting at index 1. We store in v the
         * index of the first array element which is not greater
         * than r, unless the flag f was already 1.
         */
        v = 0;
        r = get_rng_u64(rng);
        r &= ~((uint64_t)1 << 63);
        for (k = 1; k < (uint32_t)((sizeof gauss_1024_12289)
                                   / (sizeof gauss_1024_12289[0])); k ++) {
            uint32_t t;

            t = (uint32_t)((r - gauss_1024_12289[k]) >> 63) ^ 1;
            v |= k & -(t & (f ^ 1));
            f |= t;
        }

        /*
         * We apply the sign ('neg' flag). If the value is zero,
         * the sign has no effect. (v ^ -neg) + neg is a branchless
         * two's complement negation when neg = 1.
         */
        v = (v ^ -neg) + neg;

        /*
         * Generated value is added to val; the uint32_t bit
         * pattern is reinterpreted as int32_t (two's complement).
         */
        val += *(int32_t *)&v;
    }
    return val;
}
+
+/*
+ * The MAX_BL_SMALL[] and MAX_BL_LARGE[] contain the lengths, in 31-bit
+ * words, of intermediate values in the computation:
+ *
+ * MAX_BL_SMALL[depth]: length for the input f and g at that depth
+ * MAX_BL_LARGE[depth]: length for the unreduced F and G at that depth
+ *
+ * Rules:
+ *
+ * - Within an array, values grow.
+ *
+ * - The 'SMALL' array must have an entry for maximum depth, corresponding
+ * to the size of values used in the binary GCD. There is no such value
+ * for the 'LARGE' array (the binary GCD yields already reduced
+ * coefficients).
+ *
+ * - MAX_BL_LARGE[depth] >= MAX_BL_SMALL[depth + 1].
+ *
+ * - Values must be large enough to handle the common cases, with some
+ * margins.
+ *
+ * - Values must not be "too large" either because we will convert some
+ * integers into floating-point values by considering the top 10 words,
+ * i.e. 310 bits; hence, for values of length more than 10 words, we
+ * should take care to have the length centered on the expected size.
+ *
+ * The following average lengths, in bits, have been measured on thousands
+ * of random keys (fg = max length of the absolute value of coefficients
+ * of f and g at that depth; FG = idem for the unreduced F and G; for the
+ * maximum depth, F and G are the output of binary GCD, multiplied by q;
+ * for each value, the average and standard deviation are provided).
+ *
+ * Binary case:
+ * depth: 10 fg: 6307.52 (24.48) FG: 6319.66 (24.51)
+ * depth: 9 fg: 3138.35 (12.25) FG: 9403.29 (27.55)
+ * depth: 8 fg: 1576.87 ( 7.49) FG: 4703.30 (14.77)
+ * depth: 7 fg: 794.17 ( 4.98) FG: 2361.84 ( 9.31)
+ * depth: 6 fg: 400.67 ( 3.10) FG: 1188.68 ( 6.04)
+ * depth: 5 fg: 202.22 ( 1.87) FG: 599.81 ( 3.87)
+ * depth: 4 fg: 101.62 ( 1.02) FG: 303.49 ( 2.38)
+ * depth: 3 fg: 50.37 ( 0.53) FG: 153.65 ( 1.39)
+ * depth: 2 fg: 24.07 ( 0.25) FG: 78.20 ( 0.73)
+ * depth: 1 fg: 10.99 ( 0.08) FG: 39.82 ( 0.41)
+ * depth: 0 fg: 4.00 ( 0.00) FG: 19.61 ( 0.49)
+ *
+ * Integers are actually represented either in binary notation over
+ * 31-bit words (signed, using two's complement), or in RNS, modulo
+ * many small primes. These small primes are close to, but slightly
+ * lower than, 2^31. Use of RNS loses less than two bits, even for
+ * the largest values.
+ *
+ * IMPORTANT: if these values are modified, then the temporary buffer
+ * sizes (FALCON_KEYGEN_TEMP_*, in inner.h) must be recomputed
+ * accordingly.
+ */
+
/* Lengths, in 31-bit words, of f and g at each depth; the last entry
   covers the binary-GCD stage at maximum depth. */
static const size_t MAX_BL_SMALL[] = {
    1, 1, 2, 2, 4, 7, 14, 27, 53, 106, 209
};

/* Lengths, in 31-bit words, of the unreduced F and G at each depth. */
static const size_t MAX_BL_LARGE[] = {
    2, 2, 5, 7, 12, 21, 40, 78, 157, 308
};
+
/*
 * Average and standard deviation for the maximum size (in bits) of
 * coefficients of (f,g), depending on depth. These values are used
 * to compute bounds for Babai's reduction. (They match the measured
 * 'fg' averages listed in the comment above MAX_BL_SMALL[].)
 */
static const struct {
    int avg;
    int std;
} BITLENGTH[] = {
    { 4, 0 },     /* depth 0 */
    { 11, 1 },    /* depth 1 */
    { 24, 1 },    /* depth 2 */
    { 50, 1 },    /* depth 3 */
    { 102, 1 },   /* depth 4 */
    { 202, 2 },   /* depth 5 */
    { 401, 4 },   /* depth 6 */
    { 794, 5 },   /* depth 7 */
    { 1577, 8 },  /* depth 8 */
    { 3138, 13 }, /* depth 9 */
    { 6308, 25 }  /* depth 10 */
};
+
/*
 * Minimal recursion depth at which we rebuild intermediate values
 * when reconstructing f and g.
 * NOTE(review): the consumer of this threshold is outside this chunk;
 * confirm its use in the solver's reconstruction path.
 */
#define DEPTH_INT_FG 4
+
/*
 * Compute squared norm of a short vector. Returned value is saturated
 * to 2^32-1 if the true squared norm is not lower than 2^31.
 */
static uint32_t
poly_small_sqnorm(const int8_t *f, unsigned logn) {
    size_t i, count;
    uint32_t acc, ovf;

    count = MKN(logn);
    acc = 0;
    ovf = 0;
    for (i = 0; i < count; i ++) {
        int32_t c;

        c = f[i];
        acc += (uint32_t)(c * c);
        /* Record whether any partial sum reached 2^31. */
        ovf |= acc;
    }
    /* If bit 31 was ever set, saturate to all-ones. */
    return acc | -(ovf >> 31);
}
+
+/*
+ * Align (upwards) the provided 'data' pointer with regards to 'base'
+ * so that the offset is a multiple of the size of 'fpr'.
+ */
+static fpr *
+align_fpr(void *base, void *data) {
+ uint8_t *cb, *cd;
+ size_t k, km;
+
+ cb = base;
+ cd = data;
+ k = (size_t)(cd - cb);
+ km = k % sizeof(fpr);
+ if (km) {
+ k += (sizeof(fpr)) - km;
+ }
+ return (fpr *)(cb + k);
+}
+
/*
 * Align (upwards) the provided 'data' pointer with regards to 'base'
 * so that the offset is a multiple of the size of 'uint32_t'.
 */
static uint32_t *
align_u32(void *base, void *data) {
    uint8_t *pb, *pd;
    size_t off, rem;

    pb = base;
    pd = data;
    off = (size_t)(pd - pb);
    rem = off % sizeof(uint32_t);
    if (rem != 0) {
        off += sizeof(uint32_t) - rem;
    }
    return (uint32_t *)(pb + off);
}
+
+/*
+ * Convert a small vector to floating point.
+ */
+static void
+poly_small_to_fp(fpr *x, const int8_t *f, unsigned logn) {
+ size_t n, u;
+
+ n = MKN(logn);
+ for (u = 0; u < n; u ++) {
+ x[u] = fpr_of(f[u]);
+ }
+}
+
/*
 * Input: f,g of degree N = 2^logn; 'depth' is used only to get their
 * individual length.
 *
 * Output: f',g' of degree N/2, with the length for 'depth+1'.
 *
 * Values are in RNS; input and/or output may also be in NTT.
 */
static void
make_fg_step(uint32_t *data, unsigned logn, unsigned depth,
             int in_ntt, int out_ntt) {
    size_t n, hn, u;
    size_t slen, tlen;
    uint32_t *fd, *gd, *fs, *gs, *gm, *igm, *t1;
    const small_prime *primes;

    n = (size_t)1 << logn;
    hn = n >> 1;
    slen = MAX_BL_SMALL[depth];
    tlen = MAX_BL_SMALL[depth + 1];
    primes = PRIMES;

    /*
     * Prepare room for the result. Layout in data[]:
     *   fd (hn*tlen) | gd (hn*tlen) | fs (n*slen) | gs (n*slen)
     *   | gm (n) | igm (n) | t1 (scratch)
     * The input f and g, currently at the start of data[], are moved
     * into fs/gs so that fd/gd can receive the result.
     */
    fd = data;
    gd = fd + hn * tlen;
    fs = gd + hn * tlen;
    gs = fs + n * slen;
    gm = gs + n * slen;
    igm = gm + n;
    t1 = igm + n;
    memmove(fs, data, 2 * n * slen * sizeof * data);

    /*
     * First slen words: we use the input values directly, and apply
     * inverse NTT as we go.
     */
    for (u = 0; u < slen; u ++) {
        uint32_t p, p0i, R2;
        size_t v;
        uint32_t *x;

        p = primes[u].p;
        p0i = modp_ninv31(p);
        R2 = modp_R2(p, p0i);
        modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i);

        for (v = 0, x = fs + u; v < n; v ++, x += slen) {
            t1[v] = *x;
        }
        if (!in_ntt) {
            modp_NTT2(t1, gm, logn, p, p0i);
        }
        /*
         * Multiply the two NTT coefficients of each pair (with
         * Montgomery adjustment by R2) to get the half-degree
         * output coefficient.
         */
        for (v = 0, x = fd + u; v < hn; v ++, x += tlen) {
            uint32_t w0, w1;

            w0 = t1[(v << 1) + 0];
            w1 = t1[(v << 1) + 1];
            *x = modp_montymul(
                modp_montymul(w0, w1, p, p0i), R2, p, p0i);
        }
        if (in_ntt) {
            modp_iNTT2_ext(fs + u, slen, igm, logn, p, p0i);
        }

        for (v = 0, x = gs + u; v < n; v ++, x += slen) {
            t1[v] = *x;
        }
        if (!in_ntt) {
            modp_NTT2(t1, gm, logn, p, p0i);
        }
        for (v = 0, x = gd + u; v < hn; v ++, x += tlen) {
            uint32_t w0, w1;

            w0 = t1[(v << 1) + 0];
            w1 = t1[(v << 1) + 1];
            *x = modp_montymul(
                modp_montymul(w0, w1, p, p0i), R2, p, p0i);
        }
        if (in_ntt) {
            modp_iNTT2_ext(gs + u, slen, igm, logn, p, p0i);
        }

        if (!out_ntt) {
            modp_iNTT2_ext(fd + u, tlen, igm, logn - 1, p, p0i);
            modp_iNTT2_ext(gd + u, tlen, igm, logn - 1, p, p0i);
        }
    }

    /*
     * Since the fs and gs words have been de-NTTized, we can use the
     * CRT to rebuild the values. (gm is no longer needed here and is
     * reused as scratch space.)
     */
    zint_rebuild_CRT(fs, slen, slen, n, primes, 1, gm);
    zint_rebuild_CRT(gs, slen, slen, n, primes, 1, gm);

    /*
     * Remaining words: use modular reductions to extract the values.
     */
    for (u = slen; u < tlen; u ++) {
        uint32_t p, p0i, R2, Rx;
        size_t v;
        uint32_t *x;

        p = primes[u].p;
        p0i = modp_ninv31(p);
        R2 = modp_R2(p, p0i);
        Rx = modp_Rx((unsigned)slen, p, p0i, R2);
        modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i);
        for (v = 0, x = fs; v < n; v ++, x += slen) {
            t1[v] = zint_mod_small_signed(x, slen, p, p0i, R2, Rx);
        }
        modp_NTT2(t1, gm, logn, p, p0i);
        for (v = 0, x = fd + u; v < hn; v ++, x += tlen) {
            uint32_t w0, w1;

            w0 = t1[(v << 1) + 0];
            w1 = t1[(v << 1) + 1];
            *x = modp_montymul(
                modp_montymul(w0, w1, p, p0i), R2, p, p0i);
        }
        for (v = 0, x = gs; v < n; v ++, x += slen) {
            t1[v] = zint_mod_small_signed(x, slen, p, p0i, R2, Rx);
        }
        modp_NTT2(t1, gm, logn, p, p0i);
        for (v = 0, x = gd + u; v < hn; v ++, x += tlen) {
            uint32_t w0, w1;

            w0 = t1[(v << 1) + 0];
            w1 = t1[(v << 1) + 1];
            *x = modp_montymul(
                modp_montymul(w0, w1, p, p0i), R2, p, p0i);
        }

        if (!out_ntt) {
            modp_iNTT2_ext(fd + u, tlen, igm, logn - 1, p, p0i);
            modp_iNTT2_ext(gd + u, tlen, igm, logn - 1, p, p0i);
        }
    }
}
+
/*
 * Compute f and g at a specific depth, in RNS notation.
 *
 * Returned values are stored in the data[] array, at slen words per integer.
 *
 * Conditions:
 *   0 <= depth <= logn
 *
 * Space use in data[]: enough room for any two successive values (f', g',
 * f and g).
 */
static void
make_fg(uint32_t *data, const int8_t *f, const int8_t *g,
        unsigned logn, unsigned depth, int out_ntt) {
    size_t n, u;
    uint32_t *ft, *gt, p0;
    unsigned d;
    const small_prime *primes;

    /*
     * Load f and g as residues modulo the first small prime.
     */
    n = MKN(logn);
    ft = data;
    gt = ft + n;
    primes = PRIMES;
    p0 = primes[0].p;
    for (u = 0; u < n; u ++) {
        ft[u] = modp_set(f[u], p0);
        gt[u] = modp_set(g[u], p0);
    }

    /*
     * depth 0 with NTT output: just apply the NTT in place.
     */
    if (depth == 0 && out_ntt) {
        uint32_t *gm, *igm;
        uint32_t p, p0i;

        p = primes[0].p;
        p0i = modp_ninv31(p);
        gm = gt + n;
        igm = gm + MKN(logn);
        modp_mkgm2(gm, igm, logn, primes[0].g, p, p0i);
        modp_NTT2(ft, gm, logn, p, p0i);
        modp_NTT2(gt, gm, logn, p, p0i);
        return;
    }

    if (depth == 0) {
        return;
    }
    if (depth == 1) {
        make_fg_step(data, logn, 0, 0, out_ntt);
        return;
    }
    /*
     * Apply the degree-halving step repeatedly; intermediate steps
     * keep values in NTT representation (in_ntt = out_ntt = 1), the
     * last step honors the caller's out_ntt.
     */
    make_fg_step(data, logn, 0, 0, 1);
    for (d = 1; d + 1 < depth; d ++) {
        make_fg_step(data, logn - d, d, 1, 1);
    }
    make_fg_step(data, logn - depth + 1, depth - 1, 1, out_ntt);
}
+
/*
 * Solving the NTRU equation, deepest level: compute the resultants of
 * f and g with X^N+1, and use binary GCD. The F and G values are
 * returned in tmp[].
 *
 * Returned value: 1 on success, 0 on error.
 */
static int
solve_NTRU_deepest(unsigned logn_top,
                   const int8_t *f, const int8_t *g, uint32_t *tmp) {
    size_t len;
    uint32_t *Fp, *Gp, *fp, *gp, *t1, q;
    const small_prime *primes;

    len = MAX_BL_SMALL[logn_top];
    primes = PRIMES;

    /*
     * Layout in tmp[]: Fp | Gp | fp | gp (len words each) | t1
     * (scratch).
     */
    Fp = tmp;
    Gp = Fp + len;
    fp = Gp + len;
    gp = fp + len;
    t1 = gp + len;

    /*
     * At maximum depth, f and g each reduce to a single big integer,
     * computed here in RNS.
     */
    make_fg(fp, f, g, logn_top, logn_top, 0);

    /*
     * We use the CRT to rebuild the resultants as big integers.
     * There are two such big integers. The resultants are always
     * nonnegative.
     */
    zint_rebuild_CRT(fp, len, len, 2, primes, 0, t1);

    /*
     * Apply the binary GCD. The zint_bezout() function works only
     * if both inputs are odd.
     *
     * We can test on the result and return 0 because that would
     * imply failure of the NTRU solving equation, and the (f,g)
     * values will be abandoned in that case.
     */
    if (!zint_bezout(Gp, Fp, fp, gp, len, t1)) {
        return 0;
    }

    /*
     * Multiply the two values by the target value q. Values must
     * fit in the destination arrays.
     * We can again test on the returned words: a non-zero output
     * of zint_mul_small() means that we exceeded our array
     * capacity, and that implies failure and rejection of (f,g).
     */
    q = 12289;
    if (zint_mul_small(Fp, len, q) != 0
            || zint_mul_small(Gp, len, q) != 0) {
        return 0;
    }

    return 1;
}
+
+/*
+ * Solving the NTRU equation, intermediate level. Upon entry, the F and G
+ * from the previous level should be in the tmp[] array.
+ * This function MAY be invoked for the top-level (in which case depth = 0).
+ *
+ * Returned value: 1 on success, 0 on error.
+ */
+static int
+solve_NTRU_intermediate(unsigned logn_top,
+ const int8_t *f, const int8_t *g, unsigned depth, uint32_t *tmp) {
+ /*
+ * In this function, 'logn' is the log2 of the degree for
+ * this step. If N = 2^logn, then:
+ * - the F and G values already in fk->tmp (from the deeper
+ * levels) have degree N/2;
+ * - this function should return F and G of degree N.
+ */
+ unsigned logn;
+ size_t n, hn, slen, dlen, llen, rlen, FGlen, u;
+ uint32_t *Fd, *Gd, *Ft, *Gt, *ft, *gt, *t1;
+ fpr *rt1, *rt2, *rt3, *rt4, *rt5;
+ int scale_fg, minbl_fg, maxbl_fg, maxbl_FG, scale_k;
+ uint32_t *x, *y;
+ int32_t *k;
+ const small_prime *primes;
+
+ logn = logn_top - depth;
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+
+ /*
+ * slen = size for our input f and g; also size of the reduced
+ * F and G we return (degree N)
+ *
+ * dlen = size of the F and G obtained from the deeper level
+ * (degree N/2 or N/3)
+ *
+ * llen = size for intermediary F and G before reduction (degree N)
+ *
+ * We build our non-reduced F and G as two independent halves each,
+ * of degree N/2 (F = F0 + X*F1, G = G0 + X*G1).
+ */
+ slen = MAX_BL_SMALL[depth];
+ dlen = MAX_BL_SMALL[depth + 1];
+ llen = MAX_BL_LARGE[depth];
+ primes = PRIMES;
+
+ /*
+ * Fd and Gd are the F and G from the deeper level.
+ */
+ Fd = tmp;
+ Gd = Fd + dlen * hn;
+
+ /*
+ * Compute the input f and g for this level. Note that we get f
+ * and g in RNS + NTT representation.
+ */
+ ft = Gd + dlen * hn;
+ make_fg(ft, f, g, logn_top, depth, 1);
+
+ /*
+ * Move the newly computed f and g to make room for our candidate
+ * F and G (unreduced).
+ */
+ Ft = tmp;
+ Gt = Ft + n * llen;
+ t1 = Gt + n * llen;
+ memmove(t1, ft, 2 * n * slen * sizeof * ft);
+ ft = t1;
+ gt = ft + slen * n;
+ t1 = gt + slen * n;
+
+ /*
+ * Move Fd and Gd _after_ f and g.
+ */
+ memmove(t1, Fd, 2 * hn * dlen * sizeof * Fd);
+ Fd = t1;
+ Gd = Fd + hn * dlen;
+
+ /*
+ * We reduce Fd and Gd modulo all the small primes we will need,
+ * and store the values in Ft and Gt (only n/2 values in each).
+ */
+ for (u = 0; u < llen; u ++) {
+ uint32_t p, p0i, R2, Rx;
+ size_t v;
+ uint32_t *xs, *ys, *xd, *yd;
+
+ p = primes[u].p;
+ p0i = modp_ninv31(p);
+ R2 = modp_R2(p, p0i);
+ Rx = modp_Rx((unsigned)dlen, p, p0i, R2);
+ for (v = 0, xs = Fd, ys = Gd, xd = Ft + u, yd = Gt + u;
+ v < hn;
+ v ++, xs += dlen, ys += dlen, xd += llen, yd += llen) {
+ *xd = zint_mod_small_signed(xs, dlen, p, p0i, R2, Rx);
+ *yd = zint_mod_small_signed(ys, dlen, p, p0i, R2, Rx);
+ }
+ }
+
+ /*
+ * We do not need Fd and Gd after that point.
+ */
+
+ /*
+ * Compute our F and G modulo sufficiently many small primes.
+ */
+ for (u = 0; u < llen; u ++) {
+ uint32_t p, p0i, R2;
+ uint32_t *gm, *igm, *fx, *gx, *Fp, *Gp;
+ size_t v;
+
+ /*
+ * All computations are done modulo p.
+ */
+ p = primes[u].p;
+ p0i = modp_ninv31(p);
+ R2 = modp_R2(p, p0i);
+
+ /*
+ * If we processed slen words, then f and g have been
+ * de-NTTized, and are in RNS; we can rebuild them.
+ */
+ if (u == slen) {
+ zint_rebuild_CRT(ft, slen, slen, n, primes, 1, t1);
+ zint_rebuild_CRT(gt, slen, slen, n, primes, 1, t1);
+ }
+
+ gm = t1;
+ igm = gm + n;
+ fx = igm + n;
+ gx = fx + n;
+
+ modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i);
+
+ if (u < slen) {
+ for (v = 0, x = ft + u, y = gt + u;
+ v < n; v ++, x += slen, y += slen) {
+ fx[v] = *x;
+ gx[v] = *y;
+ }
+ modp_iNTT2_ext(ft + u, slen, igm, logn, p, p0i);
+ modp_iNTT2_ext(gt + u, slen, igm, logn, p, p0i);
+ } else {
+ uint32_t Rx;
+
+ Rx = modp_Rx((unsigned)slen, p, p0i, R2);
+ for (v = 0, x = ft, y = gt;
+ v < n; v ++, x += slen, y += slen) {
+ fx[v] = zint_mod_small_signed(x, slen,
+ p, p0i, R2, Rx);
+ gx[v] = zint_mod_small_signed(y, slen,
+ p, p0i, R2, Rx);
+ }
+ modp_NTT2(fx, gm, logn, p, p0i);
+ modp_NTT2(gx, gm, logn, p, p0i);
+ }
+
+ /*
+ * Get F' and G' modulo p and in NTT representation
+ * (they have degree n/2). These values were computed in
+ * a previous step, and stored in Ft and Gt.
+ */
+ Fp = gx + n;
+ Gp = Fp + hn;
+ for (v = 0, x = Ft + u, y = Gt + u;
+ v < hn; v ++, x += llen, y += llen) {
+ Fp[v] = *x;
+ Gp[v] = *y;
+ }
+ modp_NTT2(Fp, gm, logn - 1, p, p0i);
+ modp_NTT2(Gp, gm, logn - 1, p, p0i);
+
+ /*
+ * Compute our F and G modulo p.
+ *
+ * General case:
+ *
+ * we divide degree by d = 2 or 3
+ * f'(x^d) = N(f)(x^d) = f * adj(f)
+ * g'(x^d) = N(g)(x^d) = g * adj(g)
+ * f'*G' - g'*F' = q
+ * F = F'(x^d) * adj(g)
+ * G = G'(x^d) * adj(f)
+ *
+ * We compute things in the NTT. We group roots of phi
+ * such that all roots x in a group share the same x^d.
+ * If the roots in a group are x_1, x_2... x_d, then:
+ *
+ * N(f)(x_1^d) = f(x_1)*f(x_2)*...*f(x_d)
+ *
+ * Thus, we have:
+ *
+ * G(x_1) = f(x_2)*f(x_3)*...*f(x_d)*G'(x_1^d)
+ * G(x_2) = f(x_1)*f(x_3)*...*f(x_d)*G'(x_1^d)
+ * ...
+ * G(x_d) = f(x_1)*f(x_2)*...*f(x_{d-1})*G'(x_1^d)
+ *
+ * In all cases, we can thus compute F and G in NTT
+ * representation by a few simple multiplications.
+ * Moreover, in our chosen NTT representation, roots
+ * from the same group are consecutive in RAM.
+ */
+ for (v = 0, x = Ft + u, y = Gt + u; v < hn;
+ v ++, x += (llen << 1), y += (llen << 1)) {
+ uint32_t ftA, ftB, gtA, gtB;
+ uint32_t mFp, mGp;
+
+ ftA = fx[(v << 1) + 0];
+ ftB = fx[(v << 1) + 1];
+ gtA = gx[(v << 1) + 0];
+ gtB = gx[(v << 1) + 1];
+ mFp = modp_montymul(Fp[v], R2, p, p0i);
+ mGp = modp_montymul(Gp[v], R2, p, p0i);
+ x[0] = modp_montymul(gtB, mFp, p, p0i);
+ x[llen] = modp_montymul(gtA, mFp, p, p0i);
+ y[0] = modp_montymul(ftB, mGp, p, p0i);
+ y[llen] = modp_montymul(ftA, mGp, p, p0i);
+ }
+ modp_iNTT2_ext(Ft + u, llen, igm, logn, p, p0i);
+ modp_iNTT2_ext(Gt + u, llen, igm, logn, p, p0i);
+ }
+
+ /*
+ * Rebuild F and G with the CRT.
+ */
+ zint_rebuild_CRT(Ft, llen, llen, n, primes, 1, t1);
+ zint_rebuild_CRT(Gt, llen, llen, n, primes, 1, t1);
+
+ /*
+ * At that point, Ft, Gt, ft and gt are consecutive in RAM (in that
+ * order).
+ */
+
+ /*
+ * Apply Babai reduction to bring back F and G to size slen.
+ *
+ * We use the FFT to compute successive approximations of the
+ * reduction coefficient. We first isolate the top bits of
+ * the coefficients of f and g, and convert them to floating
+ * point; with the FFT, we compute adj(f), adj(g), and
+ * 1/(f*adj(f)+g*adj(g)).
+ *
+ * Then, we repeatedly apply the following:
+ *
+ * - Get the top bits of the coefficients of F and G into
+ * floating point, and use the FFT to compute:
+ * (F*adj(f)+G*adj(g))/(f*adj(f)+g*adj(g))
+ *
+ * - Convert back that value into normal representation, and
+ * round it to the nearest integers, yielding a polynomial k.
+ * Proper scaling is applied to f, g, F and G so that the
+ * coefficients fit on 32 bits (signed).
+ *
+ * - Subtract k*f from F and k*g from G.
+ *
+ * Under normal conditions, this process reduces the size of F
+ * and G by some bits at each iteration. For constant-time
+ * operation, we do not want to measure the actual length of
+ * F and G; instead, we do the following:
+ *
+ * - f and g are converted to floating-point, with some scaling
+ * if necessary to keep values in the representable range.
+ *
+ * - For each iteration, we _assume_ a maximum size for F and G,
+ * and use the values at that size. If we overreach, then
+ * we get zeros, which is harmless: the resulting coefficients
+ * of k will be 0 and the value won't be reduced.
+ *
+ * - We conservatively assume that F and G will be reduced by
+ * at least 25 bits at each iteration.
+ *
+ * Even when reaching the bottom of the reduction, reduction
+ * coefficient will remain low. If it goes out-of-range, then
+ * something wrong occurred and the whole NTRU solving fails.
+ */
+
+ /*
+ * Memory layout:
+ * - We need to compute and keep adj(f), adj(g), and
+ * 1/(f*adj(f)+g*adj(g)) (sizes N, N and N/2 fp numbers,
+ * respectively).
+ * - At each iteration we need two extra fp buffer (N fp values),
+ * and produce a k (N 32-bit words). k will be shared with one
+ * of the fp buffers.
+ * - To compute k*f and k*g efficiently (with the NTT), we need
+ * some extra room; we reuse the space of the temporary buffers.
+ *
+ * Arrays of 'fpr' are obtained from the temporary array itself.
+ * We ensure that the base is at a properly aligned offset (the
+ * source array tmp[] is supposed to be already aligned).
+ */
+
+ rt3 = align_fpr(tmp, t1);
+ rt4 = rt3 + n;
+ rt5 = rt4 + n;
+ rt1 = rt5 + (n >> 1);
+ k = (int32_t *)align_u32(tmp, rt1);
+ rt2 = align_fpr(tmp, k + n);
+ if (rt2 < (rt1 + n)) {
+ rt2 = rt1 + n;
+ }
+ t1 = (uint32_t *)k + n;
+
+ /*
+ * Get f and g into rt3 and rt4 as floating-point approximations.
+ *
+ * We need to "scale down" the floating-point representation of
+ * coefficients when they are too big. We want to keep the value
+ * below 2^310 or so. Thus, when values are larger than 10 words,
+ * we consider only the top 10 words. Array lengths have been
+ * computed so that average maximum length will fall in the
+ * middle or the upper half of these top 10 words.
+ */
+ rlen = slen;
+ if (rlen > 10) {
+ rlen = 10;
+ }
+ poly_big_to_fp(rt3, ft + slen - rlen, rlen, slen, logn);
+ poly_big_to_fp(rt4, gt + slen - rlen, rlen, slen, logn);
+
+ /*
+ * Values in rt3 and rt4 are downscaled by 2^(scale_fg).
+ */
+ scale_fg = 31 * (int)(slen - rlen);
+
+ /*
+ * Estimated boundaries for the maximum size (in bits) of the
+ * coefficients of (f,g). We use the measured average, and
+ * allow for a deviation of at most six times the standard
+ * deviation.
+ */
+ minbl_fg = BITLENGTH[depth].avg - 6 * BITLENGTH[depth].std;
+ maxbl_fg = BITLENGTH[depth].avg + 6 * BITLENGTH[depth].std;
+
+ /*
+ * Compute 1/(f*adj(f)+g*adj(g)) in rt5. We also keep adj(f)
+ * and adj(g) in rt3 and rt4, respectively.
+ */
+ PQCLEAN_FALCON1024_CLEAN_FFT(rt3, logn);
+ PQCLEAN_FALCON1024_CLEAN_FFT(rt4, logn);
+ PQCLEAN_FALCON1024_CLEAN_poly_invnorm2_fft(rt5, rt3, rt4, logn);
+ PQCLEAN_FALCON1024_CLEAN_poly_adj_fft(rt3, logn);
+ PQCLEAN_FALCON1024_CLEAN_poly_adj_fft(rt4, logn);
+
+ /*
+ * Reduce F and G repeatedly.
+ *
+ * The expected maximum bit length of coefficients of F and G
+ * is kept in maxbl_FG, with the corresponding word length in
+ * FGlen.
+ */
+ FGlen = llen;
+ maxbl_FG = 31 * (int)llen;
+
+ /*
+ * Each reduction operation computes the reduction polynomial
+ * "k". We need that polynomial to have coefficients that fit
+ * on 32-bit signed integers, with some scaling; thus, we use
+ * a descending sequence of scaling values, down to zero.
+ *
+ * The size of the coefficients of k is (roughly) the difference
+ * between the size of the coefficients of (F,G) and the size
+ * of the coefficients of (f,g). Thus, the maximum size of the
+ * coefficients of k is, at the start, maxbl_FG - minbl_fg;
+ * this is our starting scale value for k.
+ *
+ * We need to estimate the size of (F,G) during the execution of
+ * the algorithm; we are allowed some overestimation but not too
+ * much (poly_big_to_fp() uses a 310-bit window). Generally
+ * speaking, after applying a reduction with k scaled to
+ * scale_k, the size of (F,G) will be size(f,g) + scale_k + dd,
+ * where 'dd' is a few bits to account for the fact that the
+ * reduction is never perfect (intuitively, dd is on the order
+ * of sqrt(N), so at most 5 bits; we here allow for 10 extra
+ * bits).
+ *
+ * The size of (f,g) is not known exactly, but maxbl_fg is an
+ * upper bound.
+ */
+ scale_k = maxbl_FG - minbl_fg;
+
+ for (;;) {
+ int scale_FG, dc, new_maxbl_FG;
+ uint32_t scl, sch;
+ fpr pdc, pt;
+
+ /*
+ * Convert current F and G into floating-point. We apply
+ * scaling if the current length is more than 10 words.
+ */
+ rlen = FGlen;
+ if (rlen > 10) {
+ rlen = 10;
+ }
+ scale_FG = 31 * (int)(FGlen - rlen);
+ poly_big_to_fp(rt1, Ft + FGlen - rlen, rlen, llen, logn);
+ poly_big_to_fp(rt2, Gt + FGlen - rlen, rlen, llen, logn);
+
+ /*
+ * Compute (F*adj(f)+G*adj(g))/(f*adj(f)+g*adj(g)) in rt2.
+ */
+ PQCLEAN_FALCON1024_CLEAN_FFT(rt1, logn);
+ PQCLEAN_FALCON1024_CLEAN_FFT(rt2, logn);
+ PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(rt1, rt3, logn);
+ PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(rt2, rt4, logn);
+ PQCLEAN_FALCON1024_CLEAN_poly_add(rt2, rt1, logn);
+ PQCLEAN_FALCON1024_CLEAN_poly_mul_autoadj_fft(rt2, rt5, logn);
+ PQCLEAN_FALCON1024_CLEAN_iFFT(rt2, logn);
+
+ /*
+ * (f,g) are scaled by 'scale_fg', meaning that the
+ * numbers in rt3/rt4 should be multiplied by 2^(scale_fg)
+ * to have their true mathematical value.
+ *
+ * (F,G) are similarly scaled by 'scale_FG'. Therefore,
+ * the value we computed in rt2 is scaled by
+ * 'scale_FG-scale_fg'.
+ *
+ * We want that value to be scaled by 'scale_k', hence we
+ * apply a corrective scaling. After scaling, the values
+ * should fit in -2^31-1..+2^31-1.
+ */
+ dc = scale_k - scale_FG + scale_fg;
+
+ /*
+ * We will need to multiply values by 2^(-dc). The value
+ * 'dc' is not secret, so we can compute 2^(-dc) with a
+ * non-constant-time process.
+ * (We could use ldexp(), but we prefer to avoid any
+ * dependency on libm. When using FP emulation, we could
+ * use our fpr_ldexp(), which is constant-time.)
+ */
+ if (dc < 0) {
+ dc = -dc;
+ pt = fpr_two;
+ } else {
+ pt = fpr_onehalf;
+ }
+ pdc = fpr_one;
+ while (dc != 0) {
+ if ((dc & 1) != 0) {
+ pdc = fpr_mul(pdc, pt);
+ }
+ dc >>= 1;
+ pt = fpr_sqr(pt);
+ }
+
+ for (u = 0; u < n; u ++) {
+ fpr xv;
+
+ xv = fpr_mul(rt2[u], pdc);
+
+ /*
+ * Sometimes the values can be out-of-bounds if
+ * the algorithm fails; we must not call
+ * fpr_rint() (and cast to int32_t) if the value
+ * is not in-bounds. Note that the test does not
+ * break constant-time discipline, since any
+ * failure here implies that we discard the current
+ * secret key (f,g).
+ */
+ if (!fpr_lt(fpr_mtwo31m1, xv)
+ || !fpr_lt(xv, fpr_ptwo31m1)) {
+ return 0;
+ }
+ k[u] = (int32_t)fpr_rint(xv);
+ }
+
+ /*
+ * Values in k[] are integers. They really are scaled
+ * down by maxbl_FG - minbl_fg bits.
+ *
+ * If we are at low depth, then we use the NTT to
+ * compute k*f and k*g.
+ */
+ sch = (uint32_t)(scale_k / 31);
+ scl = (uint32_t)(scale_k % 31);
+ if (depth <= DEPTH_INT_FG) {
+ poly_sub_scaled_ntt(Ft, FGlen, llen, ft, slen, slen,
+ k, sch, scl, logn, t1);
+ poly_sub_scaled_ntt(Gt, FGlen, llen, gt, slen, slen,
+ k, sch, scl, logn, t1);
+ } else {
+ poly_sub_scaled(Ft, FGlen, llen, ft, slen, slen,
+ k, sch, scl, logn);
+ poly_sub_scaled(Gt, FGlen, llen, gt, slen, slen,
+ k, sch, scl, logn);
+ }
+
+ /*
+ * We compute the new maximum size of (F,G), assuming that
+ * (f,g) has _maximal_ length (i.e. that reduction is
+ * "late" instead of "early". We also adjust FGlen
+ * accordingly.
+ */
+ new_maxbl_FG = scale_k + maxbl_fg + 10;
+ if (new_maxbl_FG < maxbl_FG) {
+ maxbl_FG = new_maxbl_FG;
+ if ((int)FGlen * 31 >= maxbl_FG + 31) {
+ FGlen --;
+ }
+ }
+
+ /*
+ * We suppose that scaling down achieves a reduction by
+ * at least 25 bits per iteration. We stop when we have
+ * done the loop with an unscaled k.
+ */
+ if (scale_k <= 0) {
+ break;
+ }
+ scale_k -= 25;
+ if (scale_k < 0) {
+ scale_k = 0;
+ }
+ }
+
+ /*
+ * If (F,G) length was lowered below 'slen', then we must take
+ * care to re-extend the sign.
+ */
+ if (FGlen < slen) {
+ for (u = 0; u < n; u ++, Ft += llen, Gt += llen) {
+ size_t v;
+ uint32_t sw;
+
+ sw = -(Ft[FGlen - 1] >> 30) >> 1;
+ for (v = FGlen; v < slen; v ++) {
+ Ft[v] = sw;
+ }
+ sw = -(Gt[FGlen - 1] >> 30) >> 1;
+ for (v = FGlen; v < slen; v ++) {
+ Gt[v] = sw;
+ }
+ }
+ }
+
+ /*
+ * Compress encoding of all values to 'slen' words (this is the
+ * expected output format).
+ */
+ for (u = 0, x = tmp, y = tmp;
+ u < (n << 1); u ++, x += slen, y += llen) {
+ memmove(x, y, slen * sizeof * y);
+ }
+ return 1;
+}
+
+/*
+ * Solving the NTRU equation, binary case, depth = 1. Upon entry, the
+ * F and G from the previous level (depth = 2) should be in the tmp[]
+ * array.
+ *
+ * logn_top   log2 of the top-level degree; this level works at
+ *            degree n = 2^(logn_top - 1)
+ * f, g       top-level secret polynomials (degree 2^logn_top)
+ * tmp        scratch buffer; holds the deeper-level F and G on entry,
+ *            and the reduced F and G (one 32-bit word per coefficient,
+ *            F first, then G) on successful exit
+ *
+ * Returned value: 1 on success, 0 on error. An error means that an
+ * intermediate value fell out of the representable range, which
+ * implies that the candidate key (f,g) must be discarded.
+ */
+static int
+solve_NTRU_binary_depth1(unsigned logn_top,
+ const int8_t *f, const int8_t *g, uint32_t *tmp) {
+ /*
+ * The first half of this function is a copy of the corresponding
+ * part in solve_NTRU_intermediate(), for the reconstruction of
+ * the unreduced F and G. The second half (Babai reduction) is
+ * done differently, because the unreduced F and G fit in 53 bits
+ * of precision, allowing a much simpler process with lower RAM
+ * usage.
+ */
+ unsigned depth, logn;
+ size_t n_top, n, hn, slen, dlen, llen, u;
+ uint32_t *Fd, *Gd, *Ft, *Gt, *ft, *gt, *t1;
+ fpr *rt1, *rt2, *rt3, *rt4, *rt5, *rt6;
+ uint32_t *x, *y;
+
+ depth = 1;
+ n_top = (size_t)1 << logn_top;
+ logn = logn_top - depth;
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+
+ /*
+ * Equations are:
+ *
+ * f' = f0^2 - X^2*f1^2
+ * g' = g0^2 - X^2*g1^2
+ * F' and G' are a solution to f'G' - g'F' = q (from deeper levels)
+ * F = F'*(g0 - X*g1)
+ * G = G'*(f0 - X*f1)
+ *
+ * f0, f1, g0, g1, f', g', F' and G' are all "compressed" to
+ * degree N/2 (their odd-indexed coefficients are all zero).
+ */
+
+ /*
+ * slen = size for our input f and g; also size of the reduced
+ * F and G we return (degree N)
+ *
+ * dlen = size of the F and G obtained from the deeper level
+ * (degree N/2)
+ *
+ * llen = size for intermediary F and G before reduction (degree N)
+ *
+ * We build our non-reduced F and G as two independent halves each,
+ * of degree N/2 (F = F0 + X*F1, G = G0 + X*G1).
+ */
+ slen = MAX_BL_SMALL[depth];
+ dlen = MAX_BL_SMALL[depth + 1];
+ llen = MAX_BL_LARGE[depth];
+
+ /*
+ * Fd and Gd are the F and G from the deeper level. Ft and Gt
+ * are the destination arrays for the unreduced F and G.
+ */
+ Fd = tmp;
+ Gd = Fd + dlen * hn;
+ Ft = Gd + dlen * hn;
+ Gt = Ft + llen * n;
+
+ /*
+ * We reduce Fd and Gd modulo all the small primes we will need,
+ * and store the values in Ft and Gt.
+ */
+ for (u = 0; u < llen; u ++) {
+ uint32_t p, p0i, R2, Rx;
+ size_t v;
+ uint32_t *xs, *ys, *xd, *yd;
+
+ p = PRIMES[u].p;
+ p0i = modp_ninv31(p);
+ R2 = modp_R2(p, p0i);
+ Rx = modp_Rx((unsigned)dlen, p, p0i, R2);
+ for (v = 0, xs = Fd, ys = Gd, xd = Ft + u, yd = Gt + u;
+ v < hn;
+ v ++, xs += dlen, ys += dlen, xd += llen, yd += llen) {
+ *xd = zint_mod_small_signed(xs, dlen, p, p0i, R2, Rx);
+ *yd = zint_mod_small_signed(ys, dlen, p, p0i, R2, Rx);
+ }
+ }
+
+ /*
+ * Now Fd and Gd are not needed anymore; we can squeeze them out.
+ */
+ memmove(tmp, Ft, llen * n * sizeof(uint32_t));
+ Ft = tmp;
+ memmove(Ft + llen * n, Gt, llen * n * sizeof(uint32_t));
+ Gt = Ft + llen * n;
+ ft = Gt + llen * n;
+ gt = ft + slen * n;
+
+ t1 = gt + slen * n;
+
+ /*
+ * Compute our F and G modulo sufficiently many small primes.
+ */
+ for (u = 0; u < llen; u ++) {
+ uint32_t p, p0i, R2;
+ uint32_t *gm, *igm, *fx, *gx, *Fp, *Gp;
+ unsigned e;
+ size_t v;
+
+ /*
+ * All computations are done modulo p.
+ */
+ p = PRIMES[u].p;
+ p0i = modp_ninv31(p);
+ R2 = modp_R2(p, p0i);
+
+ /*
+ * We recompute things from the source f and g, of full
+ * degree. However, we will need only the n first elements
+ * of the inverse NTT table (igm); the call to modp_mkgm()
+ * below will fill n_top elements in igm[] (thus overflowing
+ * into fx[]) but later code will overwrite these extra
+ * elements.
+ */
+ gm = t1;
+ igm = gm + n_top;
+ fx = igm + n;
+ gx = fx + n_top;
+ modp_mkgm2(gm, igm, logn_top, PRIMES[u].g, p, p0i);
+
+ /*
+ * Set ft and gt to f and g modulo p, respectively.
+ */
+ for (v = 0; v < n_top; v ++) {
+ fx[v] = modp_set(f[v], p);
+ gx[v] = modp_set(g[v], p);
+ }
+
+ /*
+ * Convert to NTT and compute our f and g.
+ */
+ modp_NTT2(fx, gm, logn_top, p, p0i);
+ modp_NTT2(gx, gm, logn_top, p, p0i);
+ /*
+ * Each iteration halves the working degree (from 2^e down
+ * to 2^(e-1)) until this level's degree n = 2^logn is
+ * reached.
+ */
+ for (e = logn_top; e > logn; e --) {
+ modp_poly_rec_res(fx, e, p, p0i, R2);
+ modp_poly_rec_res(gx, e, p, p0i, R2);
+ }
+
+ /*
+ * From that point onward, we only need tables for
+ * degree n, so we can save some space.
+ */
+ if (depth > 0) { /* always true */
+ memmove(gm + n, igm, n * sizeof * igm);
+ igm = gm + n;
+ memmove(igm + n, fx, n * sizeof * ft);
+ fx = igm + n;
+ memmove(fx + n, gx, n * sizeof * gt);
+ gx = fx + n;
+ }
+
+ /*
+ * Get F' and G' modulo p and in NTT representation
+ * (they have degree n/2). These values were computed
+ * in a previous step, and stored in Ft and Gt.
+ */
+ Fp = gx + n;
+ Gp = Fp + hn;
+ for (v = 0, x = Ft + u, y = Gt + u;
+ v < hn; v ++, x += llen, y += llen) {
+ Fp[v] = *x;
+ Gp[v] = *y;
+ }
+ modp_NTT2(Fp, gm, logn - 1, p, p0i);
+ modp_NTT2(Gp, gm, logn - 1, p, p0i);
+
+ /*
+ * Compute our F and G modulo p.
+ *
+ * Equations are:
+ *
+ * f'(x^2) = N(f)(x^2) = f * adj(f)
+ * g'(x^2) = N(g)(x^2) = g * adj(g)
+ *
+ * f'*G' - g'*F' = q
+ *
+ * F = F'(x^2) * adj(g)
+ * G = G'(x^2) * adj(f)
+ *
+ * The NTT representation of f is f(w) for all w which
+ * are roots of phi. In the binary case, as well as in
+ * the ternary case for all depth except the deepest,
+ * these roots can be grouped in pairs (w,-w), and we
+ * then have:
+ *
+ * f(w) = adj(f)(-w)
+ * f(-w) = adj(f)(w)
+ *
+ * and w^2 is then a root for phi at the half-degree.
+ *
+ * At the deepest level in the ternary case, this still
+ * holds, in the following sense: the roots of x^2-x+1
+ * are (w,-w^2) (for w^3 = -1, and w != -1), and we
+ * have:
+ *
+ * f(w) = adj(f)(-w^2)
+ * f(-w^2) = adj(f)(w)
+ *
+ * In all case, we can thus compute F and G in NTT
+ * representation by a few simple multiplications.
+ * Moreover, the two roots for each pair are consecutive
+ * in our bit-reversal encoding.
+ */
+ for (v = 0, x = Ft + u, y = Gt + u;
+ v < hn; v ++, x += (llen << 1), y += (llen << 1)) {
+ uint32_t ftA, ftB, gtA, gtB;
+ uint32_t mFp, mGp;
+
+ ftA = fx[(v << 1) + 0];
+ ftB = fx[(v << 1) + 1];
+ gtA = gx[(v << 1) + 0];
+ gtB = gx[(v << 1) + 1];
+ mFp = modp_montymul(Fp[v], R2, p, p0i);
+ mGp = modp_montymul(Gp[v], R2, p, p0i);
+ x[0] = modp_montymul(gtB, mFp, p, p0i);
+ x[llen] = modp_montymul(gtA, mFp, p, p0i);
+ y[0] = modp_montymul(ftB, mGp, p, p0i);
+ y[llen] = modp_montymul(ftA, mGp, p, p0i);
+ }
+ modp_iNTT2_ext(Ft + u, llen, igm, logn, p, p0i);
+ modp_iNTT2_ext(Gt + u, llen, igm, logn, p, p0i);
+
+ /*
+ * Also save ft and gt (only up to size slen).
+ */
+ if (u < slen) {
+ modp_iNTT2(fx, igm, logn, p, p0i);
+ modp_iNTT2(gx, igm, logn, p, p0i);
+ for (v = 0, x = ft + u, y = gt + u;
+ v < n; v ++, x += slen, y += slen) {
+ *x = fx[v];
+ *y = gx[v];
+ }
+ }
+ }
+
+ /*
+ * Rebuild f, g, F and G with the CRT. Note that the elements of F
+ * and G are consecutive, and thus can be rebuilt in a single
+ * loop; similarly, the elements of f and g are consecutive.
+ */
+ zint_rebuild_CRT(Ft, llen, llen, n << 1, PRIMES, 1, t1);
+ zint_rebuild_CRT(ft, slen, slen, n << 1, PRIMES, 1, t1);
+
+ /*
+ * Here starts the Babai reduction, specialized for depth = 1.
+ *
+ * Candidates F and G (from Ft and Gt), and base f and g (ft and gt),
+ * are converted to floating point. There is no scaling, and a
+ * single pass is sufficient.
+ */
+
+ /*
+ * Convert F and G into floating point (rt1 and rt2).
+ */
+ rt1 = align_fpr(tmp, gt + slen * n);
+ rt2 = rt1 + n;
+ poly_big_to_fp(rt1, Ft, llen, llen, logn);
+ poly_big_to_fp(rt2, Gt, llen, llen, logn);
+
+ /*
+ * Integer representation of F and G is no longer needed, we
+ * can remove it.
+ */
+ memmove(tmp, ft, 2 * slen * n * sizeof * ft);
+ ft = tmp;
+ gt = ft + slen * n;
+ rt3 = align_fpr(tmp, gt + slen * n);
+ memmove(rt3, rt1, 2 * n * sizeof * rt1);
+ rt1 = rt3;
+ rt2 = rt1 + n;
+ rt3 = rt2 + n;
+ rt4 = rt3 + n;
+
+ /*
+ * Convert f and g into floating point (rt3 and rt4).
+ */
+ poly_big_to_fp(rt3, ft, slen, slen, logn);
+ poly_big_to_fp(rt4, gt, slen, slen, logn);
+
+ /*
+ * Remove unneeded ft and gt.
+ */
+ memmove(tmp, rt1, 4 * n * sizeof * rt1);
+ rt1 = (fpr *)tmp;
+ rt2 = rt1 + n;
+ rt3 = rt2 + n;
+ rt4 = rt3 + n;
+
+ /*
+ * We now have:
+ * rt1 = F
+ * rt2 = G
+ * rt3 = f
+ * rt4 = g
+ * in that order in RAM. We convert all of them to FFT.
+ */
+ PQCLEAN_FALCON1024_CLEAN_FFT(rt1, logn);
+ PQCLEAN_FALCON1024_CLEAN_FFT(rt2, logn);
+ PQCLEAN_FALCON1024_CLEAN_FFT(rt3, logn);
+ PQCLEAN_FALCON1024_CLEAN_FFT(rt4, logn);
+
+ /*
+ * Compute:
+ * rt5 = F*adj(f) + G*adj(g)
+ * rt6 = 1 / (f*adj(f) + g*adj(g))
+ * (Note that rt6 is half-length.)
+ */
+ rt5 = rt4 + n;
+ rt6 = rt5 + n;
+ PQCLEAN_FALCON1024_CLEAN_poly_add_muladj_fft(rt5, rt1, rt2, rt3, rt4, logn);
+ PQCLEAN_FALCON1024_CLEAN_poly_invnorm2_fft(rt6, rt3, rt4, logn);
+
+ /*
+ * Compute:
+ * rt5 = (F*adj(f)+G*adj(g)) / (f*adj(f)+g*adj(g))
+ */
+ PQCLEAN_FALCON1024_CLEAN_poly_mul_autoadj_fft(rt5, rt6, logn);
+
+ /*
+ * Compute k as the rounded version of rt5. Check that none of
+ * the values is larger than 2^63-1 (in absolute value)
+ * because that would make the fpr_rint() do something undefined;
+ * note that any out-of-bounds value here implies a failure and
+ * (f,g) will be discarded, so we can make a simple test.
+ */
+ PQCLEAN_FALCON1024_CLEAN_iFFT(rt5, logn);
+ for (u = 0; u < n; u ++) {
+ fpr z;
+
+ z = rt5[u];
+ if (!fpr_lt(z, fpr_ptwo63m1) || !fpr_lt(fpr_mtwo63m1, z)) {
+ return 0;
+ }
+ rt5[u] = fpr_of(fpr_rint(z));
+ }
+ PQCLEAN_FALCON1024_CLEAN_FFT(rt5, logn);
+
+ /*
+ * Subtract k*f from F, and k*g from G.
+ */
+ PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(rt3, rt5, logn);
+ PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(rt4, rt5, logn);
+ PQCLEAN_FALCON1024_CLEAN_poly_sub(rt1, rt3, logn);
+ PQCLEAN_FALCON1024_CLEAN_poly_sub(rt2, rt4, logn);
+ PQCLEAN_FALCON1024_CLEAN_iFFT(rt1, logn);
+ PQCLEAN_FALCON1024_CLEAN_iFFT(rt2, logn);
+
+ /*
+ * Convert back F and G to integers, and return.
+ */
+ Ft = tmp;
+ Gt = Ft + n;
+ rt3 = align_fpr(tmp, Gt + n);
+ memmove(rt3, rt1, 2 * n * sizeof * rt1);
+ rt1 = rt3;
+ rt2 = rt1 + n;
+ for (u = 0; u < n; u ++) {
+ Ft[u] = (uint32_t)fpr_rint(rt1[u]);
+ Gt[u] = (uint32_t)fpr_rint(rt2[u]);
+ }
+
+ return 1;
+}
+
+/*
+ * Solving the NTRU equation, top level (depth = 0). Upon entry, the
+ * F and G from the previous level (depth = 1) should be in the tmp[]
+ * array.
+ *
+ * At this level all intermediate values fit in 31-bit integers, so a
+ * single small prime suffices and no multi-word (CRT) arithmetic is
+ * needed.
+ *
+ * Returned value: 1 on success, 0 on error. (This function contains no
+ * failure path of its own and always returns 1; the computed F and G
+ * are validated afterwards by the caller, see solve_NTRU().)
+ */
+static int
+solve_NTRU_binary_depth0(unsigned logn,
+ const int8_t *f, const int8_t *g, uint32_t *tmp) {
+ size_t n, hn, u;
+ uint32_t p, p0i, R2;
+ uint32_t *Fp, *Gp, *t1, *t2, *t3, *t4, *t5;
+ uint32_t *gm, *igm, *ft, *gt;
+ fpr *rt2, *rt3;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+
+ /*
+ * Equations are:
+ *
+ * f' = f0^2 - X^2*f1^2
+ * g' = g0^2 - X^2*g1^2
+ * F' and G' are a solution to f'G' - g'F' = q (from deeper levels)
+ * F = F'*(g0 - X*g1)
+ * G = G'*(f0 - X*f1)
+ *
+ * f0, f1, g0, g1, f', g', F' and G' are all "compressed" to
+ * degree N/2 (their odd-indexed coefficients are all zero).
+ *
+ * Everything should fit in 31-bit integers, hence we can just use
+ * the first small prime p = 2147473409.
+ */
+ p = PRIMES[0].p;
+ p0i = modp_ninv31(p);
+ R2 = modp_R2(p, p0i);
+
+ Fp = tmp;
+ Gp = Fp + hn;
+ ft = Gp + hn;
+ gt = ft + n;
+ gm = gt + n;
+ igm = gm + n;
+
+ modp_mkgm2(gm, igm, logn, PRIMES[0].g, p, p0i);
+
+ /*
+ * Convert F' and G' in NTT representation.
+ */
+ for (u = 0; u < hn; u ++) {
+ Fp[u] = modp_set(zint_one_to_plain(Fp + u), p);
+ Gp[u] = modp_set(zint_one_to_plain(Gp + u), p);
+ }
+ modp_NTT2(Fp, gm, logn - 1, p, p0i);
+ modp_NTT2(Gp, gm, logn - 1, p, p0i);
+
+ /*
+ * Load f and g and convert them to NTT representation.
+ */
+ for (u = 0; u < n; u ++) {
+ ft[u] = modp_set(f[u], p);
+ gt[u] = modp_set(g[u], p);
+ }
+ modp_NTT2(ft, gm, logn, p, p0i);
+ modp_NTT2(gt, gm, logn, p, p0i);
+
+ /*
+ * Build the unreduced F,G in ft and gt.
+ */
+ for (u = 0; u < n; u += 2) {
+ uint32_t ftA, ftB, gtA, gtB;
+ uint32_t mFp, mGp;
+
+ ftA = ft[u + 0];
+ ftB = ft[u + 1];
+ gtA = gt[u + 0];
+ gtB = gt[u + 1];
+ mFp = modp_montymul(Fp[u >> 1], R2, p, p0i);
+ mGp = modp_montymul(Gp[u >> 1], R2, p, p0i);
+ ft[u + 0] = modp_montymul(gtB, mFp, p, p0i);
+ ft[u + 1] = modp_montymul(gtA, mFp, p, p0i);
+ gt[u + 0] = modp_montymul(ftB, mGp, p, p0i);
+ gt[u + 1] = modp_montymul(ftA, mGp, p, p0i);
+ }
+ modp_iNTT2(ft, igm, logn, p, p0i);
+ modp_iNTT2(gt, igm, logn, p, p0i);
+
+ Gp = Fp + n;
+ t1 = Gp + n;
+ memmove(Fp, ft, 2 * n * sizeof * ft);
+
+ /*
+ * We now need to apply the Babai reduction. At that point,
+ * we have F and G in two n-word arrays.
+ *
+ * We can compute F*adj(f)+G*adj(g) and f*adj(f)+g*adj(g)
+ * modulo p, using the NTT. We still move memory around in
+ * order to save RAM.
+ */
+ t2 = t1 + n;
+ t3 = t2 + n;
+ t4 = t3 + n;
+ t5 = t4 + n;
+
+ /*
+ * Compute the NTT tables in t1 and t2. We do not keep t2
+ * (we'll recompute it later on).
+ */
+ modp_mkgm2(t1, t2, logn, PRIMES[0].g, p, p0i);
+
+ /*
+ * Convert F and G to NTT.
+ */
+ modp_NTT2(Fp, t1, logn, p, p0i);
+ modp_NTT2(Gp, t1, logn, p, p0i);
+
+ /*
+ * Load f and adj(f) in t4 and t5, and convert them to NTT
+ * representation.
+ */
+ t4[0] = t5[0] = modp_set(f[0], p);
+ for (u = 1; u < n; u ++) {
+ t4[u] = modp_set(f[u], p);
+ t5[n - u] = modp_set(-f[u], p);
+ }
+ modp_NTT2(t4, t1, logn, p, p0i);
+ modp_NTT2(t5, t1, logn, p, p0i);
+
+ /*
+ * Compute F*adj(f) in t2, and f*adj(f) in t3.
+ */
+ for (u = 0; u < n; u ++) {
+ uint32_t w;
+
+ w = modp_montymul(t5[u], R2, p, p0i);
+ t2[u] = modp_montymul(w, Fp[u], p, p0i);
+ t3[u] = modp_montymul(w, t4[u], p, p0i);
+ }
+
+ /*
+ * Load g and adj(g) in t4 and t5, and convert them to NTT
+ * representation.
+ */
+ t4[0] = t5[0] = modp_set(g[0], p);
+ for (u = 1; u < n; u ++) {
+ t4[u] = modp_set(g[u], p);
+ t5[n - u] = modp_set(-g[u], p);
+ }
+ modp_NTT2(t4, t1, logn, p, p0i);
+ modp_NTT2(t5, t1, logn, p, p0i);
+
+ /*
+ * Add G*adj(g) to t2, and g*adj(g) to t3.
+ */
+ for (u = 0; u < n; u ++) {
+ uint32_t w;
+
+ w = modp_montymul(t5[u], R2, p, p0i);
+ t2[u] = modp_add(t2[u],
+ modp_montymul(w, Gp[u], p, p0i), p);
+ t3[u] = modp_add(t3[u],
+ modp_montymul(w, t4[u], p, p0i), p);
+ }
+
+ /*
+ * Convert back t2 and t3 to normal representation (normalized
+ * around 0), and then
+ * move them to t1 and t2. We first need to recompute the
+ * inverse table for NTT.
+ */
+ modp_mkgm2(t1, t4, logn, PRIMES[0].g, p, p0i);
+ modp_iNTT2(t2, t4, logn, p, p0i);
+ modp_iNTT2(t3, t4, logn, p, p0i);
+ for (u = 0; u < n; u ++) {
+ t1[u] = (uint32_t)modp_norm(t2[u], p);
+ t2[u] = (uint32_t)modp_norm(t3[u], p);
+ }
+
+ /*
+ * At that point, array contents are:
+ *
+ * F (NTT representation) (Fp)
+ * G (NTT representation) (Gp)
+ * F*adj(f)+G*adj(g) (t1)
+ * f*adj(f)+g*adj(g) (t2)
+ *
+ * We want to divide t1 by t2. The result is not integral; it
+ * must be rounded. We thus need to use the FFT.
+ */
+
+ /*
+ * Get f*adj(f)+g*adj(g) in FFT representation. Since this
+ * polynomial is auto-adjoint, all its coordinates in FFT
+ * representation are actually real, so we can truncate off
+ * the imaginary parts.
+ *
+ * The t1/t2 words were produced by modp_norm() above and are
+ * thus signed values stored in uint32_t, hence the int32_t
+ * reinterpretation before converting to floating point.
+ */
+ rt3 = align_fpr(tmp, t3);
+ for (u = 0; u < n; u ++) {
+ rt3[u] = fpr_of(((int32_t *)t2)[u]);
+ }
+ PQCLEAN_FALCON1024_CLEAN_FFT(rt3, logn);
+ rt2 = align_fpr(tmp, t2);
+ memmove(rt2, rt3, hn * sizeof * rt3);
+
+ /*
+ * Convert F*adj(f)+G*adj(g) in FFT representation.
+ */
+ rt3 = rt2 + hn;
+ for (u = 0; u < n; u ++) {
+ rt3[u] = fpr_of(((int32_t *)t1)[u]);
+ }
+ PQCLEAN_FALCON1024_CLEAN_FFT(rt3, logn);
+
+ /*
+ * Compute (F*adj(f)+G*adj(g))/(f*adj(f)+g*adj(g)) and get
+ * its rounded normal representation in t1.
+ */
+ PQCLEAN_FALCON1024_CLEAN_poly_div_autoadj_fft(rt3, rt2, logn);
+ PQCLEAN_FALCON1024_CLEAN_iFFT(rt3, logn);
+ for (u = 0; u < n; u ++) {
+ t1[u] = modp_set((int32_t)fpr_rint(rt3[u]), p);
+ }
+
+ /*
+ * RAM contents are now:
+ *
+ * F (NTT representation) (Fp)
+ * G (NTT representation) (Gp)
+ * k (t1)
+ *
+ * We want to compute F-k*f, and G-k*g.
+ */
+ t2 = t1 + n;
+ t3 = t2 + n;
+ t4 = t3 + n;
+ t5 = t4 + n;
+ modp_mkgm2(t2, t3, logn, PRIMES[0].g, p, p0i);
+ for (u = 0; u < n; u ++) {
+ t4[u] = modp_set(f[u], p);
+ t5[u] = modp_set(g[u], p);
+ }
+ modp_NTT2(t1, t2, logn, p, p0i);
+ modp_NTT2(t4, t2, logn, p, p0i);
+ modp_NTT2(t5, t2, logn, p, p0i);
+ for (u = 0; u < n; u ++) {
+ uint32_t kw;
+
+ kw = modp_montymul(t1[u], R2, p, p0i);
+ Fp[u] = modp_sub(Fp[u],
+ modp_montymul(kw, t4[u], p, p0i), p);
+ Gp[u] = modp_sub(Gp[u],
+ modp_montymul(kw, t5[u], p, p0i), p);
+ }
+ modp_iNTT2(Fp, t3, logn, p, p0i);
+ modp_iNTT2(Gp, t3, logn, p, p0i);
+ for (u = 0; u < n; u ++) {
+ Fp[u] = (uint32_t)modp_norm(Fp[u], p);
+ Gp[u] = (uint32_t)modp_norm(Gp[u], p);
+ }
+
+ return 1;
+}
+
+/*
+ * Solve the NTRU equation f*G - g*F = q. Returned value is 1 on
+ * success, 0 on error.
+ * G can be NULL, in which case that value is computed but not returned.
+ * If any of the coefficients of F and G exceeds lim (in absolute value),
+ * then 0 is returned.
+ *
+ * The solution is computed recursively: solve_NTRU_deepest() handles
+ * the deepest level, then each level is lifted back up with
+ * solve_NTRU_intermediate(); for logn > 2, the last two levels use the
+ * specialized solve_NTRU_binary_depth1() and solve_NTRU_binary_depth0()
+ * variants. The final (F,G) is verified modulo a small prime before
+ * being accepted.
+ */
+static int
+solve_NTRU(unsigned logn, int8_t *F, int8_t *G,
+ const int8_t *f, const int8_t *g, int lim, uint32_t *tmp) {
+ size_t n, u;
+ uint32_t *ft, *gt, *Ft, *Gt, *gm;
+ uint32_t p, p0i, r;
+ const small_prime *primes;
+
+ n = MKN(logn);
+
+ if (!solve_NTRU_deepest(logn, f, g, tmp)) {
+ return 0;
+ }
+
+ /*
+ * For logn <= 2, we need to use solve_NTRU_intermediate()
+ * directly, because coefficients are a bit too large and
+ * do not fit the hypotheses in solve_NTRU_binary_depth0().
+ */
+ if (logn <= 2) {
+ unsigned depth;
+
+ depth = logn;
+ while (depth -- > 0) {
+ if (!solve_NTRU_intermediate(logn, f, g, depth, tmp)) {
+ return 0;
+ }
+ }
+ } else {
+ unsigned depth;
+
+ depth = logn;
+ while (depth -- > 2) {
+ if (!solve_NTRU_intermediate(logn, f, g, depth, tmp)) {
+ return 0;
+ }
+ }
+ if (!solve_NTRU_binary_depth1(logn, f, g, tmp)) {
+ return 0;
+ }
+ if (!solve_NTRU_binary_depth0(logn, f, g, tmp)) {
+ return 0;
+ }
+ }
+
+ /*
+ * If no buffer has been provided for G, use a temporary one.
+ * tmp[0..2n-1] holds the computed F and G, so the temporary
+ * G buffer is placed right after them.
+ */
+ if (G == NULL) {
+ G = (int8_t *)(tmp + 2 * n);
+ }
+
+ /*
+ * Final F and G are in fk->tmp, one word per coefficient
+ * (signed value over 31 bits).
+ */
+ if (!poly_big_to_small(F, tmp, lim, logn)
+ || !poly_big_to_small(G, tmp + n, lim, logn)) {
+ return 0;
+ }
+
+ /*
+ * Verify that the NTRU equation is fulfilled. Since all elements
+ * have short lengths, verifying modulo a small prime p works, and
+ * allows using the NTT.
+ *
+ * We put Gt[] first in tmp[], and process it first, so that it does
+ * not overlap with G[] in case we allocated it ourselves.
+ */
+ Gt = tmp;
+ ft = Gt + n;
+ gt = ft + n;
+ Ft = gt + n;
+ gm = Ft + n;
+
+ primes = PRIMES;
+ p = primes[0].p;
+ p0i = modp_ninv31(p);
+ modp_mkgm2(gm, tmp, logn, primes[0].g, p, p0i);
+ for (u = 0; u < n; u ++) {
+ Gt[u] = modp_set(G[u], p);
+ }
+ for (u = 0; u < n; u ++) {
+ ft[u] = modp_set(f[u], p);
+ gt[u] = modp_set(g[u], p);
+ Ft[u] = modp_set(F[u], p);
+ }
+ modp_NTT2(ft, gm, logn, p, p0i);
+ modp_NTT2(gt, gm, logn, p, p0i);
+ modp_NTT2(Ft, gm, logn, p, p0i);
+ modp_NTT2(Gt, gm, logn, p, p0i);
+ /*
+ * Both products below go through exactly one Montgomery
+ * multiplication each, hence they carry a 1/R factor; we thus
+ * compare against r = 12289/R mod p (q = 12289 is the Falcon
+ * modulus), which carries the same factor.
+ */
+ r = modp_montymul(12289, 1, p, p0i);
+ for (u = 0; u < n; u ++) {
+ uint32_t z;
+
+ z = modp_sub(modp_montymul(ft[u], Gt[u], p, p0i),
+ modp_montymul(gt[u], Ft[u], p, p0i), p);
+ if (z != r) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/*
+ * Generate a random polynomial with a Gaussian distribution. This function
+ * also makes sure that the resultant of the polynomial with phi is odd.
+ *
+ * rng    source of randomness for the Gaussian sampler
+ * f      receives the n = 2^logn sampled coefficients (each in -127..+127)
+ * logn   log2 of the polynomial degree
+ */
+static void
+poly_small_mkgauss(RNG_CONTEXT *rng, int8_t *f, unsigned logn) {
+ size_t n, u;
+ unsigned mod2;
+
+ n = MKN(logn);
+ /* mod2 tracks the parity (sum modulo 2) of the coefficients
+ emitted so far. */
+ mod2 = 0;
+ for (u = 0; u < n; u ++) {
+ int s;
+
+restart:
+ s = mkgauss(rng, logn);
+
+ /*
+ * We need the coefficient to fit within -127..+127;
+ * realistically, this is always the case except for
+ * the very low degrees (N = 2 or 4), for which there
+ * is no real security anyway.
+ */
+ if (s < -127 || s > 127) {
+ goto restart;
+ }
+
+ /*
+ * We need the sum of all coefficients to be odd (i.e.
+ * equal to 1 modulo 2); otherwise, the resultant of the
+ * polynomial with X^N+1 will be even, and the binary GCD
+ * will fail. The last coefficient is therefore resampled
+ * until it makes the total parity odd.
+ */
+ if (u == n - 1) {
+ if ((mod2 ^ (unsigned)(s & 1)) == 0) {
+ goto restart;
+ }
+ } else {
+ mod2 ^= (unsigned)(s & 1);
+ }
+ f[u] = (int8_t)s;
+ }
+}
+
+/* see falcon.h */
+/*
+ * Generate a new key pair.
+ *   rng    flipped SHAKE256 context used as randomness source
+ *   f,g,F,G  output buffers for the private-key polynomials
+ *   h      output buffer for the public key; may be NULL, in which
+ *          case the public key is computed into tmp[]
+ *   logn   log2 of the degree
+ *   tmp    scratch buffer
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_keygen(inner_shake256_context *rng,
+        int8_t *f, int8_t *g, int8_t *F, int8_t *G, uint16_t *h,
+        unsigned logn, uint8_t *tmp) {
+    /*
+     * Algorithm is the following:
+     *
+     *  - Generate f and g with the Gaussian distribution.
+     *
+     *  - If either Res(f,phi) or Res(g,phi) is even, try again.
+     *
+     *  - If ||(f,g)|| is too large, try again.
+     *
+     *  - If ||B~_{f,g}|| is too large, try again.
+     *
+     *  - If f is not invertible mod phi mod q, try again.
+     *
+     *  - Compute h = g/f mod phi mod q.
+     *
+     *  - Solve the NTRU equation fG - gF = q; if the solving fails,
+     *    try again. Usual failure condition is when Res(f,phi)
+     *    and Res(g,phi) are not prime to each other.
+     */
+    size_t n, u;
+    uint16_t *h2, *tmp2;
+    RNG_CONTEXT *rc;
+
+    n = MKN(logn);
+    rc = rng;
+
+    /*
+     * We need to generate f and g randomly, until we find values
+     * such that the norm of (g,-f), and of the orthogonalized
+     * vector, are satisfying. The orthogonalized vector is:
+     *   (q*adj(f)/(f*adj(f)+g*adj(g)), q*adj(g)/(f*adj(f)+g*adj(g)))
+     * (it is actually the (N+1)-th row of the Gram-Schmidt basis).
+     *
+     * In the binary case, coefficients of f and g are generated
+     * independently of each other, with a discrete Gaussian
+     * distribution of standard deviation 1.17*sqrt(q/(2*N)). Then,
+     * the two vectors have expected norm 1.17*sqrt(q), which is
+     * also our acceptance bound: we require both vectors to be no
+     * larger than that (this will be satisfied about 1/4th of the
+     * time, thus we expect sampling new (f,g) about 4 times for that
+     * step).
+     *
+     * We require that Res(f,phi) and Res(g,phi) are both odd (the
+     * NTRU equation solver requires it).
+     */
+    for (;;) {
+        fpr *rt1, *rt2, *rt3;
+        fpr bnorm;
+        uint32_t normf, normg, norm;
+        int lim;
+
+        /*
+         * The poly_small_mkgauss() function makes sure
+         * that the sum of coefficients is 1 modulo 2
+         * (i.e. the resultant of the polynomial with phi
+         * will be odd).
+         */
+        poly_small_mkgauss(rc, f, logn);
+        poly_small_mkgauss(rc, g, logn);
+
+        /*
+         * Verify that all coefficients are within the bounds
+         * defined in max_fg_bits. This is the case with
+         * overwhelming probability; this guarantees that the
+         * key will be encodable with FALCON_COMP_TRIM.
+         */
+        lim = 1 << (PQCLEAN_FALCON1024_CLEAN_max_fg_bits[logn] - 1);
+        for (u = 0; u < n; u ++) {
+            /*
+             * We can use non-CT tests since on any failure
+             * we will discard f and g.
+             */
+            if (f[u] >= lim || f[u] <= -lim
+                    || g[u] >= lim || g[u] <= -lim) {
+                lim = -1;   /* reuse lim as a rejection flag */
+                break;
+            }
+        }
+        if (lim < 0) {
+            continue;
+        }
+
+        /*
+         * Bound is 1.17*sqrt(q). We compute the squared
+         * norms. With q = 12289, the squared bound is:
+         *   (1.17^2)* 12289 = 16822.4121
+         * Since f and g are integral, the squared norm
+         * of (g,-f) is an integer.
+         */
+        normf = poly_small_sqnorm(f, logn);
+        normg = poly_small_sqnorm(g, logn);
+        /*
+         * If either squared norm has its top bit set (31-bit
+         * overflow flag), the OR forces norm to 0xFFFFFFFF so
+         * the acceptance test below rejects it.
+         */
+        norm = (normf + normg) | -((normf | normg) >> 31);
+        if (norm >= 16823) {
+            continue;
+        }
+
+        /*
+         * We compute the orthogonalized vector norm.
+         */
+        rt1 = (fpr *)tmp;
+        rt2 = rt1 + n;
+        rt3 = rt2 + n;
+        poly_small_to_fp(rt1, f, logn);
+        poly_small_to_fp(rt2, g, logn);
+        PQCLEAN_FALCON1024_CLEAN_FFT(rt1, logn);
+        PQCLEAN_FALCON1024_CLEAN_FFT(rt2, logn);
+        PQCLEAN_FALCON1024_CLEAN_poly_invnorm2_fft(rt3, rt1, rt2, logn);
+        PQCLEAN_FALCON1024_CLEAN_poly_adj_fft(rt1, logn);
+        PQCLEAN_FALCON1024_CLEAN_poly_adj_fft(rt2, logn);
+        PQCLEAN_FALCON1024_CLEAN_poly_mulconst(rt1, fpr_q, logn);
+        PQCLEAN_FALCON1024_CLEAN_poly_mulconst(rt2, fpr_q, logn);
+        PQCLEAN_FALCON1024_CLEAN_poly_mul_autoadj_fft(rt1, rt3, logn);
+        PQCLEAN_FALCON1024_CLEAN_poly_mul_autoadj_fft(rt2, rt3, logn);
+        PQCLEAN_FALCON1024_CLEAN_iFFT(rt1, logn);
+        PQCLEAN_FALCON1024_CLEAN_iFFT(rt2, logn);
+        bnorm = fpr_zero;
+        for (u = 0; u < n; u ++) {
+            bnorm = fpr_add(bnorm, fpr_sqr(rt1[u]));
+            bnorm = fpr_add(bnorm, fpr_sqr(rt2[u]));
+        }
+        if (!fpr_lt(bnorm, fpr_bnorm_max)) {
+            continue;
+        }
+
+        /*
+         * Compute public key h = g/f mod X^N+1 mod q. If this
+         * fails, we must restart.
+         */
+        if (h == NULL) {
+            /* No caller-provided buffer: build h inside tmp[]. */
+            h2 = (uint16_t *)tmp;
+            tmp2 = h2 + n;
+        } else {
+            h2 = h;
+            tmp2 = (uint16_t *)tmp;
+        }
+        if (!PQCLEAN_FALCON1024_CLEAN_compute_public(h2, f, g, logn, (uint8_t *)tmp2)) {
+            continue;
+        }
+
+        /*
+         * Solve the NTRU equation to get F and G.
+         */
+        lim = (1 << (PQCLEAN_FALCON1024_CLEAN_max_FG_bits[logn] - 1)) - 1;
+        if (!solve_NTRU(logn, F, G, f, g, lim, (uint32_t *)tmp)) {
+            continue;
+        }
+
+        /*
+         * Key pair is generated.
+         */
+        break;
+    }
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/manifest.mn b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/manifest.mn
new file mode 100644
index 000000000..ed57e0aee
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/manifest.mn
@@ -0,0 +1,32 @@
+# DO NOT EDIT: generated from manifest.mn.subdirs.template
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+CORE_DEPTH = ../../../../../..
+
+MODULE = oqs
+
+LIBRARY_NAME = oqs_src_sig_falcon_pqclean_falcon-1024_clean
+SHARED_LIBRARY = $(NULL)
+
+CSRCS = \
+ codec.c \
+ common.c \
+ fft.c \
+ fpr.c \
+ inner.c \
+ keygen.c \
+ pqclean.c \
+ rng.c \
+ sign.c \
+ vrfy.c \
+ $(NULL)
+
+# only add module debugging in opt builds if DEBUG_PKCS11 is set
+ifdef DEBUG_PKCS11
+ DEFINES += -DDEBUG_MODULE
+endif
+
+# This part of the code, including all sub-dirs, can be optimized for size
+export ALLOW_OPT_CODE_SIZE = 1
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/pqclean.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/pqclean.c
new file mode 100644
index 000000000..292357a86
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/pqclean.c
@@ -0,0 +1,386 @@
+#include "api.h"
+#include "inner.h"
+#include "randombytes.h"
+#include <stddef.h>
+#include <string.h>
+/*
+ * Wrapper for implementing the PQClean API.
+ */
+
+
+
+#define NONCELEN 40
+#define SEEDLEN 48
+
+/*
+ * Encoding formats (nnnn = log of degree, 9 for Falcon-512, 10 for Falcon-1024)
+ *
+ * private key:
+ * header byte: 0101nnnn
+ * private f (6 or 5 bits by element, depending on degree)
+ * private g (6 or 5 bits by element, depending on degree)
+ * private F (8 bits by element)
+ *
+ * public key:
+ * header byte: 0000nnnn
+ * public h (14 bits by element)
+ *
+ * signature:
+ * header byte: 0011nnnn
+ * nonce 40 bytes
+ * value (12 bits by element)
+ *
+ * message + signature:
+ * signature length (2 bytes, big-endian)
+ * nonce 40 bytes
+ * message
+ * header byte: 0010nnnn
+ * value (12 bits by element)
+ * (signature length is 1+len(value), not counting the nonce)
+ */
+
+/* see api.h */
+/*
+ * Generate a Falcon-1024 key pair (logn = 10) and encode it into the
+ * PQClean fixed-size buffers: pk (public key, header byte + 14-bit
+ * packed h) and sk (secret key, header byte + trimmed f, g, F).
+ * Returns 0 on success, -1 on (encoding) error.
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_crypto_sign_keypair(unsigned char *pk, unsigned char *sk) {
+    union {
+        uint8_t b[28 * 1024];   /* scratch for keygen */
+        uint64_t dummy_u64;     /* force 64-bit alignment */
+        fpr dummy_fpr;          /* force fpr alignment */
+    } tmp;
+    int8_t f[1024], g[1024], F[1024], G[1024];
+    uint16_t h[1024];
+    unsigned char seed[SEEDLEN];
+    inner_shake256_context rng;
+    size_t u, v;
+
+    /*
+     * Generate key pair. The RNG is SHAKE256 seeded from the
+     * system randombytes().
+     */
+    randombytes(seed, sizeof seed);
+    inner_shake256_init(&rng);
+    inner_shake256_inject(&rng, seed, sizeof seed);
+    inner_shake256_flip(&rng);
+    PQCLEAN_FALCON1024_CLEAN_keygen(&rng, f, g, F, G, h, 10, tmp.b);
+    inner_shake256_ctx_release(&rng);
+
+    /*
+     * Encode private key: header byte 0101nnnn (nnnn = logn = 10),
+     * then f, g (trimmed) and F (8 bits per element).
+     */
+    sk[0] = 0x50 + 10;
+    u = 1;
+    v = PQCLEAN_FALCON1024_CLEAN_trim_i8_encode(
+            sk + u, PQCLEAN_FALCON1024_CLEAN_CRYPTO_SECRETKEYBYTES - u,
+            f, 10, PQCLEAN_FALCON1024_CLEAN_max_fg_bits[10]);
+    if (v == 0) {
+        return -1;
+    }
+    u += v;
+    v = PQCLEAN_FALCON1024_CLEAN_trim_i8_encode(
+            sk + u, PQCLEAN_FALCON1024_CLEAN_CRYPTO_SECRETKEYBYTES - u,
+            g, 10, PQCLEAN_FALCON1024_CLEAN_max_fg_bits[10]);
+    if (v == 0) {
+        return -1;
+    }
+    u += v;
+    v = PQCLEAN_FALCON1024_CLEAN_trim_i8_encode(
+            sk + u, PQCLEAN_FALCON1024_CLEAN_CRYPTO_SECRETKEYBYTES - u,
+            F, 10, PQCLEAN_FALCON1024_CLEAN_max_FG_bits[10]);
+    if (v == 0) {
+        return -1;
+    }
+    u += v;
+    /* Encoding must fill the secret key buffer exactly. */
+    if (u != PQCLEAN_FALCON1024_CLEAN_CRYPTO_SECRETKEYBYTES) {
+        return -1;
+    }
+
+    /*
+     * Encode public key: header byte 0000nnnn, then h.
+     */
+    pk[0] = 0x00 + 10;
+    v = PQCLEAN_FALCON1024_CLEAN_modq_encode(
+            pk + 1, PQCLEAN_FALCON1024_CLEAN_CRYPTO_PUBLICKEYBYTES - 1,
+            h, 10);
+    if (v != PQCLEAN_FALCON1024_CLEAN_CRYPTO_PUBLICKEYBYTES - 1) {
+        return -1;
+    }
+
+    return 0;
+}
+
+/*
+ * Compute the signature. nonce[] receives the nonce and must have length
+ * NONCELEN bytes. sigbuf[] receives the signature value (without nonce
+ * or header byte), with *sigbuflen providing the maximum value length and
+ * receiving the actual value length.
+ *
+ * If a signature could be computed but not encoded because it would
+ * exceed the output buffer size, then a new signature is computed. If
+ * the provided buffer size is too low, this could loop indefinitely, so
+ * the caller must provide a size that can accommodate signatures with a
+ * large enough probability.
+ *
+ * Return value: 0 on success, -1 on error.
+ */
+static int
+do_sign(uint8_t *nonce, uint8_t *sigbuf, size_t *sigbuflen,
+        const uint8_t *m, size_t mlen, const uint8_t *sk) {
+    union {
+        uint8_t b[72 * 1024];   /* scratch for signing */
+        uint64_t dummy_u64;     /* force 64-bit alignment */
+        fpr dummy_fpr;          /* force fpr alignment */
+    } tmp;
+    int8_t f[1024], g[1024], F[1024], G[1024];
+    union {
+        /* sig and hm are never needed simultaneously; share storage. */
+        int16_t sig[1024];
+        uint16_t hm[1024];
+    } r;
+    unsigned char seed[SEEDLEN];
+    inner_shake256_context sc;
+    size_t u, v;
+
+    /*
+     * Decode the private key: header byte, then f, g, F; G is
+     * recomputed from the other three.
+     */
+    if (sk[0] != 0x50 + 10) {
+        return -1;
+    }
+    u = 1;
+    v = PQCLEAN_FALCON1024_CLEAN_trim_i8_decode(
+            f, 10, PQCLEAN_FALCON1024_CLEAN_max_fg_bits[10],
+            sk + u, PQCLEAN_FALCON1024_CLEAN_CRYPTO_SECRETKEYBYTES - u);
+    if (v == 0) {
+        return -1;
+    }
+    u += v;
+    v = PQCLEAN_FALCON1024_CLEAN_trim_i8_decode(
+            g, 10, PQCLEAN_FALCON1024_CLEAN_max_fg_bits[10],
+            sk + u, PQCLEAN_FALCON1024_CLEAN_CRYPTO_SECRETKEYBYTES - u);
+    if (v == 0) {
+        return -1;
+    }
+    u += v;
+    v = PQCLEAN_FALCON1024_CLEAN_trim_i8_decode(
+            F, 10, PQCLEAN_FALCON1024_CLEAN_max_FG_bits[10],
+            sk + u, PQCLEAN_FALCON1024_CLEAN_CRYPTO_SECRETKEYBYTES - u);
+    if (v == 0) {
+        return -1;
+    }
+    u += v;
+    if (u != PQCLEAN_FALCON1024_CLEAN_CRYPTO_SECRETKEYBYTES) {
+        return -1;
+    }
+    if (!PQCLEAN_FALCON1024_CLEAN_complete_private(G, f, g, F, 10, tmp.b)) {
+        return -1;
+    }
+
+    /*
+     * Create a random nonce (40 bytes).
+     */
+    randombytes(nonce, NONCELEN);
+
+    /*
+     * Hash message nonce + message into a vector.
+     */
+    inner_shake256_init(&sc);
+    inner_shake256_inject(&sc, nonce, NONCELEN);
+    inner_shake256_inject(&sc, m, mlen);
+    inner_shake256_flip(&sc);
+    PQCLEAN_FALCON1024_CLEAN_hash_to_point_ct(&sc, r.hm, 10, tmp.b);
+    inner_shake256_ctx_release(&sc);
+
+    /*
+     * Initialize a RNG.
+     */
+    randombytes(seed, sizeof seed);
+    inner_shake256_init(&sc);
+    inner_shake256_inject(&sc, seed, sizeof seed);
+    inner_shake256_flip(&sc);
+
+    /*
+     * Compute and return the signature. This loops until a signature
+     * value is found that fits in the provided buffer.
+     */
+    for (;;) {
+        PQCLEAN_FALCON1024_CLEAN_sign_dyn(r.sig, &sc, f, g, F, G, r.hm, 10, tmp.b);
+        v = PQCLEAN_FALCON1024_CLEAN_comp_encode(sigbuf, *sigbuflen, r.sig, 10);
+        if (v != 0) {
+            /* Encoding succeeded: report actual length. */
+            inner_shake256_ctx_release(&sc);
+            *sigbuflen = v;
+            return 0;
+        }
+    }
+}
+
+/*
+ * Verify a signature. The nonce has size NONCELEN bytes. sigbuf[]
+ * (of size sigbuflen) contains the signature value, not including the
+ * header byte or nonce. Return value is 0 on success, -1 on error.
+ */
+static int
+do_verify(
+        const uint8_t *nonce, const uint8_t *sigbuf, size_t sigbuflen,
+        const uint8_t *m, size_t mlen, const uint8_t *pk) {
+    union {
+        uint8_t b[2 * 1024];   /* scratch for hashing and verification */
+        uint64_t dummy_u64;    /* force 64-bit alignment */
+        fpr dummy_fpr;         /* force fpr alignment */
+    } tmp;
+    uint16_t h[1024], hm[1024];
+    int16_t sig[1024];
+    inner_shake256_context sc;
+
+    /*
+     * Decode public key (header byte 0000nnnn, then packed h),
+     * and convert h to NTT + Montgomery representation.
+     */
+    if (pk[0] != 0x00 + 10) {
+        return -1;
+    }
+    if (PQCLEAN_FALCON1024_CLEAN_modq_decode(h, 10,
+            pk + 1, PQCLEAN_FALCON1024_CLEAN_CRYPTO_PUBLICKEYBYTES - 1)
+            != PQCLEAN_FALCON1024_CLEAN_CRYPTO_PUBLICKEYBYTES - 1) {
+        return -1;
+    }
+    PQCLEAN_FALCON1024_CLEAN_to_ntt_monty(h, 10);
+
+    /*
+     * Decode signature. The decode must consume exactly sigbuflen
+     * bytes; trailing garbage is rejected.
+     */
+    if (sigbuflen == 0) {
+        return -1;
+    }
+    if (PQCLEAN_FALCON1024_CLEAN_comp_decode(sig, 10, sigbuf, sigbuflen) != sigbuflen) {
+        return -1;
+    }
+
+    /*
+     * Hash nonce + message into a vector.
+     */
+    inner_shake256_init(&sc);
+    inner_shake256_inject(&sc, nonce, NONCELEN);
+    inner_shake256_inject(&sc, m, mlen);
+    inner_shake256_flip(&sc);
+    PQCLEAN_FALCON1024_CLEAN_hash_to_point_ct(&sc, hm, 10, tmp.b);
+    inner_shake256_ctx_release(&sc);
+
+    /*
+     * Verify signature.
+     */
+    if (!PQCLEAN_FALCON1024_CLEAN_verify_raw(hm, sig, h, 10, tmp.b)) {
+        return -1;
+    }
+    return 0;
+}
+
+/* see api.h */
+/*
+ * Detached-signature API: sig receives header byte + nonce + value,
+ * *siglen the total length. Returns 0 on success, -1 on error.
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_crypto_sign_signature(
+        uint8_t *sig, size_t *siglen,
+        const uint8_t *m, size_t mlen, const uint8_t *sk) {
+    /*
+     * The PQCLEAN_FALCON1024_CLEAN_CRYPTO_BYTES constant is used for
+     * the signed message object (as produced by PQCLEAN_FALCON1024_CLEAN_crypto_sign())
+     * and includes a two-byte length value, so we take care here
+     * to only generate signatures that are two bytes shorter than
+     * the maximum. This is done to ensure that PQCLEAN_FALCON1024_CLEAN_crypto_sign()
+     * and PQCLEAN_FALCON1024_CLEAN_crypto_sign_signature() produce the exact same signature
+     * value, if used on the same message, with the same private key,
+     * and using the same output from randombytes() (this is for
+     * reproducibility of tests).
+     */
+    size_t vlen;
+
+    /* Maximum room for the compressed value (minus header, nonce,
+       and the 2-byte length used by the combined format). */
+    vlen = PQCLEAN_FALCON1024_CLEAN_CRYPTO_BYTES - NONCELEN - 3;
+    if (do_sign(sig + 1, sig + 1 + NONCELEN, &vlen, m, mlen, sk) < 0) {
+        return -1;
+    }
+    sig[0] = 0x30 + 10;   /* header byte: 0011nnnn, nnnn = logn = 10 */
+    *siglen = 1 + NONCELEN + vlen;
+    return 0;
+}
+
+/* see api.h */
+/*
+ * Verify a detached signature (header byte + nonce + value) on
+ * message m. Returns 0 on success, -1 on error.
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_crypto_sign_verify(
+        const uint8_t *sig, size_t siglen,
+        const uint8_t *m, size_t mlen, const uint8_t *pk) {
+    /* Must at least contain header byte and nonce. */
+    if (siglen < 1 + NONCELEN) {
+        return -1;
+    }
+    /* Check header byte (0011nnnn with nnnn = 10). */
+    if (sig[0] != 0x30 + 10) {
+        return -1;
+    }
+    return do_verify(sig + 1,
+            sig + 1 + NONCELEN, siglen - 1 - NONCELEN, m, mlen, pk);
+}
+
+/* see api.h */
+/*
+ * Combined sign API: sm receives the signed-message object
+ * (2-byte signature length, nonce, message, header byte, value).
+ * m and sm may overlap. Returns 0 on success, -1 on error.
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_crypto_sign(
+        uint8_t *sm, size_t *smlen,
+        const uint8_t *m, size_t mlen, const uint8_t *sk) {
+    uint8_t *pm, *sigbuf;
+    size_t sigbuflen;
+
+    /*
+     * Move the message to its final location; this is a memmove() so
+     * it handles overlaps properly.
+     */
+    memmove(sm + 2 + NONCELEN, m, mlen);
+    pm = sm + 2 + NONCELEN;
+    sigbuf = pm + 1 + mlen;
+    sigbuflen = PQCLEAN_FALCON1024_CLEAN_CRYPTO_BYTES - NONCELEN - 3;
+    if (do_sign(sm + 2, sigbuf, &sigbuflen, pm, mlen, sk) < 0) {
+        return -1;
+    }
+    pm[mlen] = 0x20 + 10;   /* header byte 0010nnnn, placed after message */
+    sigbuflen ++;           /* stored length counts the header byte */
+    sm[0] = (uint8_t)(sigbuflen >> 8);   /* big-endian 2-byte length */
+    sm[1] = (uint8_t)sigbuflen;
+    *smlen = mlen + 2 + NONCELEN + sigbuflen;
+    return 0;
+}
+
+/* see api.h */
+/*
+ * Open a signed-message object produced by crypto_sign(): verify the
+ * signature and, on success, copy the message into m (*mlen receives
+ * its length). m and sm may overlap. Returns 0 on success, -1 on error.
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_crypto_sign_open(
+        uint8_t *m, size_t *mlen,
+        const uint8_t *sm, size_t smlen, const uint8_t *pk) {
+    const uint8_t *sigbuf;
+    size_t pmlen, sigbuflen;
+
+    /* Need at least length field, nonce and signature header byte. */
+    if (smlen < 3 + NONCELEN) {
+        return -1;
+    }
+    sigbuflen = ((size_t)sm[0] << 8) | (size_t)sm[1];
+    /* Declared length must cover header byte + at least one value
+       byte, and fit inside the received object. */
+    if (sigbuflen < 2 || sigbuflen > (smlen - NONCELEN - 2)) {
+        return -1;
+    }
+    sigbuflen --;   /* drop the header byte from the value length */
+    pmlen = smlen - NONCELEN - 3 - sigbuflen;
+    if (sm[2 + NONCELEN + pmlen] != 0x20 + 10) {
+        return -1;
+    }
+    sigbuf = sm + 2 + NONCELEN + pmlen + 1;
+
+    /*
+     * The 2-byte length header and the one-byte signature header
+     * have been verified. Nonce is at sm+2, followed by the message
+     * itself. Message length is in pmlen. sigbuf/sigbuflen point to
+     * the signature value (excluding the header byte).
+     */
+    if (do_verify(sm + 2, sigbuf, sigbuflen,
+            sm + 2 + NONCELEN, pmlen, pk) < 0) {
+        return -1;
+    }
+
+    /*
+     * Signature is correct, we just have to copy/move the message
+     * to its final destination. The memmove() properly handles
+     * overlaps.
+     */
+    memmove(m, sm + 2 + NONCELEN, pmlen);
+    *mlen = pmlen;
+    return 0;
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/pqclean_falcon-1024_clean.gyp b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/pqclean_falcon-1024_clean.gyp
new file mode 100644
index 000000000..5e06930d4
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/pqclean_falcon-1024_clean.gyp
@@ -0,0 +1,48 @@
+# DO NOT EDIT: generated from subdir.gyp.template
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+{
+ 'includes': [
+ '../../../../../../coreconf/config.gypi'
+ ],
+ 'targets': [
+ {
+ 'target_name': 'oqs_src_sig_falcon_pqclean_falcon-1024_clean',
+ 'type': 'static_library',
+ 'sources': [
+ 'codec.c',
+ 'common.c',
+ 'fft.c',
+ 'fpr.c',
+ 'inner.c',
+ 'keygen.c',
+ 'pqclean.c',
+ 'rng.c',
+ 'sign.c',
+ 'vrfy.c',
+ ],
+ 'dependencies': [
+ '<(DEPTH)/exports.gyp:nss_exports'
+ ]
+ }
+ ],
+ 'target_defaults': {
+ 'defines': [
+ ],
+ 'include_dirs': [
+ '<(DEPTH)/lib/liboqs/src/common/pqclean_shims',
+ '<(DEPTH)/lib/liboqs/src/common/sha3/xkcp_low/KeccakP-1600/plain-64bits',
+ ],
+ [ 'OS=="mac"', {
+ 'defines': [
+ 'OQS_HAVE_POSIX_MEMALIGN',
+ 'OQS_HAVE_ALIGNED_ALLOC',
+ 'OQS_HAVE_MEMALIGN'
+ ]
+ }]
+ },
+ 'variables': {
+ 'module': 'oqs'
+ }
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/rng.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/rng.c
new file mode 100644
index 000000000..f5739a8f4
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/rng.c
@@ -0,0 +1,201 @@
+#include "inner.h"
+#include <assert.h>
+/*
+ * PRNG and interface to the system RNG.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+
+/*
+ * Include relevant system header files. For Win32, this will also need
+ * linking with advapi32.dll, which we trigger with an appropriate #pragma.
+ */
+
+/* see inner.h */
+/*
+ * Stub system-seed accessor for the PQClean build: it never produces
+ * seed bytes. It reports success (1) only for a zero-length request,
+ * and failure (0) otherwise. Randomness comes from randombytes()
+ * in pqclean.c instead.
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_get_seed(void *seed, size_t len) {
+    (void)seed;   /* unused: no bytes are ever written */
+    if (len == 0) {
+        return 1;
+    }
+    return 0;
+}
+
+/* see inner.h */
+/*
+ * Initialize the PRNG state from SHAKE256 output: 56 bytes are
+ * extracted, loaded as little-endian 32-bit words into the ChaCha20
+ * key/IV area, and the last two words are folded into a 64-bit
+ * block counter. A first buffer refill is performed.
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_prng_init(prng *p, inner_shake256_context *src) {
+    /*
+     * To ensure reproducibility for a given seed, we
+     * must enforce little-endian interpretation of
+     * the state words.
+     */
+    uint8_t tmp[56];
+    uint64_t th, tl;
+    int i;
+
+    inner_shake256_extract(src, tmp, 56);
+    for (i = 0; i < 14; i ++) {
+        uint32_t w;
+
+        /* Assemble each state word in little-endian order. */
+        w = (uint32_t)tmp[(i << 2) + 0]
+            | ((uint32_t)tmp[(i << 2) + 1] << 8)
+            | ((uint32_t)tmp[(i << 2) + 2] << 16)
+            | ((uint32_t)tmp[(i << 2) + 3] << 24);
+        *(uint32_t *)(p->state.d + (i << 2)) = w;
+    }
+    /* Merge the last two 32-bit words into the 64-bit counter
+       stored at state.d + 48. */
+    tl = *(uint32_t *)(p->state.d + 48);
+    th = *(uint32_t *)(p->state.d + 52);
+    *(uint64_t *)(p->state.d + 48) = tl + (th << 32);
+    PQCLEAN_FALCON1024_CLEAN_prng_refill(p);
+}
+
+/*
+ * PRNG based on ChaCha20.
+ *
+ * State consists in key (32 bytes) then IV (16 bytes) and block counter
+ * (8 bytes). Normally, we should not care about local endianness (this
+ * is for a PRNG), but for the NIST competition we need reproducible KAT
+ * vectors that work across architectures, so we enforce little-endian
+ * interpretation where applicable. Moreover, output words are "spread
+ * out" over the output buffer with the interleaving pattern that is
+ * naturally obtained from the AVX2 implementation that runs eight
+ * ChaCha20 instances in parallel.
+ *
+ * The block counter is XORed into the first 8 bytes of the IV.
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_prng_refill(prng *p) {
+
+    /* ChaCha20 constant words ("expand 32-byte k"). */
+    static const uint32_t CW[] = {
+        0x61707865, 0x3320646e, 0x79622d32, 0x6b206574
+    };
+
+    uint64_t cc;
+    size_t u;
+
+    /*
+     * State uses local endianness. Only the output bytes must be
+     * converted to little endian (if used on a big-endian machine).
+     */
+    cc = *(uint64_t *)(p->state.d + 48);
+    /* Produce eight 64-byte ChaCha20 blocks per refill. */
+    for (u = 0; u < 8; u ++) {
+        uint32_t state[16];
+        size_t v;
+        int i;
+
+        memcpy(&state[0], CW, sizeof CW);
+        memcpy(&state[4], p->state.d, 48);
+        /* XOR the 64-bit block counter into words 14/15 (the IV). */
+        state[14] ^= (uint32_t)cc;
+        state[15] ^= (uint32_t)(cc >> 32);
+        /* 10 double-rounds = 20 ChaCha20 rounds. */
+        for (i = 0; i < 10; i ++) {
+
+#define QROUND(a, b, c, d)   do { \
+        state[a] += state[b]; \
+        state[d] ^= state[a]; \
+        state[d] = (state[d] << 16) | (state[d] >> 16); \
+        state[c] += state[d]; \
+        state[b] ^= state[c]; \
+        state[b] = (state[b] << 12) | (state[b] >> 20); \
+        state[a] += state[b]; \
+        state[d] ^= state[a]; \
+        state[d] = (state[d] <<  8) | (state[d] >> 24); \
+        state[c] += state[d]; \
+        state[b] ^= state[c]; \
+        state[b] = (state[b] <<  7) | (state[b] >> 25); \
+    } while (0)
+
+            /* Column rounds. */
+            QROUND( 0,  4,  8, 12);
+            QROUND( 1,  5,  9, 13);
+            QROUND( 2,  6, 10, 14);
+            QROUND( 3,  7, 11, 15);
+            /* Diagonal rounds. */
+            QROUND( 0,  5, 10, 15);
+            QROUND( 1,  6, 11, 12);
+            QROUND( 2,  7,  8, 13);
+            QROUND( 3,  4,  9, 14);
+
+#undef QROUND
+
+        }
+
+        /* Final feed-forward: add the initial state back in. */
+        for (v = 0; v < 4; v ++) {
+            state[v] += CW[v];
+        }
+        for (v = 4; v < 14; v ++) {
+            state[v] += ((uint32_t *)p->state.d)[v - 4];
+        }
+        state[14] += ((uint32_t *)p->state.d)[10]
+                     ^ (uint32_t)cc;
+        state[15] += ((uint32_t *)p->state.d)[11]
+                     ^ (uint32_t)(cc >> 32);
+        cc ++;
+
+        /*
+         * We mimic the interleaving that is used in the AVX2
+         * implementation.
+         */
+        for (v = 0; v < 16; v ++) {
+            p->buf.d[(u << 2) + (v << 5) + 0] =
+                (uint8_t)state[v];
+            p->buf.d[(u << 2) + (v << 5) + 1] =
+                (uint8_t)(state[v] >> 8);
+            p->buf.d[(u << 2) + (v << 5) + 2] =
+                (uint8_t)(state[v] >> 16);
+            p->buf.d[(u << 2) + (v << 5) + 3] =
+                (uint8_t)(state[v] >> 24);
+        }
+    }
+    /* Persist the advanced block counter for the next refill. */
+    *(uint64_t *)(p->state.d + 48) = cc;
+
+    p->ptr = 0;
+}
+
+/* see inner.h */
+/*
+ * Copy len pseudorandom bytes from the PRNG buffer into dst,
+ * refilling the buffer whenever it is exhausted.
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_prng_get_bytes(prng *p, void *dst, size_t len) {
+    uint8_t *buf;
+
+    buf = dst;
+    while (len > 0) {
+        size_t clen;
+
+        /* Bytes remaining in the current buffer. */
+        clen = (sizeof p->buf.d) - p->ptr;
+        if (clen > len) {
+            clen = len;
+        }
+        /*
+         * NOTE(review): the copy reads from buf.d[0] rather than
+         * buf.d[p->ptr], so bytes may be re-read after a partial
+         * consumption. This matches the upstream Falcon reference
+         * code — presumably callers only consume whole buffers here;
+         * verify against upstream before changing.
+         */
+        memcpy(buf, p->buf.d, clen);
+        buf += clen;
+        len -= clen;
+        p->ptr += clen;
+        if (p->ptr == sizeof p->buf.d) {
+            PQCLEAN_FALCON1024_CLEAN_prng_refill(p);
+        }
+    }
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/sign.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/sign.c
new file mode 100644
index 000000000..0baa9148e
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/sign.c
@@ -0,0 +1,1254 @@
+#include "inner.h"
+
+/*
+ * Falcon signature generation.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+/* =================================================================== */
+
+/*
+ * Compute degree N from logarithm 'logn'.
+ */
+#define MKN(logn) ((size_t)1 << (logn))
+
+/* =================================================================== */
+/*
+ * Binary case:
+ * N = 2^logn
+ * phi = X^N+1
+ */
+
+/*
+ * Get the size of the LDL tree for an input with polynomials of size
+ * 2^logn. The size is expressed in the number of elements.
+ */
+static inline unsigned
+ffLDL_treesize(unsigned logn) {
+    /*
+     * For logn = 0 (polynomials are constant), the "tree" is a
+     * single element. Otherwise, the tree node has size 2^logn, and
+     * has two child trees for size logn-1 each. Thus, treesize s()
+     * must fulfill these two relations:
+     *
+     *   s(0) = 1
+     *   s(logn) = (2^logn) + 2*s(logn-1)
+     *
+     * which has closed form (logn+1)*2^logn.
+     */
+    return (logn + 1) << logn;
+}
+
+/*
+ * Inner function for ffLDL_fft(). It expects the matrix to be both
+ * auto-adjoint and quasicyclic; also, it uses the source operands
+ * as modifiable temporaries.
+ *
+ * tree  output: current tree node, then the two child subtrees
+ * g0,g1 first row of the auto-adjoint quasicyclic Gram matrix
+ *       (FFT representation); clobbered as scratch
+ * tmp[] must have room for at least one polynomial.
+ */
+static void
+ffLDL_fft_inner(fpr *tree,
+        fpr *g0, fpr *g1, unsigned logn, fpr *tmp) {
+    size_t n, hn;
+
+    n = MKN(logn);
+    if (n == 1) {
+        /* Leaf: the tree value is just g0 (constant polynomial). */
+        tree[0] = g0[0];
+        return;
+    }
+    hn = n >> 1;
+
+    /*
+     * The LDL decomposition yields L (which is written in the tree)
+     * and the diagonal of D. Since d00 = g0, we just write d11
+     * into tmp.
+     */
+    PQCLEAN_FALCON1024_CLEAN_poly_LDLmv_fft(tmp, tree, g0, g1, g0, logn);
+
+    /*
+     * Split d00 (currently in g0) and d11 (currently in tmp). We
+     * reuse g0 and g1 as temporary storage spaces:
+     *   d00 splits into g1, g1+hn
+     *   d11 splits into g0, g0+hn
+     */
+    PQCLEAN_FALCON1024_CLEAN_poly_split_fft(g1, g1 + hn, g0, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_split_fft(g0, g0 + hn, tmp, logn);
+
+    /*
+     * Each split result is the first row of a new auto-adjoint
+     * quasicyclic matrix for the next recursive step.
+     */
+    ffLDL_fft_inner(tree + n,
+        g1, g1 + hn, logn - 1, tmp);
+    ffLDL_fft_inner(tree + n + ffLDL_treesize(logn - 1),
+        g0, g0 + hn, logn - 1, tmp);
+}
+
+/*
+ * Compute the ffLDL tree of an auto-adjoint matrix G. The matrix
+ * is provided as three polynomials (FFT representation).
+ *
+ * The "tree" array is filled with the computed tree, of size
+ * (logn+1)*(2^logn) elements (see ffLDL_treesize()).
+ *
+ * Input arrays MUST NOT overlap, except possibly the three unmodified
+ * arrays g00, g01 and g11. tmp[] should have room for at least three
+ * polynomials of 2^logn elements each.
+ */
+static void
+ffLDL_fft(fpr *tree, const fpr *g00,
+        const fpr *g01, const fpr *g11,
+        unsigned logn, fpr *tmp) {
+    size_t n, hn;
+    fpr *d00, *d11;
+
+    n = MKN(logn);
+    if (n == 1) {
+        /* Leaf: tree value is just g00 (constant polynomial). */
+        tree[0] = g00[0];
+        return;
+    }
+    hn = n >> 1;
+    d00 = tmp;
+    d11 = tmp + n;
+    tmp += n << 1;
+
+    /* Top-level LDL: d00 = g00; L goes into the tree root,
+       d11 into its own buffer (inputs are const here). */
+    memcpy(d00, g00, n * sizeof * g00);
+    PQCLEAN_FALCON1024_CLEAN_poly_LDLmv_fft(d11, tree, g00, g01, g11, logn);
+
+    /* Split d00 and d11, then recurse on the two halves (same
+       scheme as ffLDL_fft_inner, with explicit buffers). */
+    PQCLEAN_FALCON1024_CLEAN_poly_split_fft(tmp, tmp + hn, d00, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_split_fft(d00, d00 + hn, d11, logn);
+    memcpy(d11, tmp, n * sizeof * tmp);
+    ffLDL_fft_inner(tree + n,
+        d11, d11 + hn, logn - 1, tmp);
+    ffLDL_fft_inner(tree + n + ffLDL_treesize(logn - 1),
+        d00, d00 + hn, logn - 1, tmp);
+}
+
+/*
+ * Normalize an ffLDL tree: each leaf of value x is replaced with
+ * sigma / sqrt(x).
+ */
+static void
+ffLDL_binary_normalize(fpr *tree, unsigned logn) {
+    /*
+     * TODO: make an iterative version.
+     */
+    size_t n;
+
+    n = MKN(logn);
+    if (n == 1) {
+        /*
+         * We actually store in the tree leaf the inverse of
+         * the value mandated by the specification: this
+         * saves a division both here and in the sampler.
+         */
+        tree[0] = fpr_mul(fpr_sqrt(tree[0]), fpr_inv_sigma);
+    } else {
+        /* Recurse into the two child subtrees (internal nodes
+           hold L coefficients and are left untouched). */
+        ffLDL_binary_normalize(tree + n, logn - 1);
+        ffLDL_binary_normalize(tree + n + ffLDL_treesize(logn - 1),
+            logn - 1);
+    }
+}
+
+/* =================================================================== */
+
+/*
+ * Convert an integer polynomial (with small values) into the
+ * representation with complex numbers (fpr coefficients).
+ *
+ * r     output: 2^logn fpr coefficients
+ * t     input: 2^logn small signed integer coefficients
+ * logn  log2 of the degree
+ */
+static void
+smallints_to_fpr(fpr *r, const int8_t *t, unsigned logn) {
+    size_t n, u;
+
+    n = MKN(logn);
+    for (u = 0; u < n; u ++) {
+        r[u] = fpr_of(t[u]);
+    }
+}
+
+/*
+ * The expanded private key contains:
+ *  - The B0 matrix (four elements)
+ *  - The ffLDL tree
+ *
+ * The skoff_*() helpers below return the offset (in fpr elements)
+ * of each component within the expanded key buffer.
+ */
+
+/* Offset of matrix element b00 (always 0). */
+static inline size_t
+skoff_b00(unsigned logn) {
+    (void)logn;
+    return 0;
+}
+
+/* Offset of matrix element b01. */
+static inline size_t
+skoff_b01(unsigned logn) {
+    return MKN(logn);
+}
+
+/* Offset of matrix element b10. */
+static inline size_t
+skoff_b10(unsigned logn) {
+    return 2 * MKN(logn);
+}
+
+/* Offset of matrix element b11. */
+static inline size_t
+skoff_b11(unsigned logn) {
+    return 3 * MKN(logn);
+}
+
+/* Offset of the ffLDL tree (after the four matrix elements). */
+static inline size_t
+skoff_tree(unsigned logn) {
+    return 4 * MKN(logn);
+}
+
+/* see inner.h */
+/*
+ * Expand a private key (f, g, F, G) into the precomputed form used by
+ * the tree-based signer: the B0 matrix in FFT representation followed
+ * by the normalized ffLDL (Falcon) tree.
+ *
+ * expanded_key  output buffer (see skoff_*() for the layout)
+ * f,g,F,G       private-key polynomials (small integers)
+ * logn          log2 of the degree
+ * tmp           scratch buffer (holds the Gram matrix during build)
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_expand_privkey(fpr *expanded_key,
+        const int8_t *f, const int8_t *g,
+        const int8_t *F, const int8_t *G,
+        unsigned logn, uint8_t *tmp) {
+    size_t n;
+    fpr *rf, *rg, *rF, *rG;
+    fpr *b00, *b01, *b10, *b11;
+    fpr *g00, *g01, *g11, *gxx;
+    fpr *tree;
+
+    n = MKN(logn);
+    b00 = expanded_key + skoff_b00(logn);
+    b01 = expanded_key + skoff_b01(logn);
+    b10 = expanded_key + skoff_b10(logn);
+    b11 = expanded_key + skoff_b11(logn);
+    tree = expanded_key + skoff_tree(logn);
+
+    /*
+     * We load the private key elements directly into the B0 matrix,
+     * since B0 = [[g, -f], [G, -F]].
+     */
+    rf = b01;
+    rg = b00;
+    rF = b11;
+    rG = b10;
+
+    smallints_to_fpr(rf, f, logn);
+    smallints_to_fpr(rg, g, logn);
+    smallints_to_fpr(rF, F, logn);
+    smallints_to_fpr(rG, G, logn);
+
+    /*
+     * Compute the FFT for the key elements, and negate f and F.
+     */
+    PQCLEAN_FALCON1024_CLEAN_FFT(rf, logn);
+    PQCLEAN_FALCON1024_CLEAN_FFT(rg, logn);
+    PQCLEAN_FALCON1024_CLEAN_FFT(rF, logn);
+    PQCLEAN_FALCON1024_CLEAN_FFT(rG, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_neg(rf, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_neg(rF, logn);
+
+    /*
+     * The Gram matrix is G = B x B*. Formulas are:
+     *   g00 = b00*adj(b00) + b01*adj(b01)
+     *   g01 = b00*adj(b10) + b01*adj(b11)
+     *   g10 = b10*adj(b00) + b11*adj(b01)
+     *   g11 = b10*adj(b10) + b11*adj(b11)
+     *
+     * For historical reasons, this implementation uses
+     * g00, g01 and g11 (upper triangle).
+     */
+    g00 = (fpr *)tmp;
+    g01 = g00 + n;
+    g11 = g01 + n;
+    gxx = g11 + n;   /* scratch polynomial for partial products */
+
+    memcpy(g00, b00, n * sizeof * b00);
+    PQCLEAN_FALCON1024_CLEAN_poly_mulselfadj_fft(g00, logn);
+    memcpy(gxx, b01, n * sizeof * b01);
+    PQCLEAN_FALCON1024_CLEAN_poly_mulselfadj_fft(gxx, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_add(g00, gxx, logn);
+
+    memcpy(g01, b00, n * sizeof * b00);
+    PQCLEAN_FALCON1024_CLEAN_poly_muladj_fft(g01, b10, logn);
+    memcpy(gxx, b01, n * sizeof * b01);
+    PQCLEAN_FALCON1024_CLEAN_poly_muladj_fft(gxx, b11, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_add(g01, gxx, logn);
+
+    memcpy(g11, b10, n * sizeof * b10);
+    PQCLEAN_FALCON1024_CLEAN_poly_mulselfadj_fft(g11, logn);
+    memcpy(gxx, b11, n * sizeof * b11);
+    PQCLEAN_FALCON1024_CLEAN_poly_mulselfadj_fft(gxx, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_add(g11, gxx, logn);
+
+    /*
+     * Compute the Falcon tree.
+     */
+    ffLDL_fft(tree, g00, g01, g11, logn, gxx);
+
+    /*
+     * Normalize tree.
+     */
+    ffLDL_binary_normalize(tree, logn);
+}
+
+/*
+ * Integer Gaussian sampler callback type: returns an integer sampled
+ * from a discrete Gaussian centered on mu with standard deviation
+ * sigma, using the opaque context ctx (PRNG state etc.).
+ */
+typedef int (*samplerZ)(void *ctx, fpr mu, fpr sigma);
+
+/*
+ * Perform Fast Fourier Sampling for target vector t. The Gram matrix
+ * is provided (G = [[g00, g01], [adj(g01), g11]]). The sampled vector
+ * is written over (t0,t1). The Gram matrix is modified as well. The
+ * tmp[] buffer must have room for four polynomials.
+ *
+ * This is the "dynamic tree" variant: instead of using a precomputed
+ * LDL tree, it computes the LDL decomposition on the fly at each
+ * recursion level, which saves memory at the cost of extra work.
+ */
+static void
+ffSampling_fft_dyntree(samplerZ samp, void *samp_ctx,
+                       fpr *t0, fpr *t1,
+                       fpr *g00, fpr *g01, fpr *g11,
+                       unsigned logn, fpr *tmp) {
+    size_t n, hn;
+    fpr *z0, *z1;
+
+    /*
+     * Deepest level: the LDL tree leaf value is just g00 (the
+     * array has length only 1 at this point); we normalize it
+     * with regards to sigma, then use it for sampling.
+     */
+    if (logn == 0) {
+        fpr leaf;
+
+        leaf = g00[0];
+        leaf = fpr_mul(fpr_sqrt(leaf), fpr_inv_sigma);
+        t0[0] = fpr_of(samp(samp_ctx, t0[0], leaf));
+        t1[0] = fpr_of(samp(samp_ctx, t1[0], leaf));
+        return;
+    }
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+
+    /*
+     * Decompose G into LDL. We only need d00 (identical to g00),
+     * d11, and l10; we do that in place.
+     */
+    PQCLEAN_FALCON1024_CLEAN_poly_LDL_fft(g00, g01, g11, logn);
+
+    /*
+     * Split d00 and d11 and expand them into half-size quasi-cyclic
+     * Gram matrices. We also save l10 in tmp[].
+     */
+    PQCLEAN_FALCON1024_CLEAN_poly_split_fft(tmp, tmp + hn, g00, logn);
+    memcpy(g00, tmp, n * sizeof * tmp);
+    PQCLEAN_FALCON1024_CLEAN_poly_split_fft(tmp, tmp + hn, g11, logn);
+    memcpy(g11, tmp, n * sizeof * tmp);
+    memcpy(tmp, g01, n * sizeof * g01);
+    memcpy(g01, g00, hn * sizeof * g00);
+    memcpy(g01 + hn, g11, hn * sizeof * g00);
+
+    /*
+     * The half-size Gram matrices for the recursive LDL tree
+     * building are now:
+     *   - left sub-tree:  g00, g00+hn, g01
+     *   - right sub-tree: g11, g11+hn, g01+hn
+     * l10 is in tmp[].
+     */
+
+    /*
+     * We split t1 and use the first recursive call on the two
+     * halves, using the right sub-tree. The result is merged
+     * back into tmp + 2*n.
+     */
+    z1 = tmp + n;
+    PQCLEAN_FALCON1024_CLEAN_poly_split_fft(z1, z1 + hn, t1, logn);
+    ffSampling_fft_dyntree(samp, samp_ctx, z1, z1 + hn,
+                           g11, g11 + hn, g01 + hn, logn - 1, z1 + n);
+    PQCLEAN_FALCON1024_CLEAN_poly_merge_fft(tmp + (n << 1), z1, z1 + hn, logn);
+
+    /*
+     * Compute tb0 = t0 + (t1 - z1) * l10.
+     * At that point, l10 is in tmp, t1 is unmodified, and z1 is
+     * in tmp + (n << 1). The buffer in z1 is free.
+     *
+     * In the end, z1 is written over t1, and tb0 is in t0.
+     */
+    memcpy(z1, t1, n * sizeof * t1);
+    PQCLEAN_FALCON1024_CLEAN_poly_sub(z1, tmp + (n << 1), logn);
+    memcpy(t1, tmp + (n << 1), n * sizeof * tmp);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(tmp, z1, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_add(t0, tmp, logn);
+
+    /*
+     * Second recursive invocation, on the split tb0 (currently in t0)
+     * and the left sub-tree.
+     */
+    z0 = tmp;
+    PQCLEAN_FALCON1024_CLEAN_poly_split_fft(z0, z0 + hn, t0, logn);
+    ffSampling_fft_dyntree(samp, samp_ctx, z0, z0 + hn,
+                           g00, g00 + hn, g01, logn - 1, z0 + n);
+    PQCLEAN_FALCON1024_CLEAN_poly_merge_fft(t0, z0, z0 + hn, logn);
+}
+
+/*
+ * Perform Fast Fourier Sampling for target vector t and LDL tree T.
+ * tmp[] must have size for at least two polynomials of size 2^logn.
+ *
+ * The sampled vector is written into (z0,z1); (t0,t1) is the target
+ * vector (FFT representation) and tree is the precomputed, normalized
+ * Falcon LDL tree for the expanded private key.
+ */
+static void
+ffSampling_fft(samplerZ samp, void *samp_ctx,
+               fpr *z0, fpr *z1,
+               const fpr *tree,
+               const fpr *t0, const fpr *t1, unsigned logn,
+               fpr *tmp) {
+    size_t n, hn;
+    const fpr *tree0, *tree1;
+
+    /*
+     * When logn == 2, we inline the last two recursion levels.
+     */
+    if (logn == 2) {
+        fpr x0, x1, y0, y1, w0, w1, w2, w3, sigma;
+        fpr a_re, a_im, b_re, b_im, c_re, c_im;
+
+        /*
+         * Sub-trees for the two half-size recursions; each occupies
+         * four fpr slots right after the current level's data.
+         */
+        tree0 = tree + 4;
+        tree1 = tree + 8;
+
+        /*
+         * We split t1 into w*, then do the recursive invocation,
+         * with output in w*. We finally merge back into z1.
+         */
+        a_re = t1[0];
+        a_im = t1[2];
+        b_re = t1[1];
+        b_im = t1[3];
+        c_re = fpr_add(a_re, b_re);
+        c_im = fpr_add(a_im, b_im);
+        w0 = fpr_half(c_re);
+        w1 = fpr_half(c_im);
+        c_re = fpr_sub(a_re, b_re);
+        c_im = fpr_sub(a_im, b_im);
+        w2 = fpr_mul(fpr_add(c_re, c_im), fpr_invsqrt8);
+        w3 = fpr_mul(fpr_sub(c_im, c_re), fpr_invsqrt8);
+
+        x0 = w2;
+        x1 = w3;
+        sigma = tree1[3];
+        w2 = fpr_of(samp(samp_ctx, x0, sigma));
+        w3 = fpr_of(samp(samp_ctx, x1, sigma));
+        a_re = fpr_sub(x0, w2);
+        a_im = fpr_sub(x1, w3);
+        b_re = tree1[0];
+        b_im = tree1[1];
+        /* Complex multiplication (a_re + i*a_im) * (b_re + i*b_im). */
+        c_re = fpr_sub(fpr_mul(a_re, b_re), fpr_mul(a_im, b_im));
+        c_im = fpr_add(fpr_mul(a_re, b_im), fpr_mul(a_im, b_re));
+        x0 = fpr_add(c_re, w0);
+        x1 = fpr_add(c_im, w1);
+        sigma = tree1[2];
+        w0 = fpr_of(samp(samp_ctx, x0, sigma));
+        w1 = fpr_of(samp(samp_ctx, x1, sigma));
+
+        /* Inlined merge (inverse of the split above) into z1. */
+        a_re = w0;
+        a_im = w1;
+        b_re = w2;
+        b_im = w3;
+        c_re = fpr_mul(fpr_sub(b_re, b_im), fpr_invsqrt2);
+        c_im = fpr_mul(fpr_add(b_re, b_im), fpr_invsqrt2);
+        z1[0] = w0 = fpr_add(a_re, c_re);
+        z1[2] = w2 = fpr_add(a_im, c_im);
+        z1[1] = w1 = fpr_sub(a_re, c_re);
+        z1[3] = w3 = fpr_sub(a_im, c_im);
+
+        /*
+         * Compute tb0 = t0 + (t1 - z1) * L. Value tb0 ends up in w*.
+         */
+        w0 = fpr_sub(t1[0], w0);
+        w1 = fpr_sub(t1[1], w1);
+        w2 = fpr_sub(t1[2], w2);
+        w3 = fpr_sub(t1[3], w3);
+
+        a_re = w0;
+        a_im = w2;
+        b_re = tree[0];
+        b_im = tree[2];
+        w0 = fpr_sub(fpr_mul(a_re, b_re), fpr_mul(a_im, b_im));
+        w2 = fpr_add(fpr_mul(a_re, b_im), fpr_mul(a_im, b_re));
+        a_re = w1;
+        a_im = w3;
+        b_re = tree[1];
+        b_im = tree[3];
+        w1 = fpr_sub(fpr_mul(a_re, b_re), fpr_mul(a_im, b_im));
+        w3 = fpr_add(fpr_mul(a_re, b_im), fpr_mul(a_im, b_re));
+
+        w0 = fpr_add(w0, t0[0]);
+        w1 = fpr_add(w1, t0[1]);
+        w2 = fpr_add(w2, t0[2]);
+        w3 = fpr_add(w3, t0[3]);
+
+        /*
+         * Second recursive invocation.
+         */
+        a_re = w0;
+        a_im = w2;
+        b_re = w1;
+        b_im = w3;
+        c_re = fpr_add(a_re, b_re);
+        c_im = fpr_add(a_im, b_im);
+        w0 = fpr_half(c_re);
+        w1 = fpr_half(c_im);
+        c_re = fpr_sub(a_re, b_re);
+        c_im = fpr_sub(a_im, b_im);
+        w2 = fpr_mul(fpr_add(c_re, c_im), fpr_invsqrt8);
+        w3 = fpr_mul(fpr_sub(c_im, c_re), fpr_invsqrt8);
+
+        x0 = w2;
+        x1 = w3;
+        sigma = tree0[3];
+        w2 = y0 = fpr_of(samp(samp_ctx, x0, sigma));
+        w3 = y1 = fpr_of(samp(samp_ctx, x1, sigma));
+        a_re = fpr_sub(x0, y0);
+        a_im = fpr_sub(x1, y1);
+        b_re = tree0[0];
+        b_im = tree0[1];
+        c_re = fpr_sub(fpr_mul(a_re, b_re), fpr_mul(a_im, b_im));
+        c_im = fpr_add(fpr_mul(a_re, b_im), fpr_mul(a_im, b_re));
+        x0 = fpr_add(c_re, w0);
+        x1 = fpr_add(c_im, w1);
+        sigma = tree0[2];
+        w0 = fpr_of(samp(samp_ctx, x0, sigma));
+        w1 = fpr_of(samp(samp_ctx, x1, sigma));
+
+        a_re = w0;
+        a_im = w1;
+        b_re = w2;
+        b_im = w3;
+        c_re = fpr_mul(fpr_sub(b_re, b_im), fpr_invsqrt2);
+        c_im = fpr_mul(fpr_add(b_re, b_im), fpr_invsqrt2);
+        z0[0] = fpr_add(a_re, c_re);
+        z0[2] = fpr_add(a_im, c_im);
+        z0[1] = fpr_sub(a_re, c_re);
+        z0[3] = fpr_sub(a_im, c_im);
+
+        return;
+    }
+
+    /*
+     * Case logn == 1 is reachable only when using Falcon-2 (the
+     * smallest size for which Falcon is mathematically defined, but
+     * of course way too insecure to be of any use).
+     */
+    if (logn == 1) {
+        fpr x0, x1, y0, y1, sigma;
+        fpr a_re, a_im, b_re, b_im, c_re, c_im;
+
+        x0 = t1[0];
+        x1 = t1[1];
+        sigma = tree[3];
+        z1[0] = y0 = fpr_of(samp(samp_ctx, x0, sigma));
+        z1[1] = y1 = fpr_of(samp(samp_ctx, x1, sigma));
+        a_re = fpr_sub(x0, y0);
+        a_im = fpr_sub(x1, y1);
+        b_re = tree[0];
+        b_im = tree[1];
+        c_re = fpr_sub(fpr_mul(a_re, b_re), fpr_mul(a_im, b_im));
+        c_im = fpr_add(fpr_mul(a_re, b_im), fpr_mul(a_im, b_re));
+        x0 = fpr_add(c_re, t0[0]);
+        x1 = fpr_add(c_im, t0[1]);
+        sigma = tree[2];
+        z0[0] = fpr_of(samp(samp_ctx, x0, sigma));
+        z0[1] = fpr_of(samp(samp_ctx, x1, sigma));
+
+        return;
+    }
+
+    /*
+     * Normal end of recursion is for logn == 0. Since the last
+     * steps of the recursions were inlined in the blocks above
+     * (when logn == 1 or 2), this case is not reachable, and is
+     * retained here only for documentation purposes.
+
+    if (logn == 0) {
+        fpr x0, x1, sigma;
+
+        x0 = t0[0];
+        x1 = t1[0];
+        sigma = tree[0];
+        z0[0] = fpr_of(samp(samp_ctx, x0, sigma));
+        z1[0] = fpr_of(samp(samp_ctx, x1, sigma));
+        return;
+    }
+
+     */
+
+    /*
+     * General recursive case (logn >= 3).
+     */
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+    tree0 = tree + n;
+    tree1 = tree + n + ffLDL_treesize(logn - 1);
+
+    /*
+     * We split t1 into z1 (reused as temporary storage), then do
+     * the recursive invocation, with output in tmp. We finally
+     * merge back into z1.
+     */
+    PQCLEAN_FALCON1024_CLEAN_poly_split_fft(z1, z1 + hn, t1, logn);
+    ffSampling_fft(samp, samp_ctx, tmp, tmp + hn,
+                   tree1, z1, z1 + hn, logn - 1, tmp + n);
+    PQCLEAN_FALCON1024_CLEAN_poly_merge_fft(z1, tmp, tmp + hn, logn);
+
+    /*
+     * Compute tb0 = t0 + (t1 - z1) * L. Value tb0 ends up in tmp[].
+     */
+    memcpy(tmp, t1, n * sizeof * t1);
+    PQCLEAN_FALCON1024_CLEAN_poly_sub(tmp, z1, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(tmp, tree, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_add(tmp, t0, logn);
+
+    /*
+     * Second recursive invocation.
+     */
+    PQCLEAN_FALCON1024_CLEAN_poly_split_fft(z0, z0 + hn, tmp, logn);
+    ffSampling_fft(samp, samp_ctx, tmp, tmp + hn,
+                   tree0, z0, z0 + hn, logn - 1, tmp + n);
+    PQCLEAN_FALCON1024_CLEAN_poly_merge_fft(z0, tmp, tmp + hn, logn);
+}
+
+/*
+ * Compute a signature: the signature contains two vectors, s1 and s2.
+ * The s1 vector is not returned. The squared norm of (s1,s2) is
+ * computed, and if it is short enough, then s2 is returned into the
+ * s2[] buffer, and 1 is returned; otherwise, s2[] is untouched and 0 is
+ * returned; the caller should then try again. This function uses an
+ * expanded key.
+ *
+ * tmp[] must have room for at least six polynomials.
+ */
+static int
+do_sign_tree(samplerZ samp, void *samp_ctx, int16_t *s2,
+             const fpr *expanded_key,
+             const uint16_t *hm,
+             unsigned logn, fpr *tmp) {
+    size_t n, u;
+    fpr *t0, *t1, *tx, *ty;
+    const fpr *b00, *b01, *b10, *b11, *tree;
+    fpr ni;
+    uint32_t sqn, ng;
+    int16_t *s1tmp, *s2tmp;
+
+    n = MKN(logn);
+    t0 = tmp;
+    t1 = t0 + n;
+    /* Locate the basis polynomials and the LDL tree inside the
+       expanded key, using the per-degree offsets. */
+    b00 = expanded_key + skoff_b00(logn);
+    b01 = expanded_key + skoff_b01(logn);
+    b10 = expanded_key + skoff_b10(logn);
+    b11 = expanded_key + skoff_b11(logn);
+    tree = expanded_key + skoff_tree(logn);
+
+    /*
+     * Set the target vector to [hm, 0] (hm is the hashed message).
+     */
+    for (u = 0; u < n; u ++) {
+        t0[u] = fpr_of(hm[u]);
+        /* This is implicit.
+        t1[u] = fpr_zero;
+        */
+    }
+
+    /*
+     * Apply the lattice basis to obtain the real target
+     * vector (after normalization with regards to modulus).
+     */
+    PQCLEAN_FALCON1024_CLEAN_FFT(t0, logn);
+    ni = fpr_inverse_of_q;
+    memcpy(t1, t0, n * sizeof * t0);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(t1, b01, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_mulconst(t1, fpr_neg(ni), logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(t0, b11, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_mulconst(t0, ni, logn);
+
+    tx = t1 + n;
+    ty = tx + n;
+
+    /*
+     * Apply sampling. Output is written back in [tx, ty].
+     */
+    ffSampling_fft(samp, samp_ctx, tx, ty, tree, t0, t1, logn, ty + n);
+
+    /*
+     * Get the lattice point corresponding to that tiny vector.
+     */
+    memcpy(t0, tx, n * sizeof * tx);
+    memcpy(t1, ty, n * sizeof * ty);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(tx, b00, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(ty, b10, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_add(tx, ty, logn);
+    memcpy(ty, t0, n * sizeof * t0);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(ty, b01, logn);
+
+    memcpy(t0, tx, n * sizeof * tx);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(t1, b11, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_add(t1, ty, logn);
+
+    PQCLEAN_FALCON1024_CLEAN_iFFT(t0, logn);
+    PQCLEAN_FALCON1024_CLEAN_iFFT(t1, logn);
+
+    /*
+     * Compute the signature.
+     */
+    s1tmp = (int16_t *)tx;
+    sqn = 0;
+    ng = 0;
+    for (u = 0; u < n; u ++) {
+        int32_t z;
+
+        z = (int32_t)hm[u] - (int32_t)fpr_rint(t0[u]);
+        sqn += (uint32_t)(z * z);
+        /* ng accumulates the high bits seen in sqn; if bit 31 was
+           ever set, the squared norm may have wrapped around. */
+        ng |= sqn;
+    s1tmp[u] = (int16_t)z;
+    }
+    /* If an overflow was possible (bit 31 of ng set), saturate sqn
+       to 2^32-1 so that a wrapped-around norm is never accepted as
+       "short". */
+    sqn |= -(ng >> 31);
+
+    /*
+     * With "normal" degrees (e.g. 512 or 1024), it is very
+     * improbable that the computed vector is not short enough;
+     * however, it may happen in practice for the very reduced
+     * versions (e.g. degree 16 or below). In that case, the caller
+     * will loop, and we must not write anything into s2[] because
+     * s2[] may overlap with the hashed message hm[] and we need
+     * hm[] for the next iteration.
+     */
+    s2tmp = (int16_t *)tmp;
+    for (u = 0; u < n; u ++) {
+        s2tmp[u] = (int16_t) - fpr_rint(t1[u]);
+    }
+    if (PQCLEAN_FALCON1024_CLEAN_is_short_half(sqn, s2tmp, logn)) {
+        memcpy(s2, s2tmp, n * sizeof * s2);
+        memcpy(tmp, s1tmp, n * sizeof * s1tmp);
+        return 1;
+    }
+    return 0;
+}
+
+/*
+ * Compute a signature: the signature contains two vectors, s1 and s2.
+ * The s1 vector is not returned. The squared norm of (s1,s2) is
+ * computed, and if it is short enough, then s2 is returned into the
+ * s2[] buffer, and 1 is returned; otherwise, s2[] is untouched and 0 is
+ * returned; the caller should then try again.
+ *
+ * This is the "dynamic" variant: the Gram matrix and LDL data are
+ * recomputed from the raw private key (f, g, F, G) instead of using a
+ * precomputed expanded key.
+ *
+ * tmp[] must have room for at least nine polynomials.
+ */
+static int
+do_sign_dyn(samplerZ samp, void *samp_ctx, int16_t *s2,
+            const int8_t *f, const int8_t *g,
+            const int8_t *F, const int8_t *G,
+            const uint16_t *hm, unsigned logn, fpr *tmp) {
+    size_t n, u;
+    fpr *t0, *t1, *tx, *ty;
+    fpr *b00, *b01, *b10, *b11, *g00, *g01, *g11;
+    fpr ni;
+    uint32_t sqn, ng;
+    int16_t *s1tmp, *s2tmp;
+
+    n = MKN(logn);
+
+    /*
+     * Lattice basis is B = [[g, -f], [G, -F]]. We convert it to FFT.
+     */
+    b00 = tmp;
+    b01 = b00 + n;
+    b10 = b01 + n;
+    b11 = b10 + n;
+    smallints_to_fpr(b01, f, logn);
+    smallints_to_fpr(b00, g, logn);
+    smallints_to_fpr(b11, F, logn);
+    smallints_to_fpr(b10, G, logn);
+    PQCLEAN_FALCON1024_CLEAN_FFT(b01, logn);
+    PQCLEAN_FALCON1024_CLEAN_FFT(b00, logn);
+    PQCLEAN_FALCON1024_CLEAN_FFT(b11, logn);
+    PQCLEAN_FALCON1024_CLEAN_FFT(b10, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_neg(b01, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_neg(b11, logn);
+
+    /*
+     * Compute the Gram matrix G = B x B*. Formulas are:
+     *   g00 = b00*adj(b00) + b01*adj(b01)
+     *   g01 = b00*adj(b10) + b01*adj(b11)
+     *   g10 = b10*adj(b00) + b11*adj(b01)
+     *   g11 = b10*adj(b10) + b11*adj(b11)
+     *
+     * For historical reasons, this implementation uses
+     * g00, g01 and g11 (upper triangle). g10 is not kept
+     * since it is equal to adj(g01).
+     *
+     * We _replace_ the matrix B with the Gram matrix, but we
+     * must keep b01 and b11 for computing the target vector.
+     */
+    t0 = b11 + n;
+    t1 = t0 + n;
+
+    memcpy(t0, b01, n * sizeof * b01);
+    PQCLEAN_FALCON1024_CLEAN_poly_mulselfadj_fft(t0, logn);    // t0 <- b01*adj(b01)
+
+    memcpy(t1, b00, n * sizeof * b00);
+    PQCLEAN_FALCON1024_CLEAN_poly_muladj_fft(t1, b10, logn);   // t1 <- b00*adj(b10)
+    PQCLEAN_FALCON1024_CLEAN_poly_mulselfadj_fft(b00, logn);   // b00 <- b00*adj(b00)
+    PQCLEAN_FALCON1024_CLEAN_poly_add(b00, t0, logn);      // b00 <- g00
+    memcpy(t0, b01, n * sizeof * b01);
+    PQCLEAN_FALCON1024_CLEAN_poly_muladj_fft(b01, b11, logn);  // b01 <- b01*adj(b11)
+    PQCLEAN_FALCON1024_CLEAN_poly_add(b01, t1, logn);      // b01 <- g01
+
+    PQCLEAN_FALCON1024_CLEAN_poly_mulselfadj_fft(b10, logn);   // b10 <- b10*adj(b10)
+    memcpy(t1, b11, n * sizeof * b11);
+    PQCLEAN_FALCON1024_CLEAN_poly_mulselfadj_fft(t1, logn);    // t1 <- b11*adj(b11)
+    PQCLEAN_FALCON1024_CLEAN_poly_add(b10, t1, logn);      // b10 <- g11
+
+    /*
+     * We rename variables to make things clearer. The three elements
+     * of the Gram matrix uses the first 3*n slots of tmp[], followed
+     * by b11 and b01 (in that order).
+     */
+    g00 = b00;
+    g01 = b01;
+    g11 = b10;
+    b01 = t0;
+    t0 = b01 + n;
+    t1 = t0 + n;
+
+    /*
+     * Memory layout at that point:
+     *   g00 g01 g11 b11 b01 t0 t1
+     */
+
+    /*
+     * Set the target vector to [hm, 0] (hm is the hashed message).
+     */
+    for (u = 0; u < n; u ++) {
+        t0[u] = fpr_of(hm[u]);
+        /* This is implicit.
+        t1[u] = fpr_zero;
+        */
+    }
+
+    /*
+     * Apply the lattice basis to obtain the real target
+     * vector (after normalization with regards to modulus).
+     */
+    PQCLEAN_FALCON1024_CLEAN_FFT(t0, logn);
+    ni = fpr_inverse_of_q;
+    memcpy(t1, t0, n * sizeof * t0);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(t1, b01, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_mulconst(t1, fpr_neg(ni), logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(t0, b11, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_mulconst(t0, ni, logn);
+
+    /*
+     * b01 and b11 can be discarded, so we move back (t0,t1).
+     * Memory layout is now:
+     *   g00 g01 g11 t0 t1
+     */
+    memcpy(b11, t0, n * 2 * sizeof * t0);
+    t0 = g11 + n;
+    t1 = t0 + n;
+
+    /*
+     * Apply sampling; result is written over (t0,t1).
+     */
+    ffSampling_fft_dyntree(samp, samp_ctx,
+                           t0, t1, g00, g01, g11, logn, t1 + n);
+
+    /*
+     * We arrange the layout back to:
+     *   b00 b01 b10 b11 t0 t1
+     *
+     * We did not conserve the matrix basis, so we must recompute
+     * it now.
+     */
+    b00 = tmp;
+    b01 = b00 + n;
+    b10 = b01 + n;
+    b11 = b10 + n;
+    memmove(b11 + n, t0, n * 2 * sizeof * t0);
+    t0 = b11 + n;
+    t1 = t0 + n;
+    smallints_to_fpr(b01, f, logn);
+    smallints_to_fpr(b00, g, logn);
+    smallints_to_fpr(b11, F, logn);
+    smallints_to_fpr(b10, G, logn);
+    PQCLEAN_FALCON1024_CLEAN_FFT(b01, logn);
+    PQCLEAN_FALCON1024_CLEAN_FFT(b00, logn);
+    PQCLEAN_FALCON1024_CLEAN_FFT(b11, logn);
+    PQCLEAN_FALCON1024_CLEAN_FFT(b10, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_neg(b01, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_neg(b11, logn);
+    tx = t1 + n;
+    ty = tx + n;
+
+    /*
+     * Get the lattice point corresponding to that tiny vector.
+     */
+    memcpy(tx, t0, n * sizeof * t0);
+    memcpy(ty, t1, n * sizeof * t1);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(tx, b00, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(ty, b10, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_add(tx, ty, logn);
+    memcpy(ty, t0, n * sizeof * t0);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(ty, b01, logn);
+
+    memcpy(t0, tx, n * sizeof * tx);
+    PQCLEAN_FALCON1024_CLEAN_poly_mul_fft(t1, b11, logn);
+    PQCLEAN_FALCON1024_CLEAN_poly_add(t1, ty, logn);
+    PQCLEAN_FALCON1024_CLEAN_iFFT(t0, logn);
+    PQCLEAN_FALCON1024_CLEAN_iFFT(t1, logn);
+
+    s1tmp = (int16_t *)tx;
+    sqn = 0;
+    ng = 0;
+    for (u = 0; u < n; u ++) {
+        int32_t z;
+
+        z = (int32_t)hm[u] - (int32_t)fpr_rint(t0[u]);
+        sqn += (uint32_t)(z * z);
+        /* ng records whether bit 31 of sqn was ever set (possible
+           wraparound of the squared norm). */
+        ng |= sqn;
+        s1tmp[u] = (int16_t)z;
+    }
+    /* Saturate sqn to 2^32-1 on potential overflow, so a wrapped
+       norm can never pass the shortness test. */
+    sqn |= -(ng >> 31);
+
+    /*
+     * With "normal" degrees (e.g. 512 or 1024), it is very
+     * improbable that the computed vector is not short enough;
+     * however, it may happen in practice for the very reduced
+     * versions (e.g. degree 16 or below). In that case, the caller
+     * will loop, and we must not write anything into s2[] because
+     * s2[] may overlap with the hashed message hm[] and we need
+     * hm[] for the next iteration.
+     */
+    s2tmp = (int16_t *)tmp;
+    for (u = 0; u < n; u ++) {
+        s2tmp[u] = (int16_t) - fpr_rint(t1[u]);
+    }
+    if (PQCLEAN_FALCON1024_CLEAN_is_short_half(sqn, s2tmp, logn)) {
+        memcpy(s2, s2tmp, n * sizeof * s2);
+        memcpy(tmp, s1tmp, n * sizeof * s1tmp);
+        return 1;
+    }
+    return 0;
+}
+
+/*
+ * Sample an integer value along a half-gaussian distribution centered
+ * on zero and standard deviation 1.8205, with a precision of 72 bits.
+ *
+ * The sampling is done by scanning a cumulative distribution table
+ * (CDT) in its entirety, in constant time: the returned z depends only
+ * on the random bits, never on data-dependent branches.
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_gaussian0_sampler(prng *p) {
+
+    /*
+     * CDT entries: each row of three 24-bit limbs (high, middle, low)
+     * encodes one 72-bit cumulative probability threshold.
+     */
+    static const uint32_t dist[] = {
+        10745844u,  3068844u,  3741698u,
+         5559083u,  1580863u,  8248194u,
+         2260429u, 13669192u,  2736639u,
+          708981u,  4421575u, 10046180u,
+          169348u,  7122675u,  4136815u,
+           30538u, 13063405u,  7650655u,
+            4132u, 14505003u,  7826148u,
+             417u, 16768101u, 11363290u,
+              31u,  8444042u,  8086568u,
+               1u, 12844466u,   265321u,
+               0u,  1232676u, 13644283u,
+               0u,    38047u,  9111839u,
+               0u,      870u,  6138264u,
+               0u,       14u, 12545723u,
+               0u,        0u,  3104126u,
+               0u,        0u,    28824u,
+               0u,        0u,      198u,
+               0u,        0u,        1u
+    };
+
+    uint32_t v0, v1, v2, hi;
+    uint64_t lo;
+    size_t u;
+    int z;
+
+    /*
+     * Get a random 72-bit value, into three 24-bit limbs v0..v2.
+     */
+    lo = prng_get_u64(p);
+    hi = prng_get_u8(p);
+    v0 = (uint32_t)lo & 0xFFFFFF;
+    v1 = (uint32_t)(lo >> 24) & 0xFFFFFF;
+    v2 = (uint32_t)(lo >> 48) | (hi << 16);
+
+    /*
+     * Sampled value is z, such that v0..v2 is lower than the first
+     * z elements of the table.
+     */
+    z = 0;
+    for (u = 0; u < (sizeof dist) / sizeof(dist[0]); u += 3) {
+        uint32_t w0, w1, w2, cc;
+
+        w0 = dist[u + 2];
+        w1 = dist[u + 1];
+        w2 = dist[u + 0];
+        /* 72-bit constant-time comparison: cc ends up 1 if
+           v0..v2 < w0..w2 (borrow propagated limb by limb). */
+        cc = (v0 - w0) >> 31;
+        cc = (v1 - w1 - cc) >> 31;
+        cc = (v2 - w2 - cc) >> 31;
+        z += (int)cc;
+    }
+    return z;
+
+}
+
+/*
+ * Sample a bit with probability exp(-x) for some x >= 0 (a Bernoulli
+ * trial, computed in a way that avoids data-dependent branches on the
+ * secret-dependent inputs x and ccs). Returns 1 or 0.
+ */
+static int
+BerExp(prng *p, fpr x, fpr ccs) {
+    int s, i;
+    fpr r;
+    uint32_t sw, w;
+    uint64_t z;
+
+    /*
+     * Reduce x modulo log(2): x = s*log(2) + r, with s an integer,
+     * and 0 <= r < log(2). Since x >= 0, we can use fpr_trunc().
+     */
+    s = (int)fpr_trunc(fpr_mul(x, fpr_inv_log2));
+    r = fpr_sub(x, fpr_mul(fpr_of(s), fpr_log2));
+
+    /*
+     * It may happen (quite rarely) that s >= 64; if sigma = 1.2
+     * (the minimum value for sigma), r = 0 and b = 1, then we get
+     * s >= 64 if the half-Gaussian produced a z >= 13, which happens
+     * with probability about 0.000000000230383991, which is
+     * approximately equal to 2^(-32). In any case, if s >= 64,
+     * then BerExp will be non-zero with probability less than
+     * 2^(-64), so we can simply saturate s at 63.
+     */
+    sw = (uint32_t)s;
+    /* Branchless min(s, 63). */
+    sw ^= (sw ^ 63) & -((63 - sw) >> 31);
+    s = (int)sw;
+
+    /*
+     * Compute exp(-r); we know that 0 <= r < log(2) at this point, so
+     * we can use fpr_expm_p63(), which yields a result scaled to 2^63.
+     * We scale it up to 2^64, then right-shift it by s bits because
+     * we really want exp(-x) = 2^(-s)*exp(-r).
+     *
+     * The "-1" operation makes sure that the value fits on 64 bits
+     * (i.e. if r = 0, we may get 2^64, and we prefer 2^64-1 in that
+     * case). The bias is negligible since fpr_expm_p63() only computes
+     * with 51 bits of precision or so.
+     */
+    z = ((fpr_expm_p63(r, ccs) << 1) - 1) >> s;
+
+    /*
+     * Sample a bit with probability exp(-x). Since x = s*log(2) + r,
+     * exp(-x) = 2^-s * exp(-r), we compare lazily exp(-x) with the
+     * PRNG output to limit its consumption, the sign of the difference
+     * yields the expected result.
+     */
+    i = 64;
+    do {
+        i -= 8;
+        /* Compare one byte at a time, most significant first; w is
+           nonzero as soon as the bytes differ. */
+        w = prng_get_u8(p) - ((uint32_t)(z >> i) & 0xFF);
+    } while (!w && i > 0);
+    /* Sign bit of the first differing byte: 1 iff PRNG < z. */
+    return (int)(w >> 31);
+}
+
+/*
+ * The sampler produces a random integer that follows a discrete Gaussian
+ * distribution, centered on mu, and with standard deviation sigma. The
+ * provided parameter isigma is equal to 1/sigma.
+ *
+ * The value of sigma MUST lie between 1 and 2 (i.e. isigma lies between
+ * 0.5 and 1); in Falcon, sigma should always be between 1.2 and 1.9.
+ *
+ * ctx must point to a sampler_context (PRNG state + sigma_min); this
+ * function matches the samplerZ callback type.
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_sampler(void *ctx, fpr mu, fpr isigma) {
+    sampler_context *spc;
+    int s, z0, z, b;
+    fpr r, dss, ccs, x;
+
+    spc = ctx;
+
+    /*
+     * Center is mu. We compute mu = s + r where s is an integer
+     * and 0 <= r < 1.
+     */
+    s = (int)fpr_floor(mu);
+    r = fpr_sub(mu, fpr_of(s));
+
+    /*
+     * dss = 1/(2*sigma^2) = 0.5*(isigma^2).
+     */
+    dss = fpr_half(fpr_sqr(isigma));
+
+    /*
+     * ccs = sigma_min / sigma = sigma_min * isigma.
+     */
+    ccs = fpr_mul(isigma, spc->sigma_min);
+
+    /*
+     * We now need to sample on center r.
+     */
+    for (;;) {
+        /*
+         * Sample z for a Gaussian distribution. Then get a
+         * random bit b to turn the sampling into a bimodal
+         * distribution: if b = 1, we use z+1, otherwise we
+         * use -z. We thus have two situations:
+         *
+         *  - b = 1: z >= 1 and sampled against a Gaussian
+         *    centered on 1.
+         *  - b = 0: z <= 0 and sampled against a Gaussian
+         *    centered on 0.
+         */
+        z0 = PQCLEAN_FALCON1024_CLEAN_gaussian0_sampler(&spc->p);
+        b = (int)prng_get_u8(&spc->p) & 1;
+        /* z = b + z0 if b == 1, z = -z0 if b == 0 (branchless). */
+        z = b + ((b << 1) - 1) * z0;
+
+        /*
+         * Rejection sampling. We want a Gaussian centered on r;
+         * but we sampled against a Gaussian centered on b (0 or
+         * 1). But we know that z is always in the range where
+         * our sampling distribution is greater than the Gaussian
+         * distribution, so rejection works.
+         *
+         * We got z with distribution:
+         *    G(z) = exp(-((z-b)^2)/(2*sigma0^2))
+         * We target distribution:
+         *    S(z) = exp(-((z-r)^2)/(2*sigma^2))
+         * Rejection sampling works by keeping the value z with
+         * probability S(z)/G(z), and starting again otherwise.
+         * This requires S(z) <= G(z), which is the case here.
+         * Thus, we simply need to keep our z with probability:
+         *    P = exp(-x)
+         * where:
+         *    x = ((z-r)^2)/(2*sigma^2) - ((z-b)^2)/(2*sigma0^2)
+         *
+         * Here, we scale up the Bernoulli distribution, which
+         * makes rejection more probable, but makes rejection
+         * rate sufficiently decorrelated from the Gaussian
+         * center and standard deviation that the whole sampler
+         * can be said to be constant-time.
+         */
+        x = fpr_mul(fpr_sqr(fpr_sub(fpr_of(z), r)), dss);
+        x = fpr_sub(x, fpr_mul(fpr_of(z0 * z0), fpr_inv_2sqrsigma0));
+        if (BerExp(&spc->p, x, ccs)) {
+            /*
+             * Rejection sampling was centered on r, but the
+             * actual center is mu = s + r.
+             */
+            return s + z;
+        }
+    }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON1024_CLEAN_sign_tree(int16_t *sig, inner_shake256_context *rng,
+                                   const fpr *expanded_key,
+                                   const uint16_t *hm, unsigned logn, uint8_t *tmp) {
+    fpr *ftmp;
+
+    /* NOTE(review): tmp is reinterpreted as an fpr array; the caller
+       is presumably expected to supply suitably aligned storage
+       (see inner.h) — confirm against the caller's contract. */
+    ftmp = (fpr *)tmp;
+    /* Retry until do_sign_tree produces a vector short enough to be
+       a valid signature (each attempt uses fresh PRNG output). */
+    for (;;) {
+        /*
+         * Signature produces short vectors s1 and s2. The
+         * signature is acceptable only if the aggregate vector
+         * s1,s2 is short; we must use the same bound as the
+         * verifier.
+         *
+         * If the signature is acceptable, then we return only s2
+         * (the verifier recomputes s1 from s2, the hashed message,
+         * and the public key).
+         */
+        sampler_context spc;
+        samplerZ samp;
+        void *samp_ctx;
+
+        /*
+         * Normal sampling. We use a fast PRNG seeded from our
+         * SHAKE context ('rng').
+         */
+        if (logn == 10) {
+            spc.sigma_min = fpr_sigma_min_10;
+        } else {
+            spc.sigma_min = fpr_sigma_min_9;
+        }
+        PQCLEAN_FALCON1024_CLEAN_prng_init(&spc.p, rng);
+        samp = PQCLEAN_FALCON1024_CLEAN_sampler;
+        samp_ctx = &spc;
+
+        /*
+         * Do the actual signature.
+         */
+        if (do_sign_tree(samp, samp_ctx, sig,
+                         expanded_key, hm, logn, ftmp)) {
+            break;
+        }
+    }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON1024_CLEAN_sign_dyn(int16_t *sig, inner_shake256_context *rng,
+                                  const int8_t *f, const int8_t *g,
+                                  const int8_t *F, const int8_t *G,
+                                  const uint16_t *hm, unsigned logn, uint8_t *tmp) {
+    fpr *ftmp;
+
+    /* NOTE(review): tmp is reinterpreted as an fpr array; the caller
+       is presumably expected to supply suitably aligned storage
+       (see inner.h) — confirm against the caller's contract. */
+    ftmp = (fpr *)tmp;
+    /* Retry until do_sign_dyn produces a vector short enough to be
+       a valid signature (each attempt uses fresh PRNG output). */
+    for (;;) {
+        /*
+         * Signature produces short vectors s1 and s2. The
+         * signature is acceptable only if the aggregate vector
+         * s1,s2 is short; we must use the same bound as the
+         * verifier.
+         *
+         * If the signature is acceptable, then we return only s2
+         * (the verifier recomputes s1 from s2, the hashed message,
+         * and the public key).
+         */
+        sampler_context spc;
+        samplerZ samp;
+        void *samp_ctx;
+
+        /*
+         * Normal sampling. We use a fast PRNG seeded from our
+         * SHAKE context ('rng').
+         */
+        if (logn == 10) {
+            spc.sigma_min = fpr_sigma_min_10;
+        } else {
+            spc.sigma_min = fpr_sigma_min_9;
+        }
+        PQCLEAN_FALCON1024_CLEAN_prng_init(&spc.p, rng);
+        samp = PQCLEAN_FALCON1024_CLEAN_sampler;
+        samp_ctx = &spc;
+
+        /*
+         * Do the actual signature.
+         */
+        if (do_sign_dyn(samp, samp_ctx, sig,
+                        f, g, F, G, hm, logn, ftmp)) {
+            break;
+        }
+    }
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/vrfy.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/vrfy.c
new file mode 100644
index 000000000..93f2d526b
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-1024_clean/vrfy.c
@@ -0,0 +1,853 @@
+#include "inner.h"
+
+/*
+ * Falcon signature verification.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+/* ===================================================================== */
+/*
+ * Constants for NTT.
+ *
+ * n = 2^logn (2 <= n <= 1024)
+ * phi = X^n + 1
+ * q = 12289
+ * q0i = -1/q mod 2^16
+ * R = 2^16 mod q
+ * R2 = 2^32 mod q
+ */
+
+#define Q 12289 /* the prime modulus q */
+#define Q0I 12287 /* -1/q mod 2^16 (Montgomery reduction constant) */
+#define R 4091 /* 2^16 mod q (Montgomery factor) */
+#define R2 10952 /* 2^32 mod q (used to enter Montgomery representation) */
+
+/*
+ * Table for NTT, binary case:
+ * GMb[x] = R*(g^rev(x)) mod q
+ * where g = 7 (it is a 2048-th primitive root of 1 modulo q)
+ * and rev() is the bit-reversal function over 10 bits.
+ */
+/*
+ * 1024 precomputed twiddle factors (bit-reversed index); entry
+ * GMb[m + i] is read by mq_NTT() and carries the Montgomery factor
+ * R, which mq_montymul() cancels (see the comment above this table).
+ */
+static const uint16_t GMb[] = {
+    4091, 7888, 11060, 11208, 6960, 4342, 6275, 9759,
+    1591, 6399, 9477, 5266, 586, 5825, 7538, 9710,
+    1134, 6407, 1711, 965, 7099, 7674, 3743, 6442,
+    10414, 8100, 1885, 1688, 1364, 10329, 10164, 9180,
+    12210, 6240, 997, 117, 4783, 4407, 1549, 7072,
+    2829, 6458, 4431, 8877, 7144, 2564, 5664, 4042,
+    12189, 432, 10751, 1237, 7610, 1534, 3983, 7863,
+    2181, 6308, 8720, 6570, 4843, 1690, 14, 3872,
+    5569, 9368, 12163, 2019, 7543, 2315, 4673, 7340,
+    1553, 1156, 8401, 11389, 1020, 2967, 10772, 7045,
+    3316, 11236, 5285, 11578, 10637, 10086, 9493, 6180,
+    9277, 6130, 3323, 883, 10469, 489, 1502, 2851,
+    11061, 9729, 2742, 12241, 4970, 10481, 10078, 1195,
+    730, 1762, 3854, 2030, 5892, 10922, 9020, 5274,
+    9179, 3604, 3782, 10206, 3180, 3467, 4668, 2446,
+    7613, 9386, 834, 7703, 6836, 3403, 5351, 12276,
+    3580, 1739, 10820, 9787, 10209, 4070, 12250, 8525,
+    10401, 2749, 7338, 10574, 6040, 943, 9330, 1477,
+    6865, 9668, 3585, 6633, 12145, 4063, 3684, 7680,
+    8188, 6902, 3533, 9807, 6090, 727, 10099, 7003,
+    6945, 1949, 9731, 10559, 6057, 378, 7871, 8763,
+    8901, 9229, 8846, 4551, 9589, 11664, 7630, 8821,
+    5680, 4956, 6251, 8388, 10156, 8723, 2341, 3159,
+    1467, 5460, 8553, 7783, 2649, 2320, 9036, 6188,
+    737, 3698, 4699, 5753, 9046, 3687, 16, 914,
+    5186, 10531, 4552, 1964, 3509, 8436, 7516, 5381,
+    10733, 3281, 7037, 1060, 2895, 7156, 8887, 5357,
+    6409, 8197, 2962, 6375, 5064, 6634, 5625, 278,
+    932, 10229, 8927, 7642, 351, 9298, 237, 5858,
+    7692, 3146, 12126, 7586, 2053, 11285, 3802, 5204,
+    4602, 1748, 11300, 340, 3711, 4614, 300, 10993,
+    5070, 10049, 11616, 12247, 7421, 10707, 5746, 5654,
+    3835, 5553, 1224, 8476, 9237, 3845, 250, 11209,
+    4225, 6326, 9680, 12254, 4136, 2778, 692, 8808,
+    6410, 6718, 10105, 10418, 3759, 7356, 11361, 8433,
+    6437, 3652, 6342, 8978, 5391, 2272, 6476, 7416,
+    8418, 10824, 11986, 5733, 876, 7030, 2167, 2436,
+    3442, 9217, 8206, 4858, 5964, 2746, 7178, 1434,
+    7389, 8879, 10661, 11457, 4220, 1432, 10832, 4328,
+    8557, 1867, 9454, 2416, 3816, 9076, 686, 5393,
+    2523, 4339, 6115, 619, 937, 2834, 7775, 3279,
+    2363, 7488, 6112, 5056, 824, 10204, 11690, 1113,
+    2727, 9848, 896, 2028, 5075, 2654, 10464, 7884,
+    12169, 5434, 3070, 6400, 9132, 11672, 12153, 4520,
+    1273, 9739, 11468, 9937, 10039, 9720, 2262, 9399,
+    11192, 315, 4511, 1158, 6061, 6751, 11865, 357,
+    7367, 4550, 983, 8534, 8352, 10126, 7530, 9253,
+    4367, 5221, 3999, 8777, 3161, 6990, 4130, 11652,
+    3374, 11477, 1753, 292, 8681, 2806, 10378, 12188,
+    5800, 11811, 3181, 1988, 1024, 9340, 2477, 10928,
+    4582, 6750, 3619, 5503, 5233, 2463, 8470, 7650,
+    7964, 6395, 1071, 1272, 3474, 11045, 3291, 11344,
+    8502, 9478, 9837, 1253, 1857, 6233, 4720, 11561,
+    6034, 9817, 3339, 1797, 2879, 6242, 5200, 2114,
+    7962, 9353, 11363, 5475, 6084, 9601, 4108, 7323,
+    10438, 9471, 1271, 408, 6911, 3079, 360, 8276,
+    11535, 9156, 9049, 11539, 850, 8617, 784, 7919,
+    8334, 12170, 1846, 10213, 12184, 7827, 11903, 5600,
+    9779, 1012, 721, 2784, 6676, 6552, 5348, 4424,
+    6816, 8405, 9959, 5150, 2356, 5552, 5267, 1333,
+    8801, 9661, 7308, 5788, 4910, 909, 11613, 4395,
+    8238, 6686, 4302, 3044, 2285, 12249, 1963, 9216,
+    4296, 11918, 695, 4371, 9793, 4884, 2411, 10230,
+    2650, 841, 3890, 10231, 7248, 8505, 11196, 6688,
+    4059, 6060, 3686, 4722, 11853, 5816, 7058, 6868,
+    11137, 7926, 4894, 12284, 4102, 3908, 3610, 6525,
+    7938, 7982, 11977, 6755, 537, 4562, 1623, 8227,
+    11453, 7544, 906, 11816, 9548, 10858, 9703, 2815,
+    11736, 6813, 6979, 819, 8903, 6271, 10843, 348,
+    7514, 8339, 6439, 694, 852, 5659, 2781, 3716,
+    11589, 3024, 1523, 8659, 4114, 10738, 3303, 5885,
+    2978, 7289, 11884, 9123, 9323, 11830, 98, 2526,
+    2116, 4131, 11407, 1844, 3645, 3916, 8133, 2224,
+    10871, 8092, 9651, 5989, 7140, 8480, 1670, 159,
+    10923, 4918, 128, 7312, 725, 9157, 5006, 6393,
+    3494, 6043, 10972, 6181, 11838, 3423, 10514, 7668,
+    3693, 6658, 6905, 11953, 10212, 11922, 9101, 8365,
+    5110, 45, 2400, 1921, 4377, 2720, 1695, 51,
+    2808, 650, 1896, 9997, 9971, 11980, 8098, 4833,
+    4135, 4257, 5838, 4765, 10985, 11532, 590, 12198,
+    482, 12173, 2006, 7064, 10018, 3912, 12016, 10519,
+    11362, 6954, 2210, 284, 5413, 6601, 3865, 10339,
+    11188, 6231, 517, 9564, 11281, 3863, 1210, 4604,
+    8160, 11447, 153, 7204, 5763, 5089, 9248, 12154,
+    11748, 1354, 6672, 179, 5532, 2646, 5941, 12185,
+    862, 3158, 477, 7279, 5678, 7914, 4254, 302,
+    2893, 10114, 6890, 9560, 9647, 11905, 4098, 9824,
+    10269, 1353, 10715, 5325, 6254, 3951, 1807, 6449,
+    5159, 1308, 8315, 3404, 1877, 1231, 112, 6398,
+    11724, 12272, 7286, 1459, 12274, 9896, 3456, 800,
+    1397, 10678, 103, 7420, 7976, 936, 764, 632,
+    7996, 8223, 8445, 7758, 10870, 9571, 2508, 1946,
+    6524, 10158, 1044, 4338, 2457, 3641, 1659, 4139,
+    4688, 9733, 11148, 3946, 2082, 5261, 2036, 11850,
+    7636, 12236, 5366, 2380, 1399, 7720, 2100, 3217,
+    10912, 8898, 7578, 11995, 2791, 1215, 3355, 2711,
+    2267, 2004, 8568, 10176, 3214, 2337, 1750, 4729,
+    4997, 7415, 6315, 12044, 4374, 7157, 4844, 211,
+    8003, 10159, 9290, 11481, 1735, 2336, 5793, 9875,
+    8192, 986, 7527, 1401, 870, 3615, 8465, 2756,
+    9770, 2034, 10168, 3264, 6132, 54, 2880, 4763,
+    11805, 3074, 8286, 9428, 4881, 6933, 1090, 10038,
+    2567, 708, 893, 6465, 4962, 10024, 2090, 5718,
+    10743, 780, 4733, 4623, 2134, 2087, 4802, 884,
+    5372, 5795, 5938, 4333, 6559, 7549, 5269, 10664,
+    4252, 3260, 5917, 10814, 5768, 9983, 8096, 7791,
+    6800, 7491, 6272, 1907, 10947, 6289, 11803, 6032,
+    11449, 1171, 9201, 7933, 2479, 7970, 11337, 7062,
+    8911, 6728, 6542, 8114, 8828, 6595, 3545, 4348,
+    4610, 2205, 6999, 8106, 5560, 10390, 9321, 2499,
+    2413, 7272, 6881, 10582, 9308, 9437, 3554, 3326,
+    5991, 11969, 3415, 12283, 9838, 12063, 4332, 7830,
+    11329, 6605, 12271, 2044, 11611, 7353, 11201, 11582,
+    3733, 8943, 9978, 1627, 7168, 3935, 5050, 2762,
+    7496, 10383, 755, 1654, 12053, 4952, 10134, 4394,
+    6592, 7898, 7497, 8904, 12029, 3581, 10748, 5674,
+    10358, 4901, 7414, 8771, 710, 6764, 8462, 7193,
+    5371, 7274, 11084, 290, 7864, 6827, 11822, 2509,
+    6578, 4026, 5807, 1458, 5721, 5762, 4178, 2105,
+    11621, 4852, 8897, 2856, 11510, 9264, 2520, 8776,
+    7011, 2647, 1898, 7039, 5950, 11163, 5488, 6277,
+    9182, 11456, 633, 10046, 11554, 5633, 9587, 2333,
+    7008, 7084, 5047, 7199, 9865, 8997, 569, 6390,
+    10845, 9679, 8268, 11472, 4203, 1997, 2, 9331,
+    162, 6182, 2000, 3649, 9792, 6363, 7557, 6187,
+    8510, 9935, 5536, 9019, 3706, 12009, 1452, 3067,
+    5494, 9692, 4865, 6019, 7106, 9610, 4588, 10165,
+    6261, 5887, 2652, 10172, 1580, 10379, 4638, 9949
+};
+
+/*
+ * Table for inverse NTT, binary case:
+ * iGMb[x] = R*((1/g)^rev(x)) mod q
+ * Since g = 7, 1/g = 8778 mod 12289.
+ */
+/*
+ * 1024 precomputed inverse twiddle factors (bit-reversed index);
+ * entry iGMb[hm + i] is read by mq_iNTT() and, like GMb[], carries
+ * the Montgomery factor R (see the comment above this table).
+ */
+static const uint16_t iGMb[] = {
+    4091, 4401, 1081, 1229, 2530, 6014, 7947, 5329,
+    2579, 4751, 6464, 11703, 7023, 2812, 5890, 10698,
+    3109, 2125, 1960, 10925, 10601, 10404, 4189, 1875,
+    5847, 8546, 4615, 5190, 11324, 10578, 5882, 11155,
+    8417, 12275, 10599, 7446, 5719, 3569, 5981, 10108,
+    4426, 8306, 10755, 4679, 11052, 1538, 11857, 100,
+    8247, 6625, 9725, 5145, 3412, 7858, 5831, 9460,
+    5217, 10740, 7882, 7506, 12172, 11292, 6049, 79,
+    13, 6938, 8886, 5453, 4586, 11455, 2903, 4676,
+    9843, 7621, 8822, 9109, 2083, 8507, 8685, 3110,
+    7015, 3269, 1367, 6397, 10259, 8435, 10527, 11559,
+    11094, 2211, 1808, 7319, 48, 9547, 2560, 1228,
+    9438, 10787, 11800, 1820, 11406, 8966, 6159, 3012,
+    6109, 2796, 2203, 1652, 711, 7004, 1053, 8973,
+    5244, 1517, 9322, 11269, 900, 3888, 11133, 10736,
+    4949, 7616, 9974, 4746, 10270, 126, 2921, 6720,
+    6635, 6543, 1582, 4868, 42, 673, 2240, 7219,
+    1296, 11989, 7675, 8578, 11949, 989, 10541, 7687,
+    7085, 8487, 1004, 10236, 4703, 163, 9143, 4597,
+    6431, 12052, 2991, 11938, 4647, 3362, 2060, 11357,
+    12011, 6664, 5655, 7225, 5914, 9327, 4092, 5880,
+    6932, 3402, 5133, 9394, 11229, 5252, 9008, 1556,
+    6908, 4773, 3853, 8780, 10325, 7737, 1758, 7103,
+    11375, 12273, 8602, 3243, 6536, 7590, 8591, 11552,
+    6101, 3253, 9969, 9640, 4506, 3736, 6829, 10822,
+    9130, 9948, 3566, 2133, 3901, 6038, 7333, 6609,
+    3468, 4659, 625, 2700, 7738, 3443, 3060, 3388,
+    3526, 4418, 11911, 6232, 1730, 2558, 10340, 5344,
+    5286, 2190, 11562, 6199, 2482, 8756, 5387, 4101,
+    4609, 8605, 8226, 144, 5656, 8704, 2621, 5424,
+    10812, 2959, 11346, 6249, 1715, 4951, 9540, 1888,
+    3764, 39, 8219, 2080, 2502, 1469, 10550, 8709,
+    5601, 1093, 3784, 5041, 2058, 8399, 11448, 9639,
+    2059, 9878, 7405, 2496, 7918, 11594, 371, 7993,
+    3073, 10326, 40, 10004, 9245, 7987, 5603, 4051,
+    7894, 676, 11380, 7379, 6501, 4981, 2628, 3488,
+    10956, 7022, 6737, 9933, 7139, 2330, 3884, 5473,
+    7865, 6941, 5737, 5613, 9505, 11568, 11277, 2510,
+    6689, 386, 4462, 105, 2076, 10443, 119, 3955,
+    4370, 11505, 3672, 11439, 750, 3240, 3133, 754,
+    4013, 11929, 9210, 5378, 11881, 11018, 2818, 1851,
+    4966, 8181, 2688, 6205, 6814, 926, 2936, 4327,
+    10175, 7089, 6047, 9410, 10492, 8950, 2472, 6255,
+    728, 7569, 6056, 10432, 11036, 2452, 2811, 3787,
+    945, 8998, 1244, 8815, 11017, 11218, 5894, 4325,
+    4639, 3819, 9826, 7056, 6786, 8670, 5539, 7707,
+    1361, 9812, 2949, 11265, 10301, 9108, 478, 6489,
+    101, 1911, 9483, 3608, 11997, 10536, 812, 8915,
+    637, 8159, 5299, 9128, 3512, 8290, 7068, 7922,
+    3036, 4759, 2163, 3937, 3755, 11306, 7739, 4922,
+    11932, 424, 5538, 6228, 11131, 7778, 11974, 1097,
+    2890, 10027, 2569, 2250, 2352, 821, 2550, 11016,
+    7769, 136, 617, 3157, 5889, 9219, 6855, 120,
+    4405, 1825, 9635, 7214, 10261, 11393, 2441, 9562,
+    11176, 599, 2085, 11465, 7233, 6177, 4801, 9926,
+    9010, 4514, 9455, 11352, 11670, 6174, 7950, 9766,
+    6896, 11603, 3213, 8473, 9873, 2835, 10422, 3732,
+    7961, 1457, 10857, 8069, 832, 1628, 3410, 4900,
+    10855, 5111, 9543, 6325, 7431, 4083, 3072, 8847,
+    9853, 10122, 5259, 11413, 6556, 303, 1465, 3871,
+    4873, 5813, 10017, 6898, 3311, 5947, 8637, 5852,
+    3856, 928, 4933, 8530, 1871, 2184, 5571, 5879,
+    3481, 11597, 9511, 8153, 35, 2609, 5963, 8064,
+    1080, 12039, 8444, 3052, 3813, 11065, 6736, 8454,
+    2340, 7651, 1910, 10709, 2117, 9637, 6402, 6028,
+    2124, 7701, 2679, 5183, 6270, 7424, 2597, 6795,
+    9222, 10837, 280, 8583, 3270, 6753, 2354, 3779,
+    6102, 4732, 5926, 2497, 8640, 10289, 6107, 12127,
+    2958, 12287, 10292, 8086, 817, 4021, 2610, 1444,
+    5899, 11720, 3292, 2424, 5090, 7242, 5205, 5281,
+    9956, 2702, 6656, 735, 2243, 11656, 833, 3107,
+    6012, 6801, 1126, 6339, 5250, 10391, 9642, 5278,
+    3513, 9769, 3025, 779, 9433, 3392, 7437, 668,
+    10184, 8111, 6527, 6568, 10831, 6482, 8263, 5711,
+    9780, 467, 5462, 4425, 11999, 1205, 5015, 6918,
+    5096, 3827, 5525, 11579, 3518, 4875, 7388, 1931,
+    6615, 1541, 8708, 260, 3385, 4792, 4391, 5697,
+    7895, 2155, 7337, 236, 10635, 11534, 1906, 4793,
+    9527, 7239, 8354, 5121, 10662, 2311, 3346, 8556,
+    707, 1088, 4936, 678, 10245, 18, 5684, 960,
+    4459, 7957, 226, 2451, 6, 8874, 320, 6298,
+    8963, 8735, 2852, 2981, 1707, 5408, 5017, 9876,
+    9790, 2968, 1899, 6729, 4183, 5290, 10084, 7679,
+    7941, 8744, 5694, 3461, 4175, 5747, 5561, 3378,
+    5227, 952, 4319, 9810, 4356, 3088, 11118, 840,
+    6257, 486, 6000, 1342, 10382, 6017, 4798, 5489,
+    4498, 4193, 2306, 6521, 1475, 6372, 9029, 8037,
+    1625, 7020, 4740, 5730, 7956, 6351, 6494, 6917,
+    11405, 7487, 10202, 10155, 7666, 7556, 11509, 1546,
+    6571, 10199, 2265, 7327, 5824, 11396, 11581, 9722,
+    2251, 11199, 5356, 7408, 2861, 4003, 9215, 484,
+    7526, 9409, 12235, 6157, 9025, 2121, 10255, 2519,
+    9533, 3824, 8674, 11419, 10888, 4762, 11303, 4097,
+    2414, 6496, 9953, 10554, 808, 2999, 2130, 4286,
+    12078, 7445, 5132, 7915, 245, 5974, 4874, 7292,
+    7560, 10539, 9952, 9075, 2113, 3721, 10285, 10022,
+    9578, 8934, 11074, 9498, 294, 4711, 3391, 1377,
+    9072, 10189, 4569, 10890, 9909, 6923, 53, 4653,
+    439, 10253, 7028, 10207, 8343, 1141, 2556, 7601,
+    8150, 10630, 8648, 9832, 7951, 11245, 2131, 5765,
+    10343, 9781, 2718, 1419, 4531, 3844, 4066, 4293,
+    11657, 11525, 11353, 4313, 4869, 12186, 1611, 10892,
+    11489, 8833, 2393, 15, 10830, 5003, 17, 565,
+    5891, 12177, 11058, 10412, 8885, 3974, 10981, 7130,
+    5840, 10482, 8338, 6035, 6964, 1574, 10936, 2020,
+    2465, 8191, 384, 2642, 2729, 5399, 2175, 9396,
+    11987, 8035, 4375, 6611, 5010, 11812, 9131, 11427,
+    104, 6348, 9643, 6757, 12110, 5617, 10935, 541,
+    135, 3041, 7200, 6526, 5085, 12136, 842, 4129,
+    7685, 11079, 8426, 1008, 2725, 11772, 6058, 1101,
+    1950, 8424, 5688, 6876, 12005, 10079, 5335, 927,
+    1770, 273, 8377, 2271, 5225, 10283, 116, 11807,
+    91, 11699, 757, 1304, 7524, 6451, 8032, 8154,
+    7456, 4191, 309, 2318, 2292, 10393, 11639, 9481,
+    12238, 10594, 9569, 7912, 10368, 9889, 12244, 7179,
+    3924, 3188, 367, 2077, 336, 5384, 5631, 8596,
+    4621, 1775, 8866, 451, 6108, 1317, 6246, 8795,
+    5896, 7283, 3132, 11564, 4977, 12161, 7371, 1366,
+    12130, 10619, 3809, 5149, 6300, 2638, 4197, 1418,
+    10065, 4156, 8373, 8644, 10445, 882, 8158, 10173,
+    9763, 12191, 459, 2966, 3166, 405, 5000, 9311,
+    6404, 8986, 1551, 8175, 3630, 10766, 9265, 700,
+    8573, 9508, 6630, 11437, 11595, 5850, 3950, 4775,
+    11941, 1446, 6018, 3386, 11470, 5310, 5476, 553,
+    9474, 2586, 1431, 2741, 473, 11383, 4745, 836,
+    4062, 10666, 7727, 11752, 5534, 312, 4307, 4351,
+    5764, 8679, 8381, 8187, 5, 7395, 4363, 1152,
+    5421, 5231, 6473, 436, 7567, 8603, 6229, 8230
+};
+
+/*
+ * Map a small signed integer to its representative modulo q, in the
+ * 0..q-1 range. The input MUST lie in [-q/2, +q/2]. Branch-free.
+ */
+static inline uint32_t
+mq_conv_small(int x) {
+    /*
+     * Casting a negative x to uint32_t sets the top bit; in that
+     * case (r >> 31) is 1, so the mask -(r >> 31) is all-ones and
+     * q is added back exactly when the input was negative.
+     */
+    uint32_t r;
+
+    r = (uint32_t)x;
+    r += Q & -(r >> 31);
+    return r;
+}
+
+/*
+ * Modular addition: returns (x + y) mod q for operands already in
+ * the 0..q-1 range. Branch-free (constant-time).
+ */
+static inline uint32_t
+mq_add(uint32_t x, uint32_t y) {
+    /*
+     * Compute x + y - q. If that wrapped below zero, the top bit
+     * of z is set, so -(z >> 31) is an all-ones mask and q is
+     * added back; otherwise the mask is zero. Either way the
+     * result lands in 0..q-1.
+     */
+    uint32_t z;
+
+    z = x + y - Q;
+    z += Q & -(z >> 31);
+    return z;
+}
+
+/*
+ * Modular subtraction: returns (x - y) mod q for operands already
+ * in the 0..q-1 range. Branch-free (constant-time).
+ */
+static inline uint32_t
+mq_sub(uint32_t x, uint32_t y) {
+    /*
+     * x - y wraps below zero exactly when x < y; the top bit of z
+     * is then set and the mask -(z >> 31) selects a corrective
+     * addition of q, bringing the result back into 0..q-1.
+     */
+    uint32_t z;
+
+    z = x - y;
+    z += Q & -(z >> 31);
+    return z;
+}
+
+/*
+ * Halving modulo q: returns x/2 mod q for x in 0..q-1. Since q is
+ * odd, adding q to an odd x makes it even without changing its
+ * residue class; the mask -(x & 1) selects that addition without
+ * a branch, and the shift then performs the exact division by 2.
+ */
+static inline uint32_t
+mq_rshift1(uint32_t x) {
+    return (x + (Q & -(x & 1))) >> 1;
+}
+
+/*
+ * Montgomery multiplication modulo q. If we set R = 2^16 mod q, then
+ * this function computes: x * y / R mod q
+ * Operands must be in the 0..q-1 range.
+ */
+static inline uint32_t
+mq_montymul(uint32_t x, uint32_t y) {
+    uint32_t z, w;
+
+    /*
+     * We compute x*y + k*q with a value of k chosen so that the 16
+     * low bits of the result are 0. We can then shift the value.
+     * After the shift, result may still be larger than q, but it
+     * will be lower than 2*q, so a conditional subtraction works.
+     */
+
+    z = x * y;
+    /* k = (z * (-1/q)) mod 2^16, with Q0I = -1/q mod 2^16. */
+    w = ((z * Q0I) & 0xFFFF) * Q;
+
+    /*
+     * When adding z and w, the result will have its low 16 bits
+     * equal to 0. Since x and y are lower than q, we have
+     * z < q^2 and w < 2^16 * q, so z + w < q^2 + 2^16 * q,
+     * which fits on 30 bits (q = 12289); the uint32_t addition
+     * therefore cannot overflow.
+     */
+    z = (z + w) >> 16;
+
+    /*
+     * After the shift, analysis shows that the value will be less
+     * than 2q. We do a subtraction then conditional subtraction to
+     * ensure the result is in the expected range.
+     */
+    z -= Q;
+    z += Q & -(z >> 31);
+    return z;
+}
+
+/*
+ * Montgomery squaring (computes (x^2)/R).
+ */
+static inline uint32_t
+mq_montysqr(uint32_t x) {
+    /* Thin wrapper: pass the same operand twice to mq_montymul(). */
+    return mq_montymul(x, x);
+}
+
+/*
+ * Divide x by y modulo q = 12289.
+ */
+static inline uint32_t
+mq_div_12289(uint32_t x, uint32_t y) {
+    /*
+     * We invert y by computing y^(q-2) mod q.
+     *
+     * (If y == 0, every intermediate product below is 0 and the
+     * function returns 0; callers that need an invertibility check
+     * test for zero coefficients separately.)
+     *
+     * We use the following addition chain for exponent e = 12287:
+     *
+     *   e0 = 1
+     *   e1 = 2 * e0 = 2
+     *   e2 = e1 + e0 = 3
+     *   e3 = e2 + e1 = 5
+     *   e4 = 2 * e3 = 10
+     *   e5 = 2 * e4 = 20
+     *   e6 = 2 * e5 = 40
+     *   e7 = 2 * e6 = 80
+     *   e8 = 2 * e7 = 160
+     *   e9 = e8 + e2 = 163
+     *   e10 = e9 + e8 = 323
+     *   e11 = 2 * e10 = 646
+     *   e12 = 2 * e11 = 1292
+     *   e13 = e12 + e9 = 1455
+     *   e14 = 2 * e13 = 2910
+     *   e15 = 2 * e14 = 5820
+     *   e16 = e15 + e10 = 6143
+     *   e17 = 2 * e16 = 12286
+     *   e18 = e17 + e0 = 12287
+     *
+     * Additions on exponents are converted to Montgomery
+     * multiplications. We define all intermediate results as so
+     * many local variables, and let the C compiler work out which
+     * must be kept around.
+     */
+    uint32_t y0, y1, y2, y3, y4, y5, y6, y7, y8, y9;
+    uint32_t y10, y11, y12, y13, y14, y15, y16, y17, y18;
+
+    /* y0 = y * R: bring y into Montgomery representation. */
+    y0 = mq_montymul(y, R2);
+    y1 = mq_montysqr(y0);
+    y2 = mq_montymul(y1, y0);
+    y3 = mq_montymul(y2, y1);
+    y4 = mq_montysqr(y3);
+    y5 = mq_montysqr(y4);
+    y6 = mq_montysqr(y5);
+    y7 = mq_montysqr(y6);
+    y8 = mq_montysqr(y7);
+    y9 = mq_montymul(y8, y2);
+    y10 = mq_montymul(y9, y8);
+    y11 = mq_montysqr(y10);
+    y12 = mq_montysqr(y11);
+    y13 = mq_montymul(y12, y9);
+    y14 = mq_montysqr(y13);
+    y15 = mq_montysqr(y14);
+    y16 = mq_montymul(y15, y10);
+    y17 = mq_montysqr(y16);
+    y18 = mq_montymul(y17, y0);
+
+    /*
+     * Final multiplication with x, which is not in Montgomery
+     * representation, computes the correct division result.
+     */
+    return mq_montymul(y18, x);
+}
+
+/*
+ * Compute NTT on a ring element.
+ *
+ * In-place iterative butterfly network over n = 2^logn coefficients:
+ * the outer loop runs logn stages with halving span t; each group of
+ * butterflies uses the twiddle factor GMb[m + i] (stored with the
+ * Montgomery factor R, which mq_montymul() cancels). All values stay
+ * in the 0..q-1 range and all operations are branch-free.
+ */
+static void
+mq_NTT(uint16_t *a, unsigned logn) {
+    size_t n, t, m;
+
+    n = (size_t)1 << logn;
+    t = n;
+    for (m = 1; m < n; m <<= 1) {
+        size_t ht, i, j1;
+
+        ht = t >> 1;
+        for (i = 0, j1 = 0; i < m; i ++, j1 += t) {
+            size_t j, j2;
+            uint32_t s;
+
+            /* Twiddle factor for this butterfly group. */
+            s = GMb[m + i];
+            j2 = j1 + ht;
+            for (j = j1; j < j2; j ++) {
+                uint32_t u, v;
+
+                /* Butterfly: (u, v) -> (u + v*s/R, u - v*s/R) mod q. */
+                u = a[j];
+                v = mq_montymul(a[j + ht], s);
+                a[j] = (uint16_t)mq_add(u, v);
+                a[j + ht] = (uint16_t)mq_sub(u, v);
+            }
+        }
+        t = ht;
+    }
+}
+
+/*
+ * Compute the inverse NTT on a ring element, binary case.
+ *
+ * Mirror image of mq_NTT(): the stages run with doubling span t and
+ * use the inverse twiddle factors iGMb[hm + i]; a final pass divides
+ * every coefficient by n. Branch-free, values stay in 0..q-1.
+ */
+static void
+mq_iNTT(uint16_t *a, unsigned logn) {
+    size_t n, t, m;
+    uint32_t ni;
+
+    n = (size_t)1 << logn;
+    t = 1;
+    m = n;
+    while (m > 1) {
+        size_t hm, dt, i, j1;
+
+        hm = m >> 1;
+        dt = t << 1;
+        for (i = 0, j1 = 0; i < hm; i ++, j1 += dt) {
+            size_t j, j2;
+            uint32_t s;
+
+            j2 = j1 + t;
+            /* Inverse twiddle factor for this butterfly group. */
+            s = iGMb[hm + i];
+            for (j = j1; j < j2; j ++) {
+                uint32_t u, v, w;
+
+                /* Inverse butterfly: sum, then scaled difference. */
+                u = a[j];
+                v = a[j + t];
+                a[j] = (uint16_t)mq_add(u, v);
+                w = mq_sub(u, v);
+                a[j + t] = (uint16_t)
+                    mq_montymul(w, s);
+            }
+        }
+        t = dt;
+        m = hm;
+    }
+
+    /*
+     * To complete the inverse NTT, we must now divide all values by
+     * n (the vector size). We thus need the inverse of n, i.e. we
+     * need to divide 1 by 2 logn times. But we also want it in
+     * Montgomery representation, i.e. we also want to multiply it
+     * by R = 2^16. In the common case, this should be a simple right
+     * shift. The loop below is generic and works also in corner cases;
+     * its computation time is negligible.
+     */
+    ni = R;
+    for (m = n; m > 1; m >>= 1) {
+        ni = mq_rshift1(ni);
+    }
+    for (m = 0; m < n; m ++) {
+        a[m] = (uint16_t)mq_montymul(a[m], ni);
+    }
+}
+
+/*
+ * Convert all n = 2^logn coefficients of f to Montgomery
+ * representation: each value is multiplied by R = 2^16 mod q,
+ * implemented as a Montgomery multiplication by R2 = 2^32 mod q
+ * (f[i] * R2 / R = f[i] * R).
+ */
+static void
+mq_poly_tomonty(uint16_t *f, unsigned logn) {
+    size_t i, n;
+
+    n = (size_t)1 << logn;
+    for (i = 0; i < n; i ++) {
+        f[i] = (uint16_t)mq_montymul(f[i], R2);
+    }
+}
+
+/*
+ * Pointwise product of f and g (both in NTT representation) using
+ * Montgomery multiplication; f[i]*g[i]/R overwrites f. Callers in
+ * this file first apply mq_poly_tomonty() to one operand so that
+ * the 1/R factor cancels and the result is a plain product.
+ */
+static void
+mq_poly_montymul_ntt(uint16_t *f, const uint16_t *g, unsigned logn) {
+    size_t i, n;
+
+    n = (size_t)1 << logn;
+    for (i = 0; i < n; i ++) {
+        f[i] = (uint16_t)mq_montymul(f[i], g[i]);
+    }
+}
+
+/*
+ * Coefficient-wise subtraction modulo q: f <- f - g. Both inputs
+ * hold n = 2^logn values in the 0..q-1 range.
+ */
+static void
+mq_poly_sub(uint16_t *f, const uint16_t *g, unsigned logn) {
+    size_t i, n;
+
+    n = (size_t)1 << logn;
+    for (i = 0; i < n; i ++) {
+        f[i] = (uint16_t)mq_sub(f[i], g[i]);
+    }
+}
+
+/* ===================================================================== */
+
+/* see inner.h */
+/*
+ * Convert h (typically the public key polynomial) to NTT
+ * representation and then to Montgomery representation, so that
+ * later pointwise products via mq_poly_montymul_ntt() yield plain
+ * (non-Montgomery) NTT values.
+ */
+void
+PQCLEAN_FALCON1024_CLEAN_to_ntt_monty(uint16_t *h, unsigned logn) {
+    mq_NTT(h, logn);
+    mq_poly_tomonty(h, logn);
+}
+
+/* see inner.h */
+/*
+ * Raw signature verification: given c0 (the hashed message, reduced
+ * mod q), the signature half s2, and the public key h, recompute
+ * -s1 = s2*h - c0 mod phi mod q, then accept iff the aggregate
+ * vector (-s1, s2) is short enough per ..._is_short(). Returns that
+ * function's result (nonzero = accept — see common.c).
+ *
+ * h is expected in NTT + Montgomery representation (as produced by
+ * PQCLEAN_FALCON1024_CLEAN_to_ntt_monty()), so the Montgomery
+ * pointwise product below yields the plain product — confirm the
+ * contract in inner.h. tmp must hold n = 2^logn uint16_t values
+ * (and be suitably aligned for uint16_t).
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_verify_raw(const uint16_t *c0, const int16_t *s2,
+        const uint16_t *h, unsigned logn, uint8_t *tmp) {
+    size_t u, n;
+    uint16_t *tt;
+
+    n = (size_t)1 << logn;
+    tt = (uint16_t *)tmp;
+
+    /*
+     * Reduce s2 elements modulo q ([0..q-1] range).
+     */
+    for (u = 0; u < n; u ++) {
+        uint32_t w;
+
+        /* Negative s2[u] casts with top bit set; mask adds q back. */
+        w = (uint32_t)s2[u];
+        w += Q & -(w >> 31);
+        tt[u] = (uint16_t)w;
+    }
+
+    /*
+     * Compute -s1 = s2*h - c0 mod phi mod q (in tt[]).
+     */
+    mq_NTT(tt, logn);
+    mq_poly_montymul_ntt(tt, h, logn);
+    mq_iNTT(tt, logn);
+    mq_poly_sub(tt, c0, logn);
+
+    /*
+     * Normalize -s1 elements into the [-q/2..q/2] range.
+     */
+    for (u = 0; u < n; u ++) {
+        int32_t w;
+
+        /* Branch-free: subtract q when tt[u] > q/2. */
+        w = (int32_t)tt[u];
+        w -= (int32_t)(Q & -(((Q >> 1) - (uint32_t)w) >> 31));
+        ((int16_t *)tt)[u] = (int16_t)w;
+    }
+
+    /*
+     * Signature is valid if and only if the aggregate (-s1,s2) vector
+     * is short enough.
+     */
+    return PQCLEAN_FALCON1024_CLEAN_is_short((int16_t *)tt, s2, logn);
+}
+
+/* see inner.h */
+/*
+ * Compute the public key h = g/f mod phi mod q from the small private
+ * polynomials f and g (the division is exact per NTT coefficient, via
+ * mq_div_12289()). Returns 1 on success; returns 0 if f is not
+ * invertible mod phi mod q (some NTT coefficient of f is zero), in
+ * which case h[] may be left partially written. tmp must hold
+ * n = 2^logn uint16_t values.
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_compute_public(uint16_t *h,
+        const int8_t *f, const int8_t *g, unsigned logn, uint8_t *tmp) {
+    size_t u, n;
+    uint16_t *tt;
+
+    n = (size_t)1 << logn;
+    tt = (uint16_t *)tmp;
+    /* Lift the small signed coefficients into 0..q-1. */
+    for (u = 0; u < n; u ++) {
+        tt[u] = (uint16_t)mq_conv_small(f[u]);
+        h[u] = (uint16_t)mq_conv_small(g[u]);
+    }
+    mq_NTT(h, logn);
+    mq_NTT(tt, logn);
+    for (u = 0; u < n; u ++) {
+        /* A zero NTT coefficient of f means f is not invertible. */
+        if (tt[u] == 0) {
+            return 0;
+        }
+        h[u] = (uint16_t)mq_div_12289(h[u], tt[u]);
+    }
+    mq_iNTT(h, logn);
+    return 1;
+}
+
+/* see inner.h */
+/*
+ * Rebuild the private polynomial G from f, g and F. Since the NTRU
+ * equation gives f*G - g*F = q, we have G = g*F/f mod phi mod q;
+ * this is computed coefficient-wise in the NTT domain. Returns 0 if
+ * f is not invertible (a zero NTT coefficient) or if some recovered
+ * coefficient of G falls outside [-127..+127]; returns 1 on success.
+ * tmp must hold 2*n uint16_t values (n = 2^logn).
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_complete_private(int8_t *G,
+        const int8_t *f, const int8_t *g, const int8_t *F,
+        unsigned logn, uint8_t *tmp) {
+    size_t u, n;
+    uint16_t *t1, *t2;
+
+    n = (size_t)1 << logn;
+    t1 = (uint16_t *)tmp;
+    t2 = t1 + n;
+    /* t1 <- g, t2 <- F, lifted into 0..q-1. */
+    for (u = 0; u < n; u ++) {
+        t1[u] = (uint16_t)mq_conv_small(g[u]);
+        t2[u] = (uint16_t)mq_conv_small(F[u]);
+    }
+    mq_NTT(t1, logn);
+    mq_NTT(t2, logn);
+    /* t1 <- g*F (tomonty on t1 cancels the 1/R of montymul). */
+    mq_poly_tomonty(t1, logn);
+    mq_poly_montymul_ntt(t1, t2, logn);
+    /* t2 <- f (NTT); then divide: t1 <- g*F/f. */
+    for (u = 0; u < n; u ++) {
+        t2[u] = (uint16_t)mq_conv_small(f[u]);
+    }
+    mq_NTT(t2, logn);
+    for (u = 0; u < n; u ++) {
+        if (t2[u] == 0) {
+            return 0;
+        }
+        t1[u] = (uint16_t)mq_div_12289(t1[u], t2[u]);
+    }
+    mq_iNTT(t1, logn);
+    for (u = 0; u < n; u ++) {
+        uint32_t w;
+        int32_t gi;
+
+        /* Branch-free: subtract q when w >= q>>1, mapping 0..q-1
+           to the signed range. */
+        w = t1[u];
+        w -= (Q & ~ -((w - (Q >> 1)) >> 31));
+        /* Reinterpret the bits as signed; accessing a uint32_t
+           object via int32_t is permitted (corresponding signed
+           type, C11 6.5p7). */
+        gi = *(int32_t *)&w;
+        /* G must fit in an int8_t with the int8_t range used by
+           the key format. */
+        if (gi < -127 || gi > +127) {
+            return 0;
+        }
+        G[u] = (int8_t)gi;
+    }
+    return 1;
+}
+
+/* see inner.h */
+/*
+ * Constant-time test of whether s2 is invertible mod phi mod q:
+ * reduce s2 into 0..q-1, apply the NTT, and check that no NTT
+ * coefficient is zero. Returns 1 if invertible, 0 otherwise.
+ * tmp must hold n = 2^logn uint16_t values.
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_is_invertible(
+    const int16_t *s2, unsigned logn, uint8_t *tmp) {
+    size_t u, n;
+    uint16_t *tt;
+    uint32_t r;
+
+    n = (size_t)1 << logn;
+    tt = (uint16_t *)tmp;
+    for (u = 0; u < n; u ++) {
+        uint32_t w;
+
+        /* Reduce each (possibly negative) value into 0..q-1. */
+        w = (uint32_t)s2[u];
+        w += Q & -(w >> 31);
+        tt[u] = (uint16_t)w;
+    }
+    mq_NTT(tt, logn);
+    r = 0;
+    for (u = 0; u < n; u ++) {
+        /* tt[u] - 1 wraps to all-ones only when tt[u] == 0, so the
+           top bit of r ends up set iff some coefficient is zero. */
+        r |= (uint32_t)(tt[u] - 1);
+    }
+    return (int)(1u - (r >> 31));
+}
+
+/* see inner.h */
+/*
+ * Key-recovery verification: rebuild the public key
+ * h = (c0 - s1) / s2 mod phi mod q from the hashed message c0 and
+ * both signature halves (s1, s2). Returns 1 iff s2 is invertible
+ * mod phi mod q AND (s1, s2) is short enough; the caller must still
+ * compare the rebuilt h against the expected public key (e.g. via a
+ * hash). tmp must hold n = 2^logn uint16_t values.
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_verify_recover(uint16_t *h,
+        const uint16_t *c0, const int16_t *s1, const int16_t *s2,
+        unsigned logn, uint8_t *tmp) {
+    size_t u, n;
+    uint16_t *tt;
+    uint32_t r;
+
+    n = (size_t)1 << logn;
+
+    /*
+     * Reduce elements of s1 and s2 modulo q; then write s2 into tt[]
+     * and c0 - s1 into h[].
+     */
+    tt = (uint16_t *)tmp;
+    for (u = 0; u < n; u ++) {
+        uint32_t w;
+
+        w = (uint32_t)s2[u];
+        w += Q & -(w >> 31);
+        tt[u] = (uint16_t)w;
+
+        w = (uint32_t)s1[u];
+        w += Q & -(w >> 31);
+        w = mq_sub(c0[u], w);
+        h[u] = (uint16_t)w;
+    }
+
+    /*
+     * Compute h = (c0 - s1) / s2. If one of the coefficients of s2
+     * is zero (in NTT representation) then the operation fails. We
+     * keep that information into a flag so that we do not deviate
+     * from strict constant-time processing; if all coefficients of
+     * s2 are non-zero, then the high bit of r will be zero.
+     */
+    mq_NTT(tt, logn);
+    mq_NTT(h, logn);
+    r = 0;
+    for (u = 0; u < n; u ++) {
+        /* tt[u] - 1 wraps (top bit set) exactly when tt[u] == 0. */
+        r |= (uint32_t)(tt[u] - 1);
+        h[u] = (uint16_t)mq_div_12289(h[u], tt[u]);
+    }
+    mq_iNTT(h, logn);
+
+    /*
+     * Signature is acceptable if and only if it is short enough,
+     * and s2 was invertible mod phi mod q. The caller must still
+     * check that the rebuilt public key matches the expected
+     * value (e.g. through a hash).
+     */
+    /* ~r has its top bit set iff s2 was invertible; the negated
+       is_short() result (0 or 1 — see common.c) expands to an
+       all-ones/all-zero mask, so r >> 31 is 1 only when both
+       conditions hold. */
+    r = ~r & (uint32_t) - PQCLEAN_FALCON1024_CLEAN_is_short(s1, s2, logn);
+    return (int)(r >> 31);
+}
+
+/* see inner.h */
+/*
+ * Count how many coefficients of sig are zero in the NTT domain.
+ * sig is first reduced into the 0..q-1 range. Returns the count
+ * (0..n, n = 2^logn). tmp must hold n uint16_t values.
+ */
+int
+PQCLEAN_FALCON1024_CLEAN_count_nttzero(const int16_t *sig, unsigned logn, uint8_t *tmp) {
+    uint16_t *s2;
+    size_t u, n;
+    uint32_t r;
+
+    n = (size_t)1 << logn;
+    s2 = (uint16_t *)tmp;
+    for (u = 0; u < n; u ++) {
+        uint32_t w;
+
+        /* Reduce each (possibly negative) value into 0..q-1. */
+        w = (uint32_t)sig[u];
+        w += Q & -(w >> 31);
+        s2[u] = (uint16_t)w;
+    }
+    mq_NTT(s2, logn);
+    r = 0;
+    for (u = 0; u < n; u ++) {
+        uint32_t w;
+
+        /* w wraps (top bit set) exactly when s2[u] == 0, so the
+           shift contributes 1 to the count only for zeros. */
+        w = (uint32_t)s2[u] - 1u;
+        r += (w >> 31);
+    }
+    return (int)r;
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/Makefile b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/Makefile
new file mode 100644
index 000000000..fe090f3ff
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/Makefile
@@ -0,0 +1,49 @@
+#! gmake
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# NSS/coreconf build driver for the Falcon-512 "clean" sources: the
+# file list comes from manifest.mn, global settings from coreconf,
+# and local overrides from config.mk.
+
+#######################################################################
+# (1) Include initial platform-independent assignments (MANDATORY).   #
+#######################################################################
+
+include manifest.mn
+
+#######################################################################
+# (2) Include "global" configuration information. (OPTIONAL)          #
+#######################################################################
+
+USE_GCOV =
+include $(CORE_DEPTH)/coreconf/config.mk
+
+#######################################################################
+# (3) Include "component" configuration information. (OPTIONAL)       #
+#######################################################################
+
+
+
+#######################################################################
+# (4) Include "local" platform-dependent assignments (OPTIONAL).      #
+#######################################################################
+
+include config.mk
+
+#######################################################################
+# (5) Execute "global" rules. (OPTIONAL)                              #
+#######################################################################
+
+include $(CORE_DEPTH)/coreconf/rules.mk
+
+#######################################################################
+# (6) Execute "component" rules. (OPTIONAL)                           #
+#######################################################################
+
+
+
+#######################################################################
+# (7) Execute "local" rules. (OPTIONAL).                              #
+#######################################################################
+
+# NOTE(review): clears the warning flags, presumably because this is
+# imported third-party reference code — confirm against sibling dirs.
+WARNING_CFLAGS = $(NULL)
+
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/api.h b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/api.h
new file mode 100644
index 000000000..a2e5e1d50
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/api.h
@@ -0,0 +1,80 @@
+#ifndef PQCLEAN_FALCON512_CLEAN_API_H
+#define PQCLEAN_FALCON512_CLEAN_API_H
+
+/*
+ * Public NIST-style API for the Falcon-512 "clean" implementation:
+ * key/signature sizes, the algorithm name, and the five
+ * crypto_sign_* entry points (detached and attached signatures).
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define PQCLEAN_FALCON512_CLEAN_CRYPTO_SECRETKEYBYTES 1281
+#define PQCLEAN_FALCON512_CLEAN_CRYPTO_PUBLICKEYBYTES 897
+#define PQCLEAN_FALCON512_CLEAN_CRYPTO_BYTES 690
+
+#define PQCLEAN_FALCON512_CLEAN_CRYPTO_ALGNAME "Falcon-512"
+
+/*
+ * Generate a new key pair. Public key goes into pk[], private key in sk[].
+ * Key sizes are exact (in bytes):
+ * public (pk): PQCLEAN_FALCON512_CLEAN_CRYPTO_PUBLICKEYBYTES
+ * private (sk): PQCLEAN_FALCON512_CLEAN_CRYPTO_SECRETKEYBYTES
+ *
+ * Return value: 0 on success, -1 on error.
+ */
+int PQCLEAN_FALCON512_CLEAN_crypto_sign_keypair(
+    uint8_t *pk, uint8_t *sk);
+
+/*
+ * Compute a signature on a provided message (m, mlen), with a given
+ * private key (sk). Signature is written in sig[], with length written
+ * into *siglen. Signature length is variable; maximum signature length
+ * (in bytes) is PQCLEAN_FALCON512_CLEAN_CRYPTO_BYTES.
+ *
+ * sig[], m[] and sk[] may overlap each other arbitrarily.
+ *
+ * Return value: 0 on success, -1 on error.
+ */
+int PQCLEAN_FALCON512_CLEAN_crypto_sign_signature(
+    uint8_t *sig, size_t *siglen,
+    const uint8_t *m, size_t mlen, const uint8_t *sk);
+
+/*
+ * Verify a signature (sig, siglen) on a message (m, mlen) with a given
+ * public key (pk).
+ *
+ * sig[], m[] and pk[] may overlap each other arbitrarily.
+ *
+ * Return value: 0 on success, -1 on error.
+ */
+int PQCLEAN_FALCON512_CLEAN_crypto_sign_verify(
+    const uint8_t *sig, size_t siglen,
+    const uint8_t *m, size_t mlen, const uint8_t *pk);
+
+/*
+ * Compute a signature on a message and pack the signature and message
+ * into a single object, written into sm[]. The length of that output is
+ * written in *smlen; that length may be larger than the message length
+ * (mlen) by up to PQCLEAN_FALCON512_CLEAN_CRYPTO_BYTES.
+ *
+ * sm[] and m[] may overlap each other arbitrarily; however, sm[] shall
+ * not overlap with sk[].
+ *
+ * Return value: 0 on success, -1 on error.
+ */
+int PQCLEAN_FALCON512_CLEAN_crypto_sign(
+    uint8_t *sm, size_t *smlen,
+    const uint8_t *m, size_t mlen, const uint8_t *sk);
+
+/*
+ * Open a signed message object (sm, smlen) and verify the signature;
+ * on success, the message itself is written into m[] and its length
+ * into *mlen. The message is shorter than the signed message object,
+ * but the size difference depends on the signature value; the difference
+ * may range up to PQCLEAN_FALCON512_CLEAN_CRYPTO_BYTES.
+ *
+ * m[], sm[] and pk[] may overlap each other arbitrarily.
+ *
+ * Return value: 0 on success, -1 on error.
+ */
+int PQCLEAN_FALCON512_CLEAN_crypto_sign_open(
+    uint8_t *m, size_t *mlen,
+    const uint8_t *sm, size_t smlen, const uint8_t *pk);
+
+#endif
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/codec.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/codec.c
new file mode 100644
index 000000000..76709bc9d
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/codec.c
@@ -0,0 +1,555 @@
+#include "inner.h"
+
+/*
+ * Encoding/decoding of keys and signatures.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+/* see inner.h */
/*
 * Encode an array of values modulo q = 12289 into packed 14-bit form.
 *
 * If out is NULL, only the required output length (in bytes) is
 * returned. Returns 0 on error: a source value outside 0..12288, or an
 * output buffer smaller than required. Otherwise returns the number of
 * bytes written.
 */
size_t
PQCLEAN_FALCON512_CLEAN_modq_encode(
    void *out, size_t max_out_len,
    const uint16_t *x, unsigned logn) {
    size_t num, needed, k;
    uint8_t *dst;
    uint32_t bitbuf;
    int nbits;

    num = (size_t)1 << logn;

    /* Reject any coefficient outside of the 0..q-1 range. */
    for (k = 0; k < num; k ++) {
        if (x[k] >= 12289) {
            return 0;
        }
    }

    /* Each value uses exactly 14 bits; round up to whole bytes. */
    needed = ((num * 14) + 7) >> 3;
    if (out == NULL) {
        return needed;
    }
    if (needed > max_out_len) {
        return 0;
    }

    /* Pack values MSB-first through a 32-bit accumulator. */
    dst = out;
    bitbuf = 0;
    nbits = 0;
    for (k = 0; k < num; k ++) {
        bitbuf = (bitbuf << 14) | x[k];
        nbits += 14;
        while (nbits >= 8) {
            nbits -= 8;
            *dst ++ = (uint8_t)(bitbuf >> nbits);
        }
    }

    /* Flush remaining bits; the last byte is zero-padded. */
    if (nbits > 0) {
        *dst = (uint8_t)(bitbuf << (8 - nbits));
    }
    return needed;
}
+
+/* see inner.h */
/*
 * Decode packed 14-bit values modulo q = 12289 into x[] (2^logn
 * entries). Returns the number of bytes read, or 0 on error: input
 * too short, a decoded value not below 12289, or nonzero padding bits
 * in the last byte.
 */
size_t
PQCLEAN_FALCON512_CLEAN_modq_decode(
    uint16_t *x, unsigned logn,
    const void *in, size_t max_in_len) {
    size_t num, needed, k;
    const uint8_t *src;
    uint32_t bitbuf;
    int nbits;

    num = (size_t)1 << logn;
    needed = ((num * 14) + 7) >> 3;
    if (needed > max_in_len) {
        return 0;
    }

    /* Unpack MSB-first, 14 bits per value. */
    src = in;
    bitbuf = 0;
    nbits = 0;
    k = 0;
    while (k < num) {
        bitbuf = (bitbuf << 8) | (*src ++);
        nbits += 8;
        if (nbits >= 14) {
            unsigned v;

            nbits -= 14;
            v = (bitbuf >> nbits) & 0x3FFF;
            if (v >= 12289) {
                return 0;
            }
            x[k ++] = (uint16_t)v;
        }
    }

    /* Padding bits in the last byte must all be zero. */
    if ((bitbuf & (((uint32_t)1 << nbits) - 1)) != 0) {
        return 0;
    }
    return needed;
}
+
+/* see inner.h */
/*
 * Encode signed 16-bit values into a fixed width of 'bits' bits each
 * (two's complement, MSB-first). All values must lie in
 * -(2^(bits-1)-1)..+(2^(bits-1)-1). If out is NULL, only the required
 * length is returned. Returns 0 on error (value out of range, or
 * output buffer too small).
 */
size_t
PQCLEAN_FALCON512_CLEAN_trim_i16_encode(
    void *out, size_t max_out_len,
    const int16_t *x, unsigned logn, unsigned bits) {
    size_t count, k, needed;
    int lo, hi;
    uint8_t *dst;
    uint32_t bitbuf, keep;
    unsigned nbits;

    count = (size_t)1 << logn;
    hi = (1 << (bits - 1)) - 1;
    lo = -hi;
    for (k = 0; k < count; k ++) {
        if (x[k] < lo || x[k] > hi) {
            return 0;
        }
    }
    needed = ((count * bits) + 7) >> 3;
    if (out == NULL) {
        return needed;
    }
    if (needed > max_out_len) {
        return 0;
    }
    dst = out;
    bitbuf = 0;
    nbits = 0;
    keep = ((uint32_t)1 << bits) - 1;
    for (k = 0; k < count; k ++) {
        /* Two's complement truncation to 'bits' bits. */
        bitbuf = (bitbuf << bits) | ((uint16_t)x[k] & keep);
        nbits += bits;
        while (nbits >= 8) {
            nbits -= 8;
            *dst ++ = (uint8_t)(bitbuf >> nbits);
        }
    }
    /* Flush remaining bits; last byte is zero-padded. */
    if (nbits > 0) {
        *dst ++ = (uint8_t)(bitbuf << (8 - nbits));
    }
    return needed;
}
+
+/* see inner.h */
/*
 * Decode fixed-width values ('bits' bits each, two's complement,
 * MSB-first) into signed 16-bit integers. Returns the number of bytes
 * read, or 0 on error: input too short, the forbidden value
 * -2^(bits-1), or nonzero padding bits in the last byte.
 *
 * Fix: the original carried a second, redundant sign-extension
 * statement ("w |= -(w & mask2);") after the forbidden-value check.
 * The operation is idempotent, so it is removed here, matching the
 * structure of PQCLEAN_FALCON512_CLEAN_trim_i8_decode.
 */
size_t
PQCLEAN_FALCON512_CLEAN_trim_i16_decode(
    int16_t *x, unsigned logn, unsigned bits,
    const void *in, size_t max_in_len) {
    size_t n, in_len;
    const uint8_t *buf;
    size_t u;
    uint32_t acc, mask1, mask2;
    unsigned acc_len;

    n = (size_t)1 << logn;
    in_len = ((n * bits) + 7) >> 3;
    if (in_len > max_in_len) {
        return 0;
    }
    buf = in;
    u = 0;
    acc = 0;
    acc_len = 0;
    mask1 = ((uint32_t)1 << bits) - 1;  /* keeps the low 'bits' bits */
    mask2 = (uint32_t)1 << (bits - 1);  /* sign bit position */
    while (u < n) {
        acc = (acc << 8) | *buf ++;
        acc_len += 8;
        while (acc_len >= bits && u < n) {
            uint32_t w;

            acc_len -= bits;
            w = (acc >> acc_len) & mask1;
            /* Sign-extend from 'bits' bits to 32 bits. */
            w |= -(w & mask2);
            if (w == -mask2) {
                /*
                 * The -2^(bits-1) value is forbidden.
                 */
                return 0;
            }
            /*
             * Reinterpret the two's complement bit pattern as a
             * signed value (access through the corresponding signed
             * type is well-defined).
             */
            x[u ++] = (int16_t) * (int32_t *)&w;
        }
    }
    if ((acc & (((uint32_t)1 << acc_len) - 1)) != 0) {
        /*
         * Extra bits in the last byte must be zero.
         */
        return 0;
    }
    return in_len;
}
+
+/* see inner.h */
/*
 * Encode signed 8-bit values into a fixed width of 'bits' bits each
 * (two's complement, MSB-first). All values must lie in
 * -(2^(bits-1)-1)..+(2^(bits-1)-1). If out is NULL, only the required
 * length is returned. Returns 0 on error (value out of range, or
 * output buffer too small).
 */
size_t
PQCLEAN_FALCON512_CLEAN_trim_i8_encode(
    void *out, size_t max_out_len,
    const int8_t *x, unsigned logn, unsigned bits) {
    size_t count, k, needed;
    int lo, hi;
    uint8_t *dst;
    uint32_t bitbuf, keep;
    unsigned nbits;

    count = (size_t)1 << logn;
    hi = (1 << (bits - 1)) - 1;
    lo = -hi;
    for (k = 0; k < count; k ++) {
        if (x[k] < lo || x[k] > hi) {
            return 0;
        }
    }
    needed = ((count * bits) + 7) >> 3;
    if (out == NULL) {
        return needed;
    }
    if (needed > max_out_len) {
        return 0;
    }
    dst = out;
    bitbuf = 0;
    nbits = 0;
    keep = ((uint32_t)1 << bits) - 1;
    for (k = 0; k < count; k ++) {
        /* Two's complement truncation to 'bits' bits. */
        bitbuf = (bitbuf << bits) | ((uint8_t)x[k] & keep);
        nbits += bits;
        while (nbits >= 8) {
            nbits -= 8;
            *dst ++ = (uint8_t)(bitbuf >> nbits);
        }
    }
    /* Flush remaining bits; last byte is zero-padded. */
    if (nbits > 0) {
        *dst ++ = (uint8_t)(bitbuf << (8 - nbits));
    }
    return needed;
}
+
+/* see inner.h */
/*
 * Decode fixed-width values ('bits' bits each, two's complement,
 * MSB-first) into signed 8-bit integers. Returns the number of bytes
 * read, or 0 on error: input too short, the forbidden value
 * -2^(bits-1), or nonzero padding bits in the last byte.
 */
size_t
PQCLEAN_FALCON512_CLEAN_trim_i8_decode(
    int8_t *x, unsigned logn, unsigned bits,
    const void *in, size_t max_in_len) {
    size_t count, needed, k;
    const uint8_t *src;
    uint32_t bitbuf, vmask, smask;
    unsigned nbits;

    count = (size_t)1 << logn;
    needed = ((count * bits) + 7) >> 3;
    if (needed > max_in_len) {
        return 0;
    }
    src = in;
    k = 0;
    bitbuf = 0;
    nbits = 0;
    vmask = ((uint32_t)1 << bits) - 1;  /* keeps the low 'bits' bits */
    smask = (uint32_t)1 << (bits - 1);  /* sign bit position */
    while (k < count) {
        bitbuf = (bitbuf << 8) | *src ++;
        nbits += 8;
        while (nbits >= bits && k < count) {
            uint32_t v;

            nbits -= bits;
            v = (bitbuf >> nbits) & vmask;
            /* Sign-extend from 'bits' bits to 32 bits. */
            v |= -(v & smask);
            if (v == -smask) {
                /*
                 * The -2^(bits-1) value is forbidden.
                 */
                return 0;
            }
            /*
             * Reinterpret the two's complement bit pattern as a
             * signed value (access through the corresponding signed
             * type is well-defined).
             */
            x[k ++] = (int8_t) * (int32_t *)&v;
        }
    }
    if ((bitbuf & (((uint32_t)1 << nbits) - 1)) != 0) {
        /*
         * Extra bits in the last byte must be zero.
         */
        return 0;
    }
    return needed;
}
+
+/* see inner.h */
/*
 * Encode a signature vector into the compressed ("comp") format.
 *
 * Each coefficient is emitted as: one sign bit, the low seven bits of
 * the absolute value, then the remaining high bits in unary (that many
 * zeros followed by a terminating one). All values must lie in
 * -2047..+2047. If out is NULL, only the encoded size is computed.
 * Returns the encoded length in bytes, or 0 on error (value out of
 * range, or output buffer too small).
 */
size_t
PQCLEAN_FALCON512_CLEAN_comp_encode(
    void *out, size_t max_out_len,
    const int16_t *x, unsigned logn) {
    uint8_t *dst;
    size_t count, i, written;
    uint32_t bitbuf;
    unsigned nbits;

    count = (size_t)1 << logn;
    dst = out;

    /*
     * Make sure that all values are within the -2047..+2047 range.
     */
    for (i = 0; i < count; i ++) {
        if (x[i] < -2047 || x[i] > +2047) {
            return 0;
        }
    }

    bitbuf = 0;
    nbits = 0;
    written = 0;
    for (i = 0; i < count; i ++) {
        int val;
        unsigned mag;

        /* Push the sign bit, then take the absolute value. */
        val = x[i];
        bitbuf <<= 1;
        if (val < 0) {
            val = -val;
            bitbuf |= 1;
        }
        mag = (unsigned)val;

        /* Push the low seven bits of the magnitude. */
        bitbuf = (bitbuf << 7) | (mag & 127u);
        mag >>= 7;
        nbits += 8;

        /*
         * High bits in unary: 'mag' zeros, then a one. The magnitude
         * is at most 2047, so mag <= 15 here and at most 16 extra
         * bits are pushed; with the 8 bits above and up to 7 pending
         * bits from earlier values, the total stays within the
         * 32-bit accumulator.
         */
        bitbuf = (bitbuf << (mag + 1)) | 1;
        nbits += mag + 1;

        /* Emit every completed byte. */
        while (nbits >= 8) {
            nbits -= 8;
            if (dst != NULL) {
                if (written >= max_out_len) {
                    return 0;
                }
                dst[written] = (uint8_t)(bitbuf >> nbits);
            }
            written ++;
        }
    }

    /* Flush trailing bits, zero-padded to a full byte. */
    if (nbits > 0) {
        if (dst != NULL) {
            if (written >= max_out_len) {
                return 0;
            }
            dst[written] = (uint8_t)(bitbuf << (8 - nbits));
        }
        written ++;
    }

    return written;
}
+
+/* see inner.h */
/*
 * Decode a compressed-format signature into 2^logn coefficients.
 * Returns the number of bytes read, or 0 on error.
 *
 * Fix: in addition to the original checks, this implementation
 * enforces encoding uniqueness, as required by the Falcon
 * specification (and applied upstream in Falcon 1.2): the "minus
 * zero" encoding (sign bit set with zero magnitude) is rejected, and
 * unused bits in the last consumed byte must be zero. Without these
 * checks, several distinct byte strings decode to the same signature
 * vector, making signatures malleable.
 */
size_t
PQCLEAN_FALCON512_CLEAN_comp_decode(
    int16_t *x, unsigned logn,
    const void *in, size_t max_in_len) {
    const uint8_t *buf;
    size_t n, u, v;
    uint32_t acc;
    unsigned acc_len;

    n = (size_t)1 << logn;
    buf = in;
    acc = 0;
    acc_len = 0;
    v = 0;
    for (u = 0; u < n; u ++) {
        unsigned b, s, m;

        /*
         * Get next eight bits: sign and low seven bits of the
         * absolute value.
         */
        if (v >= max_in_len) {
            return 0;
        }
        acc = (acc << 8) | (uint32_t)buf[v ++];
        b = acc >> acc_len;
        s = b & 128;
        m = b & 127;

        /*
         * Get next bits until a 1 is reached; each zero bit adds
         * 128 to the magnitude.
         */
        for (;;) {
            if (acc_len == 0) {
                if (v >= max_in_len) {
                    return 0;
                }
                acc = (acc << 8) | (uint32_t)buf[v ++];
                acc_len = 8;
            }
            acc_len --;
            if (((acc >> acc_len) & 1) != 0) {
                break;
            }
            m += 128;
            if (m > 2047) {
                return 0;
            }
        }

        /*
         * "-0" is forbidden: zero has a unique encoding with a
         * cleared sign bit. Accepting it would make signatures
         * malleable.
         */
        if (s && m == 0) {
            return 0;
        }

        x[u] = (int16_t) m;
        if (s) {
            x[u] = (int16_t) - x[u];
        }
    }

    /*
     * Unused bits in the last consumed byte must be zero, for
     * encoding uniqueness.
     */
    if ((acc & (((uint32_t)1 << acc_len) - 1)) != 0) {
        return 0;
    }
    return v;
}
+
+/*
+ * Key elements and signatures are polynomials with small integer
+ * coefficients. Here are some statistics gathered over many
+ * generated key pairs (10000 or more for each degree):
+ *
+ * log(n) n max(f,g) std(f,g) max(F,G) std(F,G)
+ * 1 2 129 56.31 143 60.02
+ * 2 4 123 40.93 160 46.52
+ * 3 8 97 28.97 159 38.01
+ * 4 16 100 21.48 154 32.50
+ * 5 32 71 15.41 151 29.36
+ * 6 64 59 11.07 138 27.77
+ * 7 128 39 7.91 144 27.00
+ * 8 256 32 5.63 148 26.61
+ * 9 512 22 4.00 137 26.46
+ * 10 1024 15 2.84 146 26.41
+ *
+ * We want a compact storage format for private key, and, as part of
+ * key generation, we are allowed to reject some keys which would
+ * otherwise be fine (this does not induce any noticeable vulnerability
+ * as long as we reject only a small proportion of possible keys).
+ * Hence, we enforce at key generation time maximum values for the
+ * elements of f, g, F and G, so that their encoding can be expressed
+ * in fixed-width values. Limits have been chosen so that generated
+ * keys are almost always within bounds, with no impact on either
+ * security or performance.
+ *
+ * IMPORTANT: the code assumes that all coefficients of f, g, F and G
+ * ultimately fit in the -127..+127 range. Thus, none of the elements
+ * of max_fg_bits[] and max_FG_bits[] shall be greater than 8.
+ */
+
/*
 * Maximum encoded bit width for coefficients of f and g, indexed by
 * logn (index 0 is unused). Per the note above, no entry may exceed 8.
 */
const uint8_t PQCLEAN_FALCON512_CLEAN_max_fg_bits[] = {
    0 /* unused */, 8, 8, 8, 8, 8, 7, 7, 6, 6, 5
};
+
/*
 * Maximum encoded bit width for coefficients of F and G, indexed by
 * logn (index 0 is unused). Per the note above, no entry may exceed 8.
 */
const uint8_t PQCLEAN_FALCON512_CLEAN_max_FG_bits[] = {
    0 /* unused */, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8
};
+
+/*
+ * When generating a new key pair, we can always reject keys which
+ * feature an abnormally large coefficient. This can also be done for
+ * signatures, albeit with some care: in case the signature process is
+ * used in a derandomized setup (explicitly seeded with the message and
+ * private key), we have to follow the specification faithfully, and the
+ * specification only enforces a limit on the L2 norm of the signature
+ * vector. The limit on the L2 norm implies that the absolute value of
+ * a coefficient of the signature cannot be more than the following:
+ *
+ * log(n) n max sig coeff (theoretical)
+ * 1 2 412
+ * 2 4 583
+ * 3 8 824
+ * 4 16 1166
+ * 5 32 1649
+ * 6 64 2332
+ * 7 128 3299
+ * 8 256 4665
+ * 9 512 6598
+ * 10 1024 9331
+ *
+ * However, the largest observed signature coefficient during our
+ * experiments was 1077 (in absolute value), hence we can assume that,
+ * with overwhelming probability, signature coefficients will fit
+ * in -2047..2047, i.e. 12 bits.
+ */
+
/*
 * Maximum encoded bit width for signature coefficients, indexed by
 * logn (index 0 is unused); see the discussion above on observed
 * coefficient magnitudes.
 */
const uint8_t PQCLEAN_FALCON512_CLEAN_max_sig_bits[] = {
    0 /* unused */, 10, 11, 11, 12, 12, 12, 12, 12, 12, 12
};
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/common.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/common.c
new file mode 100644
index 000000000..dea433f6c
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/common.c
@@ -0,0 +1,294 @@
+#include "inner.h"
+
+/*
+ * Support functions for signatures (hash-to-point, norm).
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_hash_to_point_vartime(
+ inner_shake256_context *sc,
+ uint16_t *x, unsigned logn) {
+ /*
+ * This is the straightforward per-the-spec implementation. It
+ * is not constant-time, thus it might reveal information on the
+ * plaintext (at least, enough to check the plaintext against a
+ * list of potential plaintexts) in a scenario where the
+ * attacker does not have access to the signature value or to
+ * the public key, but knows the nonce (without knowledge of the
+ * nonce, the hashed output cannot be matched against potential
+ * plaintexts).
+ */
+ size_t n;
+
+ n = (size_t)1 << logn;
+ while (n > 0) {
+ uint8_t buf[2];
+ uint32_t w;
+
+ inner_shake256_extract(sc, (void *)buf, sizeof buf);
+ w = ((unsigned)buf[0] << 8) | (unsigned)buf[1];
+ if (w < 61445) {
+ while (w >= 12289) {
+ w -= 12289;
+ }
+ *x ++ = (uint16_t)w;
+ n --;
+ }
+ }
+}
+
+/* see inner.h */
void
PQCLEAN_FALCON512_CLEAN_hash_to_point_ct(
    inner_shake256_context *sc,
    uint16_t *x, unsigned logn, uint8_t *tmp) {
    /*
     * Each 16-bit sample is a value in 0..65535. The value is
     * kept if it falls in 0..61444 (because 61445 = 5*12289)
     * and rejected otherwise; thus, each sample has probability
     * about 0.93758 of being selected.
     *
     * We want to oversample enough to be sure that we will
     * have enough values with probability at least 1 - 2^(-256).
     * Depending on degree N, this leads to the following
     * required oversampling:
     *
     *   logn     n  oversampling
     *     1      2     65
     *     2      4     67
     *     3      8     71
     *     4     16     77
     *     5     32     86
     *     6     64    100
     *     7    128    122
     *     8    256    154
     *     9    512    205
     *    10   1024    287
     *
     * If logn >= 7, then the provided temporary buffer is large
     * enough. Otherwise, we use a stack buffer of 63 entries
     * (i.e. 126 bytes) for the values that do not fit in tmp[].
     */

    static const uint16_t overtab[] = {
        0, /* unused */
        65,
        67,
        71,
        77,
        86,
        100,
        122,
        154,
        205,
        287
    };

    unsigned n, n2, u, m, p, over;
    uint16_t *tt1, tt2[63];

    /*
     * We first generate m 16-bit values. Values 0..n-1 go to x[].
     * Values n..2*n-1 go to tt1[]. Values 2*n and later go to tt2[].
     * We also reduce modulo q the values; rejected values are set
     * to 0xFFFF.
     */
    n = 1U << logn;
    n2 = n << 1;
    over = overtab[logn];
    m = n + over;
    tt1 = (uint16_t *)tmp;
    for (u = 0; u < m; u ++) {
        uint8_t buf[2];
        uint32_t w, wr;

        inner_shake256_extract(sc, buf, sizeof buf);
        w = ((uint32_t)buf[0] << 8) | (uint32_t)buf[1];
        /*
         * Constant-time reduction modulo q = 12289: conditionally
         * subtract 2*q (24578) twice, then q once. Each mask
         * (((v - k) >> 31) - 1) is all-ones exactly when v >= k,
         * so the subtraction happens only when it does not
         * underflow.
         */
        wr = w - ((uint32_t)24578 & (((w - 24578) >> 31) - 1));
        wr = wr - ((uint32_t)24578 & (((wr - 24578) >> 31) - 1));
        wr = wr - ((uint32_t)12289 & (((wr - 12289) >> 31) - 1));
        /*
         * Mark rejected samples (w >= 61445): OR with all-ones so
         * the stored 16-bit value becomes 0xFFFF.
         */
        wr |= ((w - 61445) >> 31) - 1;
        if (u < n) {
            x[u] = (uint16_t)wr;
        } else if (u < n2) {
            tt1[u - n] = (uint16_t)wr;
        } else {
            tt2[u - n2] = (uint16_t)wr;
        }
    }

    /*
     * Now we must "squeeze out" the invalid values. We do this in
     * a logarithmic sequence of passes; each pass computes where a
     * value should go, and moves it down by 'p' slots if necessary,
     * where 'p' uses an increasing powers-of-two scale. It can be
     * shown that in all cases where the loop decides that a value
     * has to be moved down by p slots, the destination slot is
     * "free" (i.e. contains an invalid value).
     */
    for (p = 1; p <= over; p <<= 1) {
        unsigned v;

        /*
         * In the loop below:
         *
         *   - v contains the index of the final destination of
         *     the value; it is recomputed dynamically based on
         *     whether values are valid or not.
         *
         *   - u is the index of the value we consider ("source");
         *     its address is s.
         *
         *   - The loop may swap the value with the one at index
         *     u-p. The address of the swap destination is d.
         */
        v = 0;
        for (u = 0; u < m; u ++) {
            uint16_t *s, *d;
            unsigned j, sv, dv, mk;

            /* Locate the source slot across the three arrays. */
            if (u < n) {
                s = &x[u];
            } else if (u < n2) {
                s = &tt1[u - n];
            } else {
                s = &tt2[u - n2];
            }
            sv = *s;

            /*
             * The value in sv should ultimately go to
             * address v, i.e. jump back by u-v slots.
             */
            j = u - v;

            /*
             * We increment v for the next iteration, but
             * only if the source value is valid. The mask
             * 'mk' is -1 if the value is valid, 0 otherwise,
             * so we _subtract_ mk. (Valid values are < 12289,
             * so their bit 15 is clear; rejected slots hold
             * 0xFFFF.)
             */
            mk = (sv >> 15) - 1U;
            v -= mk;

            /*
             * In this loop we consider jumps by p slots; if
             * u < p then there is nothing more to do.
             */
            if (u < p) {
                continue;
            }

            /*
             * Destination for the swap: value at address u-p.
             */
            if ((u - p) < n) {
                d = &x[u - p];
            } else if ((u - p) < n2) {
                d = &tt1[(u - p) - n];
            } else {
                d = &tt2[(u - p) - n2];
            }
            dv = *d;

            /*
             * The swap should be performed only if the source
             * is valid AND the jump j has its 'p' bit set.
             * (j & p) is either 0 or p, and p <= 256 here, so
             * adding 0x1FF and shifting right by 9 yields 1
             * exactly when that bit is set.
             */
            mk &= -(((j & p) + 0x1FF) >> 9);

            /* Constant-time conditional swap of *s and *d. */
            *s = (uint16_t)(sv ^ (mk & (sv ^ dv)));
            *d = (uint16_t)(dv ^ (mk & (sv ^ dv)));
        }
    }
}
+
+/* see inner.h */
int
PQCLEAN_FALCON512_CLEAN_is_short(
    const int16_t *s1, const int16_t *s2, unsigned logn) {
    /*
     * Squared l2-norm of (s1, s2), computed with 32-bit operations
     * only. If the running sum ever exceeds 2^31-1 (tracked through
     * the sign bit collected in 'overflow'), the result saturates to
     * 2^32-1, which always fails the final bound comparison.
     */
    size_t count, k;
    uint32_t sum, overflow;

    count = (size_t)1 << logn;
    sum = 0;
    overflow = 0;
    for (k = 0; k < count; k ++) {
        int32_t c;

        c = s1[k];
        sum += (uint32_t)(c * c);
        overflow |= sum;
        c = s2[k];
        sum += (uint32_t)(c * c);
        overflow |= sum;
    }
    /* Saturate to 2^32-1 if any partial sum had its top bit set. */
    sum |= -(overflow >> 31);

    /*
     * Acceptance bound on the l2-norm is:
     *   1.2*1.55*sqrt(q)*sqrt(2*N)
     * Value 7085 is floor((1.2^2)*(1.55^2)*2*1024).
     */
    return sum < (((uint32_t)7085 * (uint32_t)12289) >> (10 - logn));
}
+
+/* see inner.h */
int
PQCLEAN_FALCON512_CLEAN_is_short_half(
    uint32_t sqn, const int16_t *s2, unsigned logn) {
    /*
     * Complete a partial squared norm 'sqn' (covering the first
     * vector half) with the contribution of s2, using the same
     * 32-bit saturation rule as PQCLEAN_FALCON512_CLEAN_is_short,
     * then test the acceptance bound.
     */
    size_t count, k;
    uint32_t sat;

    count = (size_t)1 << logn;
    sat = -(sqn >> 31);
    for (k = 0; k < count; k ++) {
        int32_t c;

        c = s2[k];
        sqn += (uint32_t)(c * c);
        sat |= sqn;
    }
    /* Saturate to 2^32-1 if any partial sum had its top bit set. */
    sqn |= -(sat >> 31);

    /*
     * Acceptance bound on the l2-norm is:
     *   1.2*1.55*sqrt(q)*sqrt(2*N)
     * Value 7085 is floor((1.2^2)*(1.55^2)*2*1024).
     */
    return sqn < (((uint32_t)7085 * (uint32_t)12289) >> (10 - logn));
}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/config.mk b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/config.mk
new file mode 100644
index 000000000..b28c9ce64
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/config.mk
@@ -0,0 +1,17 @@
+# DO NOT EDIT: generated from config.mk.subdirs.template
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# add fixes for platform integration issues here.
+#
+# liboqs programs expect the public include files to be in oqs/xxxx,
+# So we put liboqs in its own module, oqs, and point to the dist files
+INCLUDES += -I$(CORE_DEPTH)/lib/liboqs/src/common/pqclean_shims -I$(CORE_DEPTH)/lib/liboqs/src/common/sha3/xkcp_low/KeccakP-1600/plain-64bits
+DEFINES +=
+
+ifeq ($(OS_ARCH), Darwin)
+DEFINES += -DOQS_HAVE_ALIGNED_ALLOC -DOQS_HAVE_MEMALIGN -DOQS_HAVE_POSIX_MEMALIGN
+endif
+
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/fft.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/fft.c
new file mode 100644
index 000000000..a7d9bdad0
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/fft.c
@@ -0,0 +1,700 @@
+#include "inner.h"
+
+/*
+ * FFT code.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+/*
+ * Rules for complex number macros:
+ * --------------------------------
+ *
+ * Operand order is: destination, source1, source2...
+ *
+ * Each operand is a real and an imaginary part.
+ *
+ * All overlaps are allowed.
+ */
+
+/*
+ * Addition of two complex numbers (d = a + b).
+ */
+#define FPC_ADD(d_re, d_im, a_re, a_im, b_re, b_im) do { \
+ fpr fpct_re, fpct_im; \
+ fpct_re = fpr_add(a_re, b_re); \
+ fpct_im = fpr_add(a_im, b_im); \
+ (d_re) = fpct_re; \
+ (d_im) = fpct_im; \
+ } while (0)
+
+/*
+ * Subtraction of two complex numbers (d = a - b).
+ */
+#define FPC_SUB(d_re, d_im, a_re, a_im, b_re, b_im) do { \
+ fpr fpct_re, fpct_im; \
+ fpct_re = fpr_sub(a_re, b_re); \
+ fpct_im = fpr_sub(a_im, b_im); \
+ (d_re) = fpct_re; \
+ (d_im) = fpct_im; \
+ } while (0)
+
+/*
+ * Multiplication of two complex numbers (d = a * b).
+ */
+#define FPC_MUL(d_re, d_im, a_re, a_im, b_re, b_im) do { \
+ fpr fpct_a_re, fpct_a_im; \
+ fpr fpct_b_re, fpct_b_im; \
+ fpr fpct_d_re, fpct_d_im; \
+ fpct_a_re = (a_re); \
+ fpct_a_im = (a_im); \
+ fpct_b_re = (b_re); \
+ fpct_b_im = (b_im); \
+ fpct_d_re = fpr_sub( \
+ fpr_mul(fpct_a_re, fpct_b_re), \
+ fpr_mul(fpct_a_im, fpct_b_im)); \
+ fpct_d_im = fpr_add( \
+ fpr_mul(fpct_a_re, fpct_b_im), \
+ fpr_mul(fpct_a_im, fpct_b_re)); \
+ (d_re) = fpct_d_re; \
+ (d_im) = fpct_d_im; \
+ } while (0)
+
+/*
+ * Squaring of a complex number (d = a * a).
+ */
+#define FPC_SQR(d_re, d_im, a_re, a_im) do { \
+ fpr fpct_a_re, fpct_a_im; \
+ fpr fpct_d_re, fpct_d_im; \
+ fpct_a_re = (a_re); \
+ fpct_a_im = (a_im); \
+ fpct_d_re = fpr_sub(fpr_sqr(fpct_a_re), fpr_sqr(fpct_a_im)); \
+ fpct_d_im = fpr_double(fpr_mul(fpct_a_re, fpct_a_im)); \
+ (d_re) = fpct_d_re; \
+ (d_im) = fpct_d_im; \
+ } while (0)
+
+/*
+ * Inversion of a complex number (d = 1 / a).
+ */
+#define FPC_INV(d_re, d_im, a_re, a_im) do { \
+ fpr fpct_a_re, fpct_a_im; \
+ fpr fpct_d_re, fpct_d_im; \
+ fpr fpct_m; \
+ fpct_a_re = (a_re); \
+ fpct_a_im = (a_im); \
+ fpct_m = fpr_add(fpr_sqr(fpct_a_re), fpr_sqr(fpct_a_im)); \
+ fpct_m = fpr_inv(fpct_m); \
+ fpct_d_re = fpr_mul(fpct_a_re, fpct_m); \
+ fpct_d_im = fpr_mul(fpr_neg(fpct_a_im), fpct_m); \
+ (d_re) = fpct_d_re; \
+ (d_im) = fpct_d_im; \
+ } while (0)
+
+/*
+ * Division of complex numbers (d = a / b).
+ */
+#define FPC_DIV(d_re, d_im, a_re, a_im, b_re, b_im) do { \
+ fpr fpct_a_re, fpct_a_im; \
+ fpr fpct_b_re, fpct_b_im; \
+ fpr fpct_d_re, fpct_d_im; \
+ fpr fpct_m; \
+ fpct_a_re = (a_re); \
+ fpct_a_im = (a_im); \
+ fpct_b_re = (b_re); \
+ fpct_b_im = (b_im); \
+ fpct_m = fpr_add(fpr_sqr(fpct_b_re), fpr_sqr(fpct_b_im)); \
+ fpct_m = fpr_inv(fpct_m); \
+ fpct_b_re = fpr_mul(fpct_b_re, fpct_m); \
+ fpct_b_im = fpr_mul(fpr_neg(fpct_b_im), fpct_m); \
+ fpct_d_re = fpr_sub( \
+ fpr_mul(fpct_a_re, fpct_b_re), \
+ fpr_mul(fpct_a_im, fpct_b_im)); \
+ fpct_d_im = fpr_add( \
+ fpr_mul(fpct_a_re, fpct_b_im), \
+ fpr_mul(fpct_a_im, fpct_b_re)); \
+ (d_re) = fpct_d_re; \
+ (d_im) = fpct_d_im; \
+ } while (0)
+
+/*
+ * Let w = exp(i*pi/N); w is a primitive 2N-th root of 1. We define the
+ * values w_j = w^(2j+1) for all j from 0 to N-1: these are the roots
+ * of X^N+1 in the field of complex numbers. A crucial property is that
+ * w_{N-1-j} = conj(w_j) = 1/w_j for all j.
+ *
+ * FFT representation of a polynomial f (taken modulo X^N+1) is the
+ * set of values f(w_j). Since f is real, conj(f(w_j)) = f(conj(w_j)),
+ * thus f(w_{N-1-j}) = conj(f(w_j)). We thus store only half the values,
+ * for j = 0 to N/2-1; the other half can be recomputed easily when (if)
+ * needed. A consequence is that FFT representation has the same size
+ * as normal representation: N/2 complex numbers use N real numbers (each
+ * complex number is the combination of a real and an imaginary part).
+ *
+ * We use a specific ordering which makes computations easier. Let rev()
+ * be the bit-reversal function over log(N) bits. For j in 0..N/2-1, we
+ * store the real and imaginary parts of f(w_j) in slots:
+ *
+ * Re(f(w_j)) -> slot rev(j)/2
+ * Im(f(w_j)) -> slot rev(j)/2+N/2
+ *
+ * (Note that rev(j) is even for j < N/2.)
+ */
+
+/* see inner.h */
void
PQCLEAN_FALCON512_CLEAN_FFT(fpr *f, unsigned logn) {
    /*
     * FFT algorithm in bit-reversal order uses the following
     * iterative algorithm:
     *
     *   t = N
     *   for m = 1; m < N; m *= 2:
     *       ht = t/2
     *       for i1 = 0; i1 < m; i1 ++:
     *           j1 = i1 * t
     *           s = GM[m + i1]
     *           for j = j1; j < (j1 + ht); j ++:
     *               x = f[j]
     *               y = s * f[j + ht]
     *               f[j] = x + y
     *               f[j + ht] = x - y
     *       t = ht
     *
     * GM[k] contains w^rev(k) for primitive root w = exp(i*pi/N).
     *
     * In the description above, f[] is supposed to contain complex
     * numbers. In our in-memory representation, the real and
     * imaginary parts of f[k] are in array slots k and k+N/2.
     *
     * We only keep the first half of the complex numbers. We can
     * see that after the first iteration, the first and second halves
     * of the array of complex numbers have separate lives, so we
     * simply ignore the second part.
     */

    unsigned u;
    size_t t, n, hn, m;

    /*
     * First iteration: compute f[j] + i * f[j+N/2] for all j < N/2
     * (because GM[1] = w^rev(1) = w^(N/2) = i).
     * In our chosen representation, this is a no-op: everything is
     * already where it should be.
     */

    /*
     * Subsequent iterations are truncated to use only the first
     * half of values.
     */
    n = (size_t)1 << logn;
    hn = n >> 1;
    t = hn;
    /*
     * u is the pass index; m doubles at each pass while the butterfly
     * half-distance ht is halved.
     */
    for (u = 1, m = 2; u < logn; u ++, m <<= 1) {
        size_t ht, hm, i1, j1;

        ht = t >> 1;
        hm = m >> 1;
        for (i1 = 0, j1 = 0; i1 < hm; i1 ++, j1 += t) {
            size_t j, j2;

            j2 = j1 + ht;
            fpr s_re, s_im;

            /* Twiddle factor s = GM[m + i1] (real part, then imaginary). */
            s_re = fpr_gm_tab[((m + i1) << 1) + 0];
            s_im = fpr_gm_tab[((m + i1) << 1) + 1];
            for (j = j1; j < j2; j ++) {
                fpr x_re, x_im, y_re, y_im;

                /* Butterfly: f[j] <- x + s*y, f[j+ht] <- x - s*y. */
                x_re = f[j];
                x_im = f[j + hn];
                y_re = f[j + ht];
                y_im = f[j + ht + hn];
                FPC_MUL(y_re, y_im, y_re, y_im, s_re, s_im);
                FPC_ADD(f[j], f[j + hn],
                        x_re, x_im, y_re, y_im);
                FPC_SUB(f[j + ht], f[j + ht + hn],
                        x_re, x_im, y_re, y_im);
            }
        }
        t = ht;
    }
}
+
+/* see inner.h */
void
PQCLEAN_FALCON512_CLEAN_iFFT(fpr *f, unsigned logn) {
    /*
     * Inverse FFT algorithm in bit-reversal order uses the following
     * iterative algorithm:
     *
     *   t = 1
     *   for m = N; m > 1; m /= 2:
     *       hm = m/2
     *       dt = t*2
     *       for i1 = 0; i1 < hm; i1 ++:
     *           j1 = i1 * dt
     *           s = iGM[hm + i1]
     *           for j = j1; j < (j1 + t); j ++:
     *               x = f[j]
     *               y = f[j + t]
     *               f[j] = x + y
     *               f[j + t] = s * (x - y)
     *       t = dt
     *   for i1 = 0; i1 < N; i1 ++:
     *       f[i1] = f[i1] / N
     *
     * iGM[k] contains (1/w)^rev(k) for primitive root w = exp(i*pi/N)
     * (actually, iGM[k] = 1/GM[k] = conj(GM[k])).
     *
     * In the main loop (not counting the final division loop), in
     * all iterations except the last, the first and second half of f[]
     * (as an array of complex numbers) are separate. In our chosen
     * representation, we do not keep the second half.
     *
     * The last iteration recombines the recomputed half with the
     * implicit half, and should yield only real numbers since the
     * target polynomial is real; moreover, s = i at that step.
     * Thus, when considering x and y:
     *    y = conj(x) since the final f[j] must be real
     * Therefore, f[j] is filled with 2*Re(x), and f[j + t] is
     * filled with 2*Im(x).
     * But we already have Re(x) and Im(x) in array slots j and j+t
     * in our chosen representation. That last iteration is thus a
     * simple doubling of the values in all the array.
     *
     * We make the last iteration a no-op by tweaking the final
     * division into a division by N/2, not N.
     */
    size_t u, n, hn, t, m;

    n = (size_t)1 << logn;
    t = 1;
    m = n;
    hn = n >> 1;
    /* u counts passes down from logn; t doubles while m is halved. */
    for (u = logn; u > 1; u --) {
        size_t hm, dt, i1, j1;

        hm = m >> 1;
        dt = t << 1;
        for (i1 = 0, j1 = 0; j1 < hn; i1 ++, j1 += dt) {
            size_t j, j2;

            j2 = j1 + t;
            fpr s_re, s_im;

            /* Inverse twiddle factor: conjugate of GM[hm + i1]. */
            s_re = fpr_gm_tab[((hm + i1) << 1) + 0];
            s_im = fpr_neg(fpr_gm_tab[((hm + i1) << 1) + 1]);
            for (j = j1; j < j2; j ++) {
                fpr x_re, x_im, y_re, y_im;

                /* Inverse butterfly: f[j] <- x + y, f[j+t] <- s*(x - y). */
                x_re = f[j];
                x_im = f[j + hn];
                y_re = f[j + t];
                y_im = f[j + t + hn];
                FPC_ADD(f[j], f[j + hn],
                        x_re, x_im, y_re, y_im);
                FPC_SUB(x_re, x_im, x_re, x_im, y_re, y_im);
                FPC_MUL(f[j + t], f[j + t + hn],
                        x_re, x_im, s_re, s_im);
            }
        }
        t = dt;
        m = hm;
    }

    /*
     * Last iteration is a no-op, provided that we divide by N/2
     * instead of N. We need to make a special case for logn = 0.
     */
    if (logn > 0) {
        fpr ni;

        /* Scaling factor for the division by N/2 described above. */
        ni = fpr_p2_tab[logn];
        for (u = 0; u < n; u ++) {
            f[u] = fpr_mul(f[u], ni);
        }
    }
}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_add(
+ fpr *a, const fpr *b, unsigned logn) {
+ size_t n, u;
+
+ n = (size_t)1 << logn;
+ for (u = 0; u < n; u ++) {
+ a[u] = fpr_add(a[u], b[u]);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_sub(
+ fpr *a, const fpr *b, unsigned logn) {
+ size_t n, u;
+
+ n = (size_t)1 << logn;
+ for (u = 0; u < n; u ++) {
+ a[u] = fpr_sub(a[u], b[u]);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_neg(fpr *a, unsigned logn) {
+ size_t n, u;
+
+ n = (size_t)1 << logn;
+ for (u = 0; u < n; u ++) {
+ a[u] = fpr_neg(a[u]);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_adj_fft(fpr *a, unsigned logn) {
+ size_t n, u;
+
+ n = (size_t)1 << logn;
+ for (u = (n >> 1); u < n; u ++) {
+ a[u] = fpr_neg(a[u]);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_mul_fft(
+ fpr *a, const fpr *b, unsigned logn) {
+ size_t n, hn, u;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ for (u = 0; u < hn; u ++) {
+ fpr a_re, a_im, b_re, b_im;
+
+ a_re = a[u];
+ a_im = a[u + hn];
+ b_re = b[u];
+ b_im = b[u + hn];
+ FPC_MUL(a[u], a[u + hn], a_re, a_im, b_re, b_im);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_muladj_fft(
+ fpr *a, const fpr *b, unsigned logn) {
+ size_t n, hn, u;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ for (u = 0; u < hn; u ++) {
+ fpr a_re, a_im, b_re, b_im;
+
+ a_re = a[u];
+ a_im = a[u + hn];
+ b_re = b[u];
+ b_im = fpr_neg(b[u + hn]);
+ FPC_MUL(a[u], a[u + hn], a_re, a_im, b_re, b_im);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_mulselfadj_fft(fpr *a, unsigned logn) {
+ /*
+ * Since each coefficient is multiplied with its own conjugate,
+ * the result contains only real values.
+ */
+ size_t n, hn, u;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ for (u = 0; u < hn; u ++) {
+ fpr a_re, a_im;
+
+ a_re = a[u];
+ a_im = a[u + hn];
+ a[u] = fpr_add(fpr_sqr(a_re), fpr_sqr(a_im));
+ a[u + hn] = fpr_zero;
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_mulconst(fpr *a, fpr x, unsigned logn) {
+ size_t n, u;
+
+ n = (size_t)1 << logn;
+ for (u = 0; u < n; u ++) {
+ a[u] = fpr_mul(a[u], x);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_div_fft(
+ fpr *a, const fpr *b, unsigned logn) {
+ size_t n, hn, u;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ for (u = 0; u < hn; u ++) {
+ fpr a_re, a_im, b_re, b_im;
+
+ a_re = a[u];
+ a_im = a[u + hn];
+ b_re = b[u];
+ b_im = b[u + hn];
+ FPC_DIV(a[u], a[u + hn], a_re, a_im, b_re, b_im);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_invnorm2_fft(fpr *d,
+ const fpr *a, const fpr *b, unsigned logn) {
+ size_t n, hn, u;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ for (u = 0; u < hn; u ++) {
+ fpr a_re, a_im;
+ fpr b_re, b_im;
+
+ a_re = a[u];
+ a_im = a[u + hn];
+ b_re = b[u];
+ b_im = b[u + hn];
+ d[u] = fpr_inv(fpr_add(
+ fpr_add(fpr_sqr(a_re), fpr_sqr(a_im)),
+ fpr_add(fpr_sqr(b_re), fpr_sqr(b_im))));
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_add_muladj_fft(fpr *d,
+ const fpr *F, const fpr *G,
+ const fpr *f, const fpr *g, unsigned logn) {
+ size_t n, hn, u;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ for (u = 0; u < hn; u ++) {
+ fpr F_re, F_im, G_re, G_im;
+ fpr f_re, f_im, g_re, g_im;
+ fpr a_re, a_im, b_re, b_im;
+
+ F_re = F[u];
+ F_im = F[u + hn];
+ G_re = G[u];
+ G_im = G[u + hn];
+ f_re = f[u];
+ f_im = f[u + hn];
+ g_re = g[u];
+ g_im = g[u + hn];
+
+ FPC_MUL(a_re, a_im, F_re, F_im, f_re, fpr_neg(f_im));
+ FPC_MUL(b_re, b_im, G_re, G_im, g_re, fpr_neg(g_im));
+ d[u] = fpr_add(a_re, b_re);
+ d[u + hn] = fpr_add(a_im, b_im);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_mul_autoadj_fft(
+ fpr *a, const fpr *b, unsigned logn) {
+ size_t n, hn, u;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ for (u = 0; u < hn; u ++) {
+ a[u] = fpr_mul(a[u], b[u]);
+ a[u + hn] = fpr_mul(a[u + hn], b[u]);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_div_autoadj_fft(
+ fpr *a, const fpr *b, unsigned logn) {
+ size_t n, hn, u;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ for (u = 0; u < hn; u ++) {
+ fpr ib;
+
+ ib = fpr_inv(b[u]);
+ a[u] = fpr_mul(a[u], ib);
+ a[u + hn] = fpr_mul(a[u + hn], ib);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_LDL_fft(
+ const fpr *g00,
+ fpr *g01, fpr *g11, unsigned logn) {
+ size_t n, hn, u;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ for (u = 0; u < hn; u ++) {
+ fpr g00_re, g00_im, g01_re, g01_im, g11_re, g11_im;
+ fpr mu_re, mu_im;
+
+ g00_re = g00[u];
+ g00_im = g00[u + hn];
+ g01_re = g01[u];
+ g01_im = g01[u + hn];
+ g11_re = g11[u];
+ g11_im = g11[u + hn];
+ FPC_DIV(mu_re, mu_im, g01_re, g01_im, g00_re, g00_im);
+ FPC_MUL(g01_re, g01_im, mu_re, mu_im, g01_re, fpr_neg(g01_im));
+ FPC_SUB(g11[u], g11[u + hn], g11_re, g11_im, g01_re, g01_im);
+ g01[u] = mu_re;
+ g01[u + hn] = fpr_neg(mu_im);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_LDLmv_fft(
+ fpr *d11, fpr *l10,
+ const fpr *g00, const fpr *g01,
+ const fpr *g11, unsigned logn) {
+ size_t n, hn, u;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ for (u = 0; u < hn; u ++) {
+ fpr g00_re, g00_im, g01_re, g01_im, g11_re, g11_im;
+ fpr mu_re, mu_im;
+
+ g00_re = g00[u];
+ g00_im = g00[u + hn];
+ g01_re = g01[u];
+ g01_im = g01[u + hn];
+ g11_re = g11[u];
+ g11_im = g11[u + hn];
+ FPC_DIV(mu_re, mu_im, g01_re, g01_im, g00_re, g00_im);
+ FPC_MUL(g01_re, g01_im, mu_re, mu_im, g01_re, fpr_neg(g01_im));
+ FPC_SUB(d11[u], d11[u + hn], g11_re, g11_im, g01_re, g01_im);
+ l10[u] = mu_re;
+ l10[u + hn] = fpr_neg(mu_im);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_split_fft(
+ fpr *f0, fpr *f1,
+ const fpr *f, unsigned logn) {
+ /*
+ * The FFT representation we use is in bit-reversed order
+ * (element i contains f(w^(rev(i))), where rev() is the
+ * bit-reversal function over the ring degree. This changes
+ * indexes with regards to the Falcon specification.
+ */
+ size_t n, hn, qn, u;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ qn = hn >> 1;
+
+ /*
+ * We process complex values by pairs. For logn = 1, there is only
+ * one complex value (the other one is the implicit conjugate),
+ * so we add the two lines below because the loop will be
+ * skipped.
+ */
+ f0[0] = f[0];
+ f1[0] = f[hn];
+
+ for (u = 0; u < qn; u ++) {
+ fpr a_re, a_im, b_re, b_im;
+ fpr t_re, t_im;
+
+ a_re = f[(u << 1) + 0];
+ a_im = f[(u << 1) + 0 + hn];
+ b_re = f[(u << 1) + 1];
+ b_im = f[(u << 1) + 1 + hn];
+
+ FPC_ADD(t_re, t_im, a_re, a_im, b_re, b_im);
+ f0[u] = fpr_half(t_re);
+ f0[u + qn] = fpr_half(t_im);
+
+ FPC_SUB(t_re, t_im, a_re, a_im, b_re, b_im);
+ FPC_MUL(t_re, t_im, t_re, t_im,
+ fpr_gm_tab[((u + hn) << 1) + 0],
+ fpr_neg(fpr_gm_tab[((u + hn) << 1) + 1]));
+ f1[u] = fpr_half(t_re);
+ f1[u + qn] = fpr_half(t_im);
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_poly_merge_fft(
+ fpr *f,
+ const fpr *f0, const fpr *f1, unsigned logn) {
+ size_t n, hn, qn, u;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ qn = hn >> 1;
+
+ /*
+ * An extra copy to handle the special case logn = 1.
+ */
+ f[0] = f0[0];
+ f[hn] = f1[0];
+
+ for (u = 0; u < qn; u ++) {
+ fpr a_re, a_im, b_re, b_im;
+ fpr t_re, t_im;
+
+ a_re = f0[u];
+ a_im = f0[u + qn];
+ FPC_MUL(b_re, b_im, f1[u], f1[u + qn],
+ fpr_gm_tab[((u + hn) << 1) + 0],
+ fpr_gm_tab[((u + hn) << 1) + 1]);
+ FPC_ADD(t_re, t_im, a_re, a_im, b_re, b_im);
+ f[(u << 1) + 0] = t_re;
+ f[(u << 1) + 0 + hn] = t_im;
+ FPC_SUB(t_re, t_im, a_re, a_im, b_re, b_im);
+ f[(u << 1) + 1] = t_re;
+ f[(u << 1) + 1 + hn] = t_im;
+ }
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/fpr.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/fpr.c
new file mode 100644
index 000000000..669c825ee
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/fpr.c
@@ -0,0 +1,1890 @@
+#include "inner.h"
+
+/*
+ * Floating-point operations.
+ *
+ * This file implements the non-inline functions declared in
+ * fpr.h, as well as the constants for FFT / iFFT.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+
+/*
+ * Normalize a provided unsigned integer to the 2^63..2^64-1 range by
+ * left-shifting it if necessary. The exponent e is adjusted accordingly
+ * (i.e. if the value was left-shifted by n bits, then n is subtracted
+ * from e). If source m is 0, then it remains 0, but e is altered.
+ * Both m and e must be simple variables (no expressions allowed).
+ */
+#define FPR_NORM64(m, e) do { \
+ uint32_t nt; \
+ \
+ (e) -= 63; \
+ \
+ nt = (uint32_t)((m) >> 32); \
+ nt = (nt | -nt) >> 31; \
+ (m) ^= ((m) ^ ((m) << 32)) & ((uint64_t)nt - 1); \
+ (e) += (int)(nt << 5); \
+ \
+ nt = (uint32_t)((m) >> 48); \
+ nt = (nt | -nt) >> 31; \
+ (m) ^= ((m) ^ ((m) << 16)) & ((uint64_t)nt - 1); \
+ (e) += (int)(nt << 4); \
+ \
+ nt = (uint32_t)((m) >> 56); \
+ nt = (nt | -nt) >> 31; \
+ (m) ^= ((m) ^ ((m) << 8)) & ((uint64_t)nt - 1); \
+ (e) += (int)(nt << 3); \
+ \
+ nt = (uint32_t)((m) >> 60); \
+ nt = (nt | -nt) >> 31; \
+ (m) ^= ((m) ^ ((m) << 4)) & ((uint64_t)nt - 1); \
+ (e) += (int)(nt << 2); \
+ \
+ nt = (uint32_t)((m) >> 62); \
+ nt = (nt | -nt) >> 31; \
+ (m) ^= ((m) ^ ((m) << 2)) & ((uint64_t)nt - 1); \
+ (e) += (int)(nt << 1); \
+ \
+ nt = (uint32_t)((m) >> 63); \
+ (m) ^= ((m) ^ ((m) << 1)) & ((uint64_t)nt - 1); \
+ (e) += (int)(nt); \
+ } while (0)
+
+uint64_t
+fpr_ursh(uint64_t x, int n) {
+ x ^= (x ^ (x >> 32)) & -(uint64_t)(n >> 5);
+ return x >> (n & 31);
+}
+
+int64_t
+fpr_irsh(int64_t x, int n) {
+ x ^= (x ^ (x >> 32)) & -(int64_t)(n >> 5);
+ return x >> (n & 31);
+}
+
+uint64_t
+fpr_ulsh(uint64_t x, int n) {
+ x ^= (x ^ (x << 32)) & -(uint64_t)(n >> 5);
+ return x << (n & 31);
+}
+
+fpr
+FPR(int s, int e, uint64_t m) {
+ fpr x;
+ uint32_t t;
+ unsigned f;
+
+ /*
+ * If e >= -1076, then the value is "normal"; otherwise, it
+ * should be a subnormal, which we clamp down to zero.
+ */
+ e += 1076;
+ t = (uint32_t)e >> 31;
+ m &= (uint64_t)t - 1;
+
+ /*
+ * If m = 0 then we want a zero; make e = 0 too, but conserve
+ * the sign.
+ */
+ t = (uint32_t)(m >> 54);
+ e &= -(int)t;
+
+ /*
+ * The 52 mantissa bits come from m. Value m has its top bit set
+ * (unless it is a zero); we leave it "as is": the top bit will
+ * increment the exponent by 1, except when m = 0, which is
+ * exactly what we want.
+ */
+ x = (((uint64_t)s << 63) | (m >> 2)) + ((uint64_t)(uint32_t)e << 52);
+
+ /*
+ * Rounding: if the low three bits of m are 011, 110 or 111,
+ * then the value should be incremented to get the next
+ * representable value. This implements the usual
+ * round-to-nearest rule (with preference to even values in case
+ * of a tie). Note that the increment may make a carry spill
+ * into the exponent field, which is again exactly what we want
+ * in that case.
+ */
+ f = (unsigned)m & 7U;
+ x += (0xC8U >> f) & 1;
+ return x;
+}
+
+fpr
+fpr_scaled(int64_t i, int sc) {
+ /*
+ * To convert from int to float, we have to do the following:
+ * 1. Get the absolute value of the input, and its sign
+ * 2. Shift right or left the value as appropriate
+ * 3. Pack the result
+ *
+ * We can assume that the source integer is not -2^63.
+ */
+ int s, e;
+ uint32_t t;
+ uint64_t m;
+
+ /*
+ * Extract sign bit.
+ * We have: -i = 1 + ~i
+ */
+ s = (int)((uint64_t)i >> 63);
+ i ^= -(int64_t)s;
+ i += s;
+
+ /*
+ * For now we suppose that i != 0.
+ * Otherwise, we set m to i and left-shift it as much as needed
+ * to get a 1 in the top bit. We can do that in a logarithmic
+ * number of conditional shifts.
+ */
+ m = (uint64_t)i;
+ e = 9 + sc;
+ FPR_NORM64(m, e);
+
+ /*
+ * Now m is in the 2^63..2^64-1 range. We must divide it by 512;
+ * if one of the dropped bits is a 1, this should go into the
+ * "sticky bit".
+ */
+ m |= ((uint32_t)m & 0x1FF) + 0x1FF;
+ m >>= 9;
+
+ /*
+ * Corrective action: if i = 0 then all of the above was
+ * incorrect, and we clamp e and m down to zero.
+ */
+ t = (uint32_t)((uint64_t)(i | -i) >> 63);
+ m &= -(uint64_t)t;
+ e &= -(int)t;
+
+ /*
+ * Assemble back everything. The FPR() function will handle cases
+ * where e is too low.
+ */
+ return FPR(s, e, m);
+}
+
+fpr
+fpr_of(int64_t i) {
+ return fpr_scaled(i, 0);
+}
+
+int64_t
+fpr_rint(fpr x) {
+ uint64_t m, d;
+ int e;
+ uint32_t s, dd, f;
+
+ /*
+ * We assume that the value fits in -(2^63-1)..+(2^63-1). We can
+ * thus extract the mantissa as a 63-bit integer, then right-shift
+ * it as needed.
+ */
+ m = ((x << 10) | ((uint64_t)1 << 62)) & (((uint64_t)1 << 63) - 1);
+ e = 1085 - ((int)(x >> 52) & 0x7FF);
+
+ /*
+ * If a shift of more than 63 bits is needed, then simply set m
+ * to zero. This also covers the case of an input operand equal
+ * to zero.
+ */
+ m &= -(uint64_t)((uint32_t)(e - 64) >> 31);
+ e &= 63;
+
+ /*
+ * Right-shift m as needed. Shift count is e. Proper rounding
+ * mandates that:
+ * - If the highest dropped bit is zero, then round low.
+ * - If the highest dropped bit is one, and at least one of the
+ * other dropped bits is one, then round up.
+ * - If the highest dropped bit is one, and all other dropped
+ * bits are zero, then round up if the lowest kept bit is 1,
+ * or low otherwise (i.e. ties are broken by "rounding to even").
+ *
+ * We thus first extract a word consisting of all the dropped bit
+ * AND the lowest kept bit; then we shrink it down to three bits,
+ * the lowest being "sticky".
+ */
+ d = fpr_ulsh(m, 63 - e);
+ dd = (uint32_t)d | ((uint32_t)(d >> 32) & 0x1FFFFFFF);
+ f = (uint32_t)(d >> 61) | ((dd | -dd) >> 31);
+ m = fpr_ursh(m, e) + (uint64_t)((0xC8U >> f) & 1U);
+
+ /*
+ * Apply the sign bit.
+ */
+ s = (uint32_t)(x >> 63);
+ return ((int64_t)m ^ -(int64_t)s) + (int64_t)s;
+}
+
+int64_t
+fpr_floor(fpr x) {
+ uint64_t t;
+ int64_t xi;
+ int e, cc;
+
+ /*
+ * We extract the integer as a _signed_ 64-bit integer with
+ * a scaling factor. Since we assume that the value fits
+ * in the -(2^63-1)..+(2^63-1) range, we can left-shift the
+ * absolute value to make it in the 2^62..2^63-1 range: we
+ * will only need a right-shift afterwards.
+ */
+ e = (int)(x >> 52) & 0x7FF;
+ t = x >> 63;
+ xi = (int64_t)(((x << 10) | ((uint64_t)1 << 62))
+ & (((uint64_t)1 << 63) - 1));
+ xi = (xi ^ -(int64_t)t) + (int64_t)t;
+ cc = 1085 - e;
+
+ /*
+ * We perform an arithmetic right-shift on the value. This
+ * applies floor() semantics on both positive and negative values
+ * (rounding toward minus infinity).
+ */
+ xi = fpr_irsh(xi, cc & 63);
+
+ /*
+ * If the true shift count was 64 or more, then we should instead
+ * replace xi with 0 (if nonnegative) or -1 (if negative). Edge
+ * case: -0 will be floored to -1, not 0 (whether this is correct
+ * is debatable; in any case, the other functions normalize zero
+ * to +0).
+ *
+ * For an input of zero, the non-shifted xi was incorrect (we used
+ * a top implicit bit of value 1, not 0), but this does not matter
+ * since this operation will clamp it down.
+ */
+ xi ^= (xi ^ -(int64_t)t) & -(int64_t)((uint32_t)(63 - cc) >> 31);
+ return xi;
+}
+
+int64_t
+fpr_trunc(fpr x) {
+ uint64_t t, xu;
+ int e, cc;
+
+ /*
+ * Extract the absolute value. Since we assume that the value
+ * fits in the -(2^63-1)..+(2^63-1) range, we can left-shift
+ * the absolute value into the 2^62..2^63-1 range, and then
+ * do a right shift afterwards.
+ */
+ e = (int)(x >> 52) & 0x7FF;
+ xu = ((x << 10) | ((uint64_t)1 << 62)) & (((uint64_t)1 << 63) - 1);
+ cc = 1085 - e;
+ xu = fpr_ursh(xu, cc & 63);
+
+ /*
+ * If the exponent is too low (cc > 63), then the shift was wrong
+ * and we must clamp the value to 0. This also covers the case
+ * of an input equal to zero.
+ */
+ xu &= -(uint64_t)((uint32_t)(cc - 64) >> 31);
+
+ /*
+ * Apply back the sign, if the source value is negative.
+ */
+ t = x >> 63;
+ xu = (xu ^ -t) + t;
+ return *(int64_t *)&xu;
+}
+
+fpr
+fpr_add(fpr x, fpr y) {
+ uint64_t m, xu, yu, za;
+ uint32_t cs;
+ int ex, ey, sx, sy, cc;
+
+ /*
+ * Make sure that the first operand (x) has the larger absolute
+ * value. This guarantees that the exponent of y is less than
+ * or equal to the exponent of x, and, if they are equal, then
+ * the mantissa of y will not be greater than the mantissa of x.
+ *
+ * After this swap, the result will have the sign x, except in
+ * the following edge case: abs(x) = abs(y), and x and y have
+ * opposite sign bits; in that case, the result shall be +0
+ * even if the sign bit of x is 1. To handle this case properly,
+ * we do the swap is abs(x) = abs(y) AND the sign of x is 1.
+ */
+ m = ((uint64_t)1 << 63) - 1;
+ za = (x & m) - (y & m);
+ cs = (uint32_t)(za >> 63)
+ | ((1U - (uint32_t)(-za >> 63)) & (uint32_t)(x >> 63));
+ m = (x ^ y) & -(uint64_t)cs;
+ x ^= m;
+ y ^= m;
+
+ /*
+ * Extract sign bits, exponents and mantissas. The mantissas are
+ * scaled up to 2^55..2^56-1, and the exponent is unbiased. If
+ * an operand is zero, its mantissa is set to 0 at this step, and
+ * its exponent will be -1078.
+ */
+ ex = (int)(x >> 52);
+ sx = ex >> 11;
+ ex &= 0x7FF;
+ m = (uint64_t)(uint32_t)((ex + 0x7FF) >> 11) << 52;
+ xu = ((x & (((uint64_t)1 << 52) - 1)) | m) << 3;
+ ex -= 1078;
+ ey = (int)(y >> 52);
+ sy = ey >> 11;
+ ey &= 0x7FF;
+ m = (uint64_t)(uint32_t)((ey + 0x7FF) >> 11) << 52;
+ yu = ((y & (((uint64_t)1 << 52) - 1)) | m) << 3;
+ ey -= 1078;
+
+ /*
+ * x has the larger exponent; hence, we only need to right-shift y.
+ * If the shift count is larger than 59 bits then we clamp the
+ * value to zero.
+ */
+ cc = ex - ey;
+ yu &= -(uint64_t)((uint32_t)(cc - 60) >> 31);
+ cc &= 63;
+
+ /*
+ * The lowest bit of yu is "sticky".
+ */
+ m = fpr_ulsh(1, cc) - 1;
+ yu |= (yu & m) + m;
+ yu = fpr_ursh(yu, cc);
+
+ /*
+ * If the operands have the same sign, then we add the mantissas;
+ * otherwise, we subtract the mantissas.
+ */
+ xu += yu - ((yu << 1) & -(uint64_t)(sx ^ sy));
+
+ /*
+ * The result may be smaller, or slightly larger. We normalize
+ * it to the 2^63..2^64-1 range (if xu is zero, then it stays
+ * at zero).
+ */
+ FPR_NORM64(xu, ex);
+
+ /*
+ * Scale down the value to 2^54..s^55-1, handling the last bit
+ * as sticky.
+ */
+ xu |= ((uint32_t)xu & 0x1FF) + 0x1FF;
+ xu >>= 9;
+ ex += 9;
+
+ /*
+ * In general, the result has the sign of x. However, if the
+ * result is exactly zero, then the following situations may
+ * be encountered:
+ * x > 0, y = -x -> result should be +0
+ * x < 0, y = -x -> result should be +0
+ * x = +0, y = +0 -> result should be +0
+ * x = -0, y = +0 -> result should be +0
+ * x = +0, y = -0 -> result should be +0
+ * x = -0, y = -0 -> result should be -0
+ *
+ * But at the conditional swap step at the start of the
+ * function, we ensured that if abs(x) = abs(y) and the
+ * sign of x was 1, then x and y were swapped. Thus, the
+ * two following cases cannot actually happen:
+ * x < 0, y = -x
+ * x = -0, y = +0
+ * In all other cases, the sign bit of x is conserved, which
+ * is what the FPR() function does. The FPR() function also
+ * properly clamps values to zero when the exponent is too
+ * low, but does not alter the sign in that case.
+ */
+ return FPR(sx, ex, xu);
+}
+
+fpr
+fpr_sub(fpr x, fpr y) {
+ y ^= (uint64_t)1 << 63;
+ return fpr_add(x, y);
+}
+
+fpr
+fpr_neg(fpr x) {
+ x ^= (uint64_t)1 << 63;
+ return x;
+}
+
+fpr
+fpr_half(fpr x) {
+ /*
+ * To divide a value by 2, we just have to subtract 1 from its
+ * exponent, but we have to take care of zero.
+ */
+ uint32_t t;
+
+ x -= (uint64_t)1 << 52;
+ t = (((uint32_t)(x >> 52) & 0x7FF) + 1) >> 11;
+ x &= (uint64_t)t - 1;
+ return x;
+}
+
+fpr
+fpr_double(fpr x) {
+ /*
+ * To double a value, we just increment by one the exponent. We
+ * don't care about infinites or NaNs; however, 0 is a
+ * special case.
+ */
+ x += (uint64_t)((((unsigned)(x >> 52) & 0x7FFU) + 0x7FFU) >> 11) << 52;
+ return x;
+}
+
+fpr
+fpr_mul(fpr x, fpr y) {
+ uint64_t xu, yu, w, zu, zv;
+ uint32_t x0, x1, y0, y1, z0, z1, z2;
+ int ex, ey, d, e, s;
+
+ /*
+ * Extract absolute values as scaled unsigned integers. We
+ * don't extract exponents yet.
+ */
+ xu = (x & (((uint64_t)1 << 52) - 1)) | ((uint64_t)1 << 52);
+ yu = (y & (((uint64_t)1 << 52) - 1)) | ((uint64_t)1 << 52);
+
+ /*
+ * We have two 53-bit integers to multiply; we need to split
+ * each into a lower half and a upper half. Moreover, we
+ * prefer to have lower halves to be of 25 bits each, for
+ * reasons explained later on.
+ */
+ x0 = (uint32_t)xu & 0x01FFFFFF;
+ x1 = (uint32_t)(xu >> 25);
+ y0 = (uint32_t)yu & 0x01FFFFFF;
+ y1 = (uint32_t)(yu >> 25);
+ w = (uint64_t)x0 * (uint64_t)y0;
+ z0 = (uint32_t)w & 0x01FFFFFF;
+ z1 = (uint32_t)(w >> 25);
+ w = (uint64_t)x0 * (uint64_t)y1;
+ z1 += (uint32_t)w & 0x01FFFFFF;
+ z2 = (uint32_t)(w >> 25);
+ w = (uint64_t)x1 * (uint64_t)y0;
+ z1 += (uint32_t)w & 0x01FFFFFF;
+ z2 += (uint32_t)(w >> 25);
+ zu = (uint64_t)x1 * (uint64_t)y1;
+ z2 += (z1 >> 25);
+ z1 &= 0x01FFFFFF;
+ zu += z2;
+
+ /*
+ * Since xu and yu are both in the 2^52..2^53-1 range, the
+ * product is in the 2^104..2^106-1 range. We first reassemble
+ * it and round it into the 2^54..2^56-1 range; the bottom bit
+ * is made "sticky". Since the low limbs z0 and z1 are 25 bits
+ * each, we just take the upper part (zu), and consider z0 and
+ * z1 only for purposes of stickiness.
+ * (This is the reason why we chose 25-bit limbs above.)
+ */
+ zu |= ((z0 | z1) + 0x01FFFFFF) >> 25;
+
+ /*
+ * We normalize zu to the 2^54..s^55-1 range: it could be one
+ * bit too large at this point. This is done with a conditional
+ * right-shift that takes into account the sticky bit.
+ */
+ zv = (zu >> 1) | (zu & 1);
+ w = zu >> 55;
+ zu ^= (zu ^ zv) & -w;
+
+ /*
+ * Get the aggregate scaling factor:
+ *
+ * - Each exponent is biased by 1023.
+ *
+ * - Integral mantissas are scaled by 2^52, hence an
+ * extra 52 bias for each exponent.
+ *
+ * - However, we right-shifted z by 50 bits, and then
+ * by 0 or 1 extra bit (depending on the value of w).
+ *
+ * In total, we must add the exponents, then subtract
+ * 2 * (1023 + 52), then add 50 + w.
+ */
+ ex = (int)((x >> 52) & 0x7FF);
+ ey = (int)((y >> 52) & 0x7FF);
+ e = ex + ey - 2100 + (int)w;
+
+ /*
+ * Sign bit is the XOR of the operand sign bits.
+ */
+ s = (int)((x ^ y) >> 63);
+
+ /*
+ * Corrective actions for zeros: if either of the operands is
+ * zero, then the computations above were wrong. Test for zero
+ * is whether ex or ey is zero. We just have to set the mantissa
+ * (zu) to zero, the FPR() function will normalize e.
+ */
+ d = ((ex + 0x7FF) & (ey + 0x7FF)) >> 11;
+ zu &= -(uint64_t)d;
+
+ /*
+ * FPR() packs the result and applies proper rounding.
+ */
+ return FPR(s, e, zu);
+}
+
+fpr
+fpr_sqr(fpr x) {
+ return fpr_mul(x, x);
+}
+
+fpr
+fpr_div(fpr x, fpr y) {
+ uint64_t xu, yu, q, q2, w;
+ int i, ex, ey, e, d, s;
+
+ /*
+ * Extract mantissas of x and y (unsigned).
+ */
+ xu = (x & (((uint64_t)1 << 52) - 1)) | ((uint64_t)1 << 52);
+ yu = (y & (((uint64_t)1 << 52) - 1)) | ((uint64_t)1 << 52);
+
+ /*
+ * Perform bit-by-bit division of xu by yu. We run it for 55 bits.
+ */
+ q = 0;
+ for (i = 0; i < 55; i ++) {
+ /*
+ * If yu is less than or equal xu, then subtract it and
+ * push a 1 in the quotient; otherwise, leave xu unchanged
+ * and push a 0.
+ */
+ uint64_t b;
+
+ b = ((xu - yu) >> 63) - 1;
+ xu -= b & yu;
+ q |= b & 1;
+ xu <<= 1;
+ q <<= 1;
+ }
+
+ /*
+ * We got 55 bits in the quotient, followed by an extra zero. We
+ * want that 56th bit to be "sticky": it should be a 1 if and
+ * only if the remainder (xu) is non-zero.
+ */
+ q |= (xu | -xu) >> 63;
+
+ /*
+ * Quotient is at most 2^56-1. Its top bit may be zero, but in
+ * that case the next-to-top bit will be a one, since the
+ * initial xu and yu were both in the 2^52..2^53-1 range.
+ * We perform a conditional shift to normalize q to the
+ * 2^54..2^55-1 range (with the bottom bit being sticky).
+ */
+ q2 = (q >> 1) | (q & 1);
+ w = q >> 55;
+ q ^= (q ^ q2) & -w;
+
+ /*
+ * Extract exponents to compute the scaling factor:
+ *
+ * - Each exponent is biased and we scaled them up by
+ * 52 bits; but these biases will cancel out.
+ *
+ * - The division loop produced a 55-bit shifted result,
+ * so we must scale it down by 55 bits.
+ *
+ * - If w = 1, we right-shifted the integer by 1 bit,
+ * hence we must add 1 to the scaling.
+ */
+ ex = (int)((x >> 52) & 0x7FF);
+ ey = (int)((y >> 52) & 0x7FF);
+ e = ex - ey - 55 + (int)w;
+
+ /*
+ * Sign is the XOR of the signs of the operands.
+ */
+ s = (int)((x ^ y) >> 63);
+
+ /*
+ * Corrective actions for zeros: if x = 0, then the computation
+ * is wrong, and we must clamp e and q to 0. We do not care
+ * about the case y = 0 (as per assumptions in this module,
+ * the caller does not perform divisions by zero).
+ */
+ d = (ex + 0x7FF) >> 11;
+ s &= d;
+ e &= -d;
+ q &= -(uint64_t)d;
+
+ /*
+ * FPR() packs the result and applies proper rounding.
+ */
+ return FPR(s, e, q);
+}
+
+fpr
+fpr_inv(fpr x) {
+ return fpr_div(4607182418800017408u, x);
+}
+
+fpr
+fpr_sqrt(fpr x) {
+ uint64_t xu, q, s, r;
+ int ex, e;
+
+ /*
+ * Extract the mantissa and the exponent. We don't care about
+ * the sign: by assumption, the operand is nonnegative.
+ * We want the "true" exponent corresponding to a mantissa
+ * in the 1..2 range.
+ */
+ xu = (x & (((uint64_t)1 << 52) - 1)) | ((uint64_t)1 << 52);
+ ex = (int)((x >> 52) & 0x7FF);
+ e = ex - 1023;
+
+ /*
+ * If the exponent is odd, double the mantissa and decrement
+ * the exponent. The exponent is then halved to account for
+ * the square root.
+ */
+ xu += xu & -(uint64_t)(e & 1);
+ e >>= 1;
+
+ /*
+ * Double the mantissa.
+ */
+ xu <<= 1;
+
+ /*
+ * We now have a mantissa in the 2^53..2^55-1 range. It
+ * represents a value between 1 (inclusive) and 4 (exclusive)
+ * in fixed point notation (with 53 fractional bits). We
+ * compute the square root bit by bit.
+ */
+ q = 0;
+ s = 0;
+ r = (uint64_t)1 << 53;
+ for (int i = 0; i < 54; i ++) {
+ uint64_t t, b;
+
+ t = s + r;
+ b = ((xu - t) >> 63) - 1;
+ s += (r << 1) & b;
+ xu -= t & b;
+ q += r & b;
+ xu <<= 1;
+ r >>= 1;
+ }
+
+ /*
+ * Now, q is a rounded-low 54-bit value, with a leading 1,
+ * 52 fractional digits, and an additional guard bit. We add
+ * an extra sticky bit to account for what remains of the operand.
+ */
+ q <<= 1;
+ q |= (xu | -xu) >> 63;
+
+ /*
+ * Result q is in the 2^54..2^55-1 range; we bias the exponent
+ * by 54 bits (the value e at that point contains the "true"
+ * exponent, but q is now considered an integer, i.e. scaled
+ * up.
+ */
+ e -= 54;
+
+ /*
+ * Corrective action for an operand of value zero.
+ */
+ q &= -(uint64_t)((ex + 0x7FF) >> 11);
+
+ /*
+ * Apply rounding and back result.
+ */
+ return FPR(0, e, q);
+}
+
+int
+fpr_lt(fpr x, fpr y) {
+ /*
+ * If both x and y are positive, then a signed comparison yields
+ * the proper result:
+ * - For positive values, the order is preserved.
+ * - The sign bit is at the same place as in integers, so
+ * sign is preserved.
+ * Moreover, we can compute [x < y] as sgn(x-y) and the computation
+ * of x-y will not overflow.
+ *
+ * If the signs differ, then sgn(x) gives the proper result.
+ *
+ * If both x and y are negative, then the order is reversed.
+ * Hence [x < y] = sgn(y-x). We must compute this separately from
+ * sgn(x-y); simply inverting sgn(x-y) would not handle the edge
+ * case x = y properly.
+ */
+ int cc0, cc1;
+ int64_t sx;
+ int64_t sy;
+
+ sx = *(int64_t *)&x;
+ sy = *(int64_t *)&y;
+ sy &= ~((sx ^ sy) >> 63); /* set sy=0 if signs differ */
+
+ cc0 = (int)((sx - sy) >> 63) & 1; /* Neither subtraction overflows when */
+ cc1 = (int)((sy - sx) >> 63) & 1; /* the signs are the same. */
+
+ return cc0 ^ ((cc0 ^ cc1) & (int)((x & y) >> 63));
+}
+
+uint64_t
+fpr_expm_p63(fpr x, fpr ccs) {
+ /*
+ * Polynomial approximation of exp(-x) is taken from FACCT:
+ * https://eprint.iacr.org/2018/1234
+ * Specifically, values are extracted from the implementation
+ * referenced from the FACCT article, and available at:
+ * https://github.com/raykzhao/gaussian
+ * Here, the coefficients have been scaled up by 2^63 and
+ * converted to integers.
+ *
+ * Tests over more than 24 billions of random inputs in the
+ * 0..log(2) range have never shown a deviation larger than
+ * 2^(-50) from the true mathematical value.
+ */
+ static const uint64_t C[] = {
+ 0x00000004741183A3u,
+ 0x00000036548CFC06u,
+ 0x0000024FDCBF140Au,
+ 0x0000171D939DE045u,
+ 0x0000D00CF58F6F84u,
+ 0x000680681CF796E3u,
+ 0x002D82D8305B0FEAu,
+ 0x011111110E066FD0u,
+ 0x0555555555070F00u,
+ 0x155555555581FF00u,
+ 0x400000000002B400u,
+ 0x7FFFFFFFFFFF4800u,
+ 0x8000000000000000u
+ };
+
+ uint64_t z, y;
+ size_t u;
+ uint32_t z0, z1, y0, y1;
+ uint64_t a, b;
+
+ y = C[0];
+ z = (uint64_t)fpr_trunc(fpr_mul(x, fpr_ptwo63)) << 1;
+ for (u = 1; u < (sizeof C) / sizeof(C[0]); u ++) {
+ /*
+ * Compute product z * y over 128 bits, but keep only
+ * the top 64 bits.
+ *
+ * TODO: On some architectures/compilers we could use
+ * some intrinsics (__umulh() on MSVC) or other compiler
+ * extensions (unsigned __int128 on GCC / Clang) for
+ * improved speed; however, most 64-bit architectures
+ * also have appropriate IEEE754 floating-point support,
+ * which is better.
+ */
+ uint64_t c;
+
+ z0 = (uint32_t)z;
+ z1 = (uint32_t)(z >> 32);
+ y0 = (uint32_t)y;
+ y1 = (uint32_t)(y >> 32);
+ a = ((uint64_t)z0 * (uint64_t)y1)
+ + (((uint64_t)z0 * (uint64_t)y0) >> 32);
+ b = ((uint64_t)z1 * (uint64_t)y0);
+ c = (a >> 32) + (b >> 32);
+ c += (((uint64_t)(uint32_t)a + (uint64_t)(uint32_t)b) >> 32);
+ c += (uint64_t)z1 * (uint64_t)y1;
+ y = C[u] - c;
+ }
+
+ /*
+ * The scaling factor must be applied at the end. Since y is now
+ * in fixed-point notation, we have to convert the factor to the
+ * same format, and do an extra integer multiplication.
+ */
+ z = (uint64_t)fpr_trunc(fpr_mul(ccs, fpr_ptwo63)) << 1;
+ z0 = (uint32_t)z;
+ z1 = (uint32_t)(z >> 32);
+ y0 = (uint32_t)y;
+ y1 = (uint32_t)(y >> 32);
+ a = ((uint64_t)z0 * (uint64_t)y1)
+ + (((uint64_t)z0 * (uint64_t)y0) >> 32);
+ b = ((uint64_t)z1 * (uint64_t)y0);
+ y = (a >> 32) + (b >> 32);
+ y += (((uint64_t)(uint32_t)a + (uint64_t)(uint32_t)b) >> 32);
+ y += (uint64_t)z1 * (uint64_t)y1;
+
+ return y;
+}
+
+const fpr fpr_gm_tab[] = {
+ 0, 0,
+ 9223372036854775808U, 4607182418800017408U,
+ 4604544271217802189U, 4604544271217802189U,
+ 13827916308072577997U, 4604544271217802189U,
+ 4606496786581982534U, 4600565431771507043U,
+ 13823937468626282851U, 4606496786581982534U,
+ 4600565431771507043U, 4606496786581982534U,
+ 13829868823436758342U, 4600565431771507043U,
+ 4607009347991985328U, 4596196889902818827U,
+ 13819568926757594635U, 4607009347991985328U,
+ 4603179351334086856U, 4605664432017547683U,
+ 13829036468872323491U, 4603179351334086856U,
+ 4605664432017547683U, 4603179351334086856U,
+ 13826551388188862664U, 4605664432017547683U,
+ 4596196889902818827U, 4607009347991985328U,
+ 13830381384846761136U, 4596196889902818827U,
+ 4607139046673687846U, 4591727299969791020U,
+ 13815099336824566828U, 4607139046673687846U,
+ 4603889326261607894U, 4605137878724712257U,
+ 13828509915579488065U, 4603889326261607894U,
+ 4606118860100255153U, 4602163548591158843U,
+ 13825535585445934651U, 4606118860100255153U,
+ 4598900923775164166U, 4606794571824115162U,
+ 13830166608678890970U, 4598900923775164166U,
+ 4606794571824115162U, 4598900923775164166U,
+ 13822272960629939974U, 4606794571824115162U,
+ 4602163548591158843U, 4606118860100255153U,
+ 13829490896955030961U, 4602163548591158843U,
+ 4605137878724712257U, 4603889326261607894U,
+ 13827261363116383702U, 4605137878724712257U,
+ 4591727299969791020U, 4607139046673687846U,
+ 13830511083528463654U, 4591727299969791020U,
+ 4607171569234046334U, 4587232218149935124U,
+ 13810604255004710932U, 4607171569234046334U,
+ 4604224084862889120U, 4604849113969373103U,
+ 13828221150824148911U, 4604224084862889120U,
+ 4606317631232591731U, 4601373767755717824U,
+ 13824745804610493632U, 4606317631232591731U,
+ 4599740487990714333U, 4606655894547498725U,
+ 13830027931402274533U, 4599740487990714333U,
+ 4606912484326125783U, 4597922303871901467U,
+ 13821294340726677275U, 4606912484326125783U,
+ 4602805845399633902U, 4605900952042040894U,
+ 13829272988896816702U, 4602805845399633902U,
+ 4605409869824231233U, 4603540801876750389U,
+ 13826912838731526197U, 4605409869824231233U,
+ 4594454542771183930U, 4607084929468638487U,
+ 13830456966323414295U, 4594454542771183930U,
+ 4607084929468638487U, 4594454542771183930U,
+ 13817826579625959738U, 4607084929468638487U,
+ 4603540801876750389U, 4605409869824231233U,
+ 13828781906679007041U, 4603540801876750389U,
+ 4605900952042040894U, 4602805845399633902U,
+ 13826177882254409710U, 4605900952042040894U,
+ 4597922303871901467U, 4606912484326125783U,
+ 13830284521180901591U, 4597922303871901467U,
+ 4606655894547498725U, 4599740487990714333U,
+ 13823112524845490141U, 4606655894547498725U,
+ 4601373767755717824U, 4606317631232591731U,
+ 13829689668087367539U, 4601373767755717824U,
+ 4604849113969373103U, 4604224084862889120U,
+ 13827596121717664928U, 4604849113969373103U,
+ 4587232218149935124U, 4607171569234046334U,
+ 13830543606088822142U, 4587232218149935124U,
+ 4607179706000002317U, 4582730748936808062U,
+ 13806102785791583870U, 4607179706000002317U,
+ 4604386048625945823U, 4604698657331085206U,
+ 13828070694185861014U, 4604386048625945823U,
+ 4606409688975526202U, 4600971798440897930U,
+ 13824343835295673738U, 4606409688975526202U,
+ 4600154912527631775U, 4606578871587619388U,
+ 13829950908442395196U, 4600154912527631775U,
+ 4606963563043808649U, 4597061974398750563U,
+ 13820434011253526371U, 4606963563043808649U,
+ 4602994049708411683U, 4605784983948558848U,
+ 13829157020803334656U, 4602994049708411683U,
+ 4605539368864982914U, 4603361638657888991U,
+ 13826733675512664799U, 4605539368864982914U,
+ 4595327571478659014U, 4607049811591515049U,
+ 13830421848446290857U, 4595327571478659014U,
+ 4607114680469659603U, 4593485039402578702U,
+ 13816857076257354510U, 4607114680469659603U,
+ 4603716733069447353U, 4605276012900672507U,
+ 13828648049755448315U, 4603716733069447353U,
+ 4606012266443150634U, 4602550884377336506U,
+ 13825922921232112314U, 4606012266443150634U,
+ 4598476289818621559U, 4606856142606846307U,
+ 13830228179461622115U, 4598476289818621559U,
+ 4606727809065869586U, 4599322407794599425U,
+ 13822694444649375233U, 4606727809065869586U,
+ 4601771097584682078U, 4606220668805321205U,
+ 13829592705660097013U, 4601771097584682078U,
+ 4604995550503212910U, 4604058477489546729U,
+ 13827430514344322537U, 4604995550503212910U,
+ 4589965306122607094U, 4607158013403433018U,
+ 13830530050258208826U, 4589965306122607094U,
+ 4607158013403433018U, 4589965306122607094U,
+ 13813337342977382902U, 4607158013403433018U,
+ 4604058477489546729U, 4604995550503212910U,
+ 13828367587357988718U, 4604058477489546729U,
+ 4606220668805321205U, 4601771097584682078U,
+ 13825143134439457886U, 4606220668805321205U,
+ 4599322407794599425U, 4606727809065869586U,
+ 13830099845920645394U, 4599322407794599425U,
+ 4606856142606846307U, 4598476289818621559U,
+ 13821848326673397367U, 4606856142606846307U,
+ 4602550884377336506U, 4606012266443150634U,
+ 13829384303297926442U, 4602550884377336506U,
+ 4605276012900672507U, 4603716733069447353U,
+ 13827088769924223161U, 4605276012900672507U,
+ 4593485039402578702U, 4607114680469659603U,
+ 13830486717324435411U, 4593485039402578702U,
+ 4607049811591515049U, 4595327571478659014U,
+ 13818699608333434822U, 4607049811591515049U,
+ 4603361638657888991U, 4605539368864982914U,
+ 13828911405719758722U, 4603361638657888991U,
+ 4605784983948558848U, 4602994049708411683U,
+ 13826366086563187491U, 4605784983948558848U,
+ 4597061974398750563U, 4606963563043808649U,
+ 13830335599898584457U, 4597061974398750563U,
+ 4606578871587619388U, 4600154912527631775U,
+ 13823526949382407583U, 4606578871587619388U,
+ 4600971798440897930U, 4606409688975526202U,
+ 13829781725830302010U, 4600971798440897930U,
+ 4604698657331085206U, 4604386048625945823U,
+ 13827758085480721631U, 4604698657331085206U,
+ 4582730748936808062U, 4607179706000002317U,
+ 13830551742854778125U, 4582730748936808062U,
+ 4607181740574479067U, 4578227681973159812U,
+ 13801599718827935620U, 4607181740574479067U,
+ 4604465633578481725U, 4604621949701367983U,
+ 13827993986556143791U, 4604465633578481725U,
+ 4606453861145241227U, 4600769149537129431U,
+ 13824141186391905239U, 4606453861145241227U,
+ 4600360675823176935U, 4606538458821337243U,
+ 13829910495676113051U, 4600360675823176935U,
+ 4606987119037722413U, 4596629994023683153U,
+ 13820002030878458961U, 4606987119037722413U,
+ 4603087070374583113U, 4605725276488455441U,
+ 13829097313343231249U, 4603087070374583113U,
+ 4605602459698789090U, 4603270878689749849U,
+ 13826642915544525657U, 4605602459698789090U,
+ 4595762727260045105U, 4607030246558998647U,
+ 13830402283413774455U, 4595762727260045105U,
+ 4607127537664763515U, 4592606767730311893U,
+ 13815978804585087701U, 4607127537664763515U,
+ 4603803453461190356U, 4605207475328619533U,
+ 13828579512183395341U, 4603803453461190356U,
+ 4606066157444814153U, 4602357870542944470U,
+ 13825729907397720278U, 4606066157444814153U,
+ 4598688984595225406U, 4606826008603986804U,
+ 13830198045458762612U, 4598688984595225406U,
+ 4606761837001494797U, 4599112075441176914U,
+ 13822484112295952722U, 4606761837001494797U,
+ 4601967947786150793U, 4606170366472647579U,
+ 13829542403327423387U, 4601967947786150793U,
+ 4605067233569943231U, 4603974338538572089U,
+ 13827346375393347897U, 4605067233569943231U,
+ 4590846768565625881U, 4607149205763218185U,
+ 13830521242617993993U, 4590846768565625881U,
+ 4607165468267934125U, 4588998070480937184U,
+ 13812370107335712992U, 4607165468267934125U,
+ 4604141730443515286U, 4604922840319727473U,
+ 13828294877174503281U, 4604141730443515286U,
+ 4606269759522929756U, 4601573027631668967U,
+ 13824945064486444775U, 4606269759522929756U,
+ 4599531889160152938U, 4606692493141721470U,
+ 13830064529996497278U, 4599531889160152938U,
+ 4606884969294623682U, 4598262871476403630U,
+ 13821634908331179438U, 4606884969294623682U,
+ 4602710690099904183U, 4605957195211051218U,
+ 13829329232065827026U, 4602710690099904183U,
+ 4605343481119364930U, 4603629178146150899U,
+ 13827001215000926707U, 4605343481119364930U,
+ 4594016801320007031U, 4607100477024622401U,
+ 13830472513879398209U, 4594016801320007031U,
+ 4607068040143112603U, 4594891488091520602U,
+ 13818263524946296410U, 4607068040143112603U,
+ 4603451617570386922U, 4605475169017376660U,
+ 13828847205872152468U, 4603451617570386922U,
+ 4605843545406134034U, 4602900303344142735U,
+ 13826272340198918543U, 4605843545406134034U,
+ 4597492765973365521U, 4606938683557690074U,
+ 13830310720412465882U, 4597492765973365521U,
+ 4606618018794815019U, 4599948172872067014U,
+ 13823320209726842822U, 4606618018794815019U,
+ 4601173347964633034U, 4606364276725003740U,
+ 13829736313579779548U, 4601173347964633034U,
+ 4604774382555066977U, 4604305528345395596U,
+ 13827677565200171404U, 4604774382555066977U,
+ 4585465300892538317U, 4607176315382986589U,
+ 13830548352237762397U, 4585465300892538317U,
+ 4607176315382986589U, 4585465300892538317U,
+ 13808837337747314125U, 4607176315382986589U,
+ 4604305528345395596U, 4604774382555066977U,
+ 13828146419409842785U, 4604305528345395596U,
+ 4606364276725003740U, 4601173347964633034U,
+ 13824545384819408842U, 4606364276725003740U,
+ 4599948172872067014U, 4606618018794815019U,
+ 13829990055649590827U, 4599948172872067014U,
+ 4606938683557690074U, 4597492765973365521U,
+ 13820864802828141329U, 4606938683557690074U,
+ 4602900303344142735U, 4605843545406134034U,
+ 13829215582260909842U, 4602900303344142735U,
+ 4605475169017376660U, 4603451617570386922U,
+ 13826823654425162730U, 4605475169017376660U,
+ 4594891488091520602U, 4607068040143112603U,
+ 13830440076997888411U, 4594891488091520602U,
+ 4607100477024622401U, 4594016801320007031U,
+ 13817388838174782839U, 4607100477024622401U,
+ 4603629178146150899U, 4605343481119364930U,
+ 13828715517974140738U, 4603629178146150899U,
+ 4605957195211051218U, 4602710690099904183U,
+ 13826082726954679991U, 4605957195211051218U,
+ 4598262871476403630U, 4606884969294623682U,
+ 13830257006149399490U, 4598262871476403630U,
+ 4606692493141721470U, 4599531889160152938U,
+ 13822903926014928746U, 4606692493141721470U,
+ 4601573027631668967U, 4606269759522929756U,
+ 13829641796377705564U, 4601573027631668967U,
+ 4604922840319727473U, 4604141730443515286U,
+ 13827513767298291094U, 4604922840319727473U,
+ 4588998070480937184U, 4607165468267934125U,
+ 13830537505122709933U, 4588998070480937184U,
+ 4607149205763218185U, 4590846768565625881U,
+ 13814218805420401689U, 4607149205763218185U,
+ 4603974338538572089U, 4605067233569943231U,
+ 13828439270424719039U, 4603974338538572089U,
+ 4606170366472647579U, 4601967947786150793U,
+ 13825339984640926601U, 4606170366472647579U,
+ 4599112075441176914U, 4606761837001494797U,
+ 13830133873856270605U, 4599112075441176914U,
+ 4606826008603986804U, 4598688984595225406U,
+ 13822061021450001214U, 4606826008603986804U,
+ 4602357870542944470U, 4606066157444814153U,
+ 13829438194299589961U, 4602357870542944470U,
+ 4605207475328619533U, 4603803453461190356U,
+ 13827175490315966164U, 4605207475328619533U,
+ 4592606767730311893U, 4607127537664763515U,
+ 13830499574519539323U, 4592606767730311893U,
+ 4607030246558998647U, 4595762727260045105U,
+ 13819134764114820913U, 4607030246558998647U,
+ 4603270878689749849U, 4605602459698789090U,
+ 13828974496553564898U, 4603270878689749849U,
+ 4605725276488455441U, 4603087070374583113U,
+ 13826459107229358921U, 4605725276488455441U,
+ 4596629994023683153U, 4606987119037722413U,
+ 13830359155892498221U, 4596629994023683153U,
+ 4606538458821337243U, 4600360675823176935U,
+ 13823732712677952743U, 4606538458821337243U,
+ 4600769149537129431U, 4606453861145241227U,
+ 13829825898000017035U, 4600769149537129431U,
+ 4604621949701367983U, 4604465633578481725U,
+ 13827837670433257533U, 4604621949701367983U,
+ 4578227681973159812U, 4607181740574479067U,
+ 13830553777429254875U, 4578227681973159812U,
+ 4607182249242036882U, 4573724215515480177U,
+ 13797096252370255985U, 4607182249242036882U,
+ 4604505071555817232U, 4604583231088591477U,
+ 13827955267943367285U, 4604505071555817232U,
+ 4606475480113671417U, 4600667422348321968U,
+ 13824039459203097776U, 4606475480113671417U,
+ 4600463181646572228U, 4606517779747998088U,
+ 13829889816602773896U, 4600463181646572228U,
+ 4606998399608725124U, 4596413578358834022U,
+ 13819785615213609830U, 4606998399608725124U,
+ 4603133304188877240U, 4605694995810664660U,
+ 13829067032665440468U, 4603133304188877240U,
+ 4605633586259814045U, 4603225210076562971U,
+ 13826597246931338779U, 4605633586259814045U,
+ 4595979936813835462U, 4607019963775302583U,
+ 13830392000630078391U, 4595979936813835462U,
+ 4607133460805585796U, 4592167175087283203U,
+ 13815539211942059011U, 4607133460805585796U,
+ 4603846496621587377U, 4605172808754305228U,
+ 13828544845609081036U, 4603846496621587377U,
+ 4606092657816072624U, 4602260871257280788U,
+ 13825632908112056596U, 4606092657816072624U,
+ 4598795050632330097U, 4606810452769876110U,
+ 13830182489624651918U, 4598795050632330097U,
+ 4606778366364612594U, 4599006600037663623U,
+ 13822378636892439431U, 4606778366364612594U,
+ 4602065906208722008U, 4606144763310860551U,
+ 13829516800165636359U, 4602065906208722008U,
+ 4605102686554936490U, 4603931940768740167U,
+ 13827303977623515975U, 4605102686554936490U,
+ 4591287158938884897U, 4607144295058764886U,
+ 13830516331913540694U, 4591287158938884897U,
+ 4607168688050493276U, 4588115294056142819U,
+ 13811487330910918627U, 4607168688050493276U,
+ 4604183020748362039U, 4604886103475043762U,
+ 13828258140329819570U, 4604183020748362039U,
+ 4606293848208650998U, 4601473544562720001U,
+ 13824845581417495809U, 4606293848208650998U,
+ 4599636300858866724U, 4606674353838411301U,
+ 13830046390693187109U, 4599636300858866724U,
+ 4606898891031025132U, 4598136582470364665U,
+ 13821508619325140473U, 4606898891031025132U,
+ 4602758354025980442U, 4605929219593405673U,
+ 13829301256448181481U, 4602758354025980442U,
+ 4605376811039722786U, 4603585091850767959U,
+ 13826957128705543767U, 4605376811039722786U,
+ 4594235767444503503U, 4607092871118901179U,
+ 13830464907973676987U, 4594235767444503503U,
+ 4607076652372832968U, 4594673119063280916U,
+ 13818045155918056724U, 4607076652372832968U,
+ 4603496309891590679U, 4605442656228245717U,
+ 13828814693083021525U, 4603496309891590679U,
+ 4605872393621214213U, 4602853162432841185U,
+ 13826225199287616993U, 4605872393621214213U,
+ 4597707695679609371U, 4606925748668145757U,
+ 13830297785522921565U, 4597707695679609371U,
+ 4606637115963965612U, 4599844446633109139U,
+ 13823216483487884947U, 4606637115963965612U,
+ 4601273700967202825U, 4606341107699334546U,
+ 13829713144554110354U, 4601273700967202825U,
+ 4604811873195349477U, 4604264921241055824U,
+ 13827636958095831632U, 4604811873195349477U,
+ 4586348876009622851U, 4607174111710118367U,
+ 13830546148564894175U, 4586348876009622851U,
+ 4607178180169683960U, 4584498631466405633U,
+ 13807870668321181441U, 4607178180169683960U,
+ 4604345904647073908U, 4604736643460027021U,
+ 13828108680314802829U, 4604345904647073908U,
+ 4606387137437298591U, 4601072712526242277U,
+ 13824444749381018085U, 4606387137437298591U,
+ 4600051662802353687U, 4606598603759044570U,
+ 13829970640613820378U, 4600051662802353687U,
+ 4606951288507767453U, 4597277522845151878U,
+ 13820649559699927686U, 4606951288507767453U,
+ 4602947266358709886U, 4605814408482919348U,
+ 13829186445337695156U, 4602947266358709886U,
+ 4605507406967535927U, 4603406726595779752U,
+ 13826778763450555560U, 4605507406967535927U,
+ 4595109641634432498U, 4607059093103722971U,
+ 13830431129958498779U, 4595109641634432498U,
+ 4607107746899444102U, 4593797652641645341U,
+ 13817169689496421149U, 4607107746899444102U,
+ 4603673059103075106U, 4605309881318010327U,
+ 13828681918172786135U, 4603673059103075106U,
+ 4605984877841711338U, 4602646891659203088U,
+ 13826018928513978896U, 4605984877841711338U,
+ 4598369669086960528U, 4606870719641066940U,
+ 13830242756495842748U, 4598369669086960528U,
+ 4606710311774494716U, 4599427256825614420U,
+ 13822799293680390228U, 4606710311774494716U,
+ 4601672213217083403U, 4606245366082353408U,
+ 13829617402937129216U, 4601672213217083403U,
+ 4604959323120302796U, 4604100215502905499U,
+ 13827472252357681307U, 4604959323120302796U,
+ 4589524267239410099U, 4607161910007591876U,
+ 13830533946862367684U, 4589524267239410099U,
+ 4607153778602162496U, 4590406145430462614U,
+ 13813778182285238422U, 4607153778602162496U,
+ 4604016517974851588U, 4605031521104517324U,
+ 13828403557959293132U, 4604016517974851588U,
+ 4606195668621671667U, 4601869677011524443U,
+ 13825241713866300251U, 4606195668621671667U,
+ 4599217346014614711U, 4606744984357082948U,
+ 13830117021211858756U, 4599217346014614711U,
+ 4606841238740778884U, 4598582729657176439U,
+ 13821954766511952247U, 4606841238740778884U,
+ 4602454542796181607U, 4606039359984203741U,
+ 13829411396838979549U, 4602454542796181607U,
+ 4605241877142478242U, 4603760198400967492U,
+ 13827132235255743300U, 4605241877142478242U,
+ 4593046061348462537U, 4607121277474223905U,
+ 13830493314328999713U, 4593046061348462537U,
+ 4607040195955932526U, 4595545269419264690U,
+ 13818917306274040498U, 4607040195955932526U,
+ 4603316355454250015U, 4605571053506370248U,
+ 13828943090361146056U, 4603316355454250015U,
+ 4605755272910869620U, 4603040651631881451U,
+ 13826412688486657259U, 4605755272910869620U,
+ 4596846128749438754U, 4606975506703684317U,
+ 13830347543558460125U, 4596846128749438754U,
+ 4606558823023444576U, 4600257918160607478U,
+ 13823629955015383286U, 4606558823023444576U,
+ 4600870609507958271U, 4606431930490633905U,
+ 13829803967345409713U, 4600870609507958271U,
+ 4604660425598397818U, 4604425958770613225U,
+ 13827797995625389033U, 4604660425598397818U,
+ 4580962600092897021U, 4607180892816495009U,
+ 13830552929671270817U, 4580962600092897021U,
+ 4607180892816495009U, 4580962600092897021U,
+ 13804334636947672829U, 4607180892816495009U,
+ 4604425958770613225U, 4604660425598397818U,
+ 13828032462453173626U, 4604425958770613225U,
+ 4606431930490633905U, 4600870609507958271U,
+ 13824242646362734079U, 4606431930490633905U,
+ 4600257918160607478U, 4606558823023444576U,
+ 13829930859878220384U, 4600257918160607478U,
+ 4606975506703684317U, 4596846128749438754U,
+ 13820218165604214562U, 4606975506703684317U,
+ 4603040651631881451U, 4605755272910869620U,
+ 13829127309765645428U, 4603040651631881451U,
+ 4605571053506370248U, 4603316355454250015U,
+ 13826688392309025823U, 4605571053506370248U,
+ 4595545269419264690U, 4607040195955932526U,
+ 13830412232810708334U, 4595545269419264690U,
+ 4607121277474223905U, 4593046061348462537U,
+ 13816418098203238345U, 4607121277474223905U,
+ 4603760198400967492U, 4605241877142478242U,
+ 13828613913997254050U, 4603760198400967492U,
+ 4606039359984203741U, 4602454542796181607U,
+ 13825826579650957415U, 4606039359984203741U,
+ 4598582729657176439U, 4606841238740778884U,
+ 13830213275595554692U, 4598582729657176439U,
+ 4606744984357082948U, 4599217346014614711U,
+ 13822589382869390519U, 4606744984357082948U,
+ 4601869677011524443U, 4606195668621671667U,
+ 13829567705476447475U, 4601869677011524443U,
+ 4605031521104517324U, 4604016517974851588U,
+ 13827388554829627396U, 4605031521104517324U,
+ 4590406145430462614U, 4607153778602162496U,
+ 13830525815456938304U, 4590406145430462614U,
+ 4607161910007591876U, 4589524267239410099U,
+ 13812896304094185907U, 4607161910007591876U,
+ 4604100215502905499U, 4604959323120302796U,
+ 13828331359975078604U, 4604100215502905499U,
+ 4606245366082353408U, 4601672213217083403U,
+ 13825044250071859211U, 4606245366082353408U,
+ 4599427256825614420U, 4606710311774494716U,
+ 13830082348629270524U, 4599427256825614420U,
+ 4606870719641066940U, 4598369669086960528U,
+ 13821741705941736336U, 4606870719641066940U,
+ 4602646891659203088U, 4605984877841711338U,
+ 13829356914696487146U, 4602646891659203088U,
+ 4605309881318010327U, 4603673059103075106U,
+ 13827045095957850914U, 4605309881318010327U,
+ 4593797652641645341U, 4607107746899444102U,
+ 13830479783754219910U, 4593797652641645341U,
+ 4607059093103722971U, 4595109641634432498U,
+ 13818481678489208306U, 4607059093103722971U,
+ 4603406726595779752U, 4605507406967535927U,
+ 13828879443822311735U, 4603406726595779752U,
+ 4605814408482919348U, 4602947266358709886U,
+ 13826319303213485694U, 4605814408482919348U,
+ 4597277522845151878U, 4606951288507767453U,
+ 13830323325362543261U, 4597277522845151878U,
+ 4606598603759044570U, 4600051662802353687U,
+ 13823423699657129495U, 4606598603759044570U,
+ 4601072712526242277U, 4606387137437298591U,
+ 13829759174292074399U, 4601072712526242277U,
+ 4604736643460027021U, 4604345904647073908U,
+ 13827717941501849716U, 4604736643460027021U,
+ 4584498631466405633U, 4607178180169683960U,
+ 13830550217024459768U, 4584498631466405633U,
+ 4607174111710118367U, 4586348876009622851U,
+ 13809720912864398659U, 4607174111710118367U,
+ 4604264921241055824U, 4604811873195349477U,
+ 13828183910050125285U, 4604264921241055824U,
+ 4606341107699334546U, 4601273700967202825U,
+ 13824645737821978633U, 4606341107699334546U,
+ 4599844446633109139U, 4606637115963965612U,
+ 13830009152818741420U, 4599844446633109139U,
+ 4606925748668145757U, 4597707695679609371U,
+ 13821079732534385179U, 4606925748668145757U,
+ 4602853162432841185U, 4605872393621214213U,
+ 13829244430475990021U, 4602853162432841185U,
+ 4605442656228245717U, 4603496309891590679U,
+ 13826868346746366487U, 4605442656228245717U,
+ 4594673119063280916U, 4607076652372832968U,
+ 13830448689227608776U, 4594673119063280916U,
+ 4607092871118901179U, 4594235767444503503U,
+ 13817607804299279311U, 4607092871118901179U,
+ 4603585091850767959U, 4605376811039722786U,
+ 13828748847894498594U, 4603585091850767959U,
+ 4605929219593405673U, 4602758354025980442U,
+ 13826130390880756250U, 4605929219593405673U,
+ 4598136582470364665U, 4606898891031025132U,
+ 13830270927885800940U, 4598136582470364665U,
+ 4606674353838411301U, 4599636300858866724U,
+ 13823008337713642532U, 4606674353838411301U,
+ 4601473544562720001U, 4606293848208650998U,
+ 13829665885063426806U, 4601473544562720001U,
+ 4604886103475043762U, 4604183020748362039U,
+ 13827555057603137847U, 4604886103475043762U,
+ 4588115294056142819U, 4607168688050493276U,
+ 13830540724905269084U, 4588115294056142819U,
+ 4607144295058764886U, 4591287158938884897U,
+ 13814659195793660705U, 4607144295058764886U,
+ 4603931940768740167U, 4605102686554936490U,
+ 13828474723409712298U, 4603931940768740167U,
+ 4606144763310860551U, 4602065906208722008U,
+ 13825437943063497816U, 4606144763310860551U,
+ 4599006600037663623U, 4606778366364612594U,
+ 13830150403219388402U, 4599006600037663623U,
+ 4606810452769876110U, 4598795050632330097U,
+ 13822167087487105905U, 4606810452769876110U,
+ 4602260871257280788U, 4606092657816072624U,
+ 13829464694670848432U, 4602260871257280788U,
+ 4605172808754305228U, 4603846496621587377U,
+ 13827218533476363185U, 4605172808754305228U,
+ 4592167175087283203U, 4607133460805585796U,
+ 13830505497660361604U, 4592167175087283203U,
+ 4607019963775302583U, 4595979936813835462U,
+ 13819351973668611270U, 4607019963775302583U,
+ 4603225210076562971U, 4605633586259814045U,
+ 13829005623114589853U, 4603225210076562971U,
+ 4605694995810664660U, 4603133304188877240U,
+ 13826505341043653048U, 4605694995810664660U,
+ 4596413578358834022U, 4606998399608725124U,
+ 13830370436463500932U, 4596413578358834022U,
+ 4606517779747998088U, 4600463181646572228U,
+ 13823835218501348036U, 4606517779747998088U,
+ 4600667422348321968U, 4606475480113671417U,
+ 13829847516968447225U, 4600667422348321968U,
+ 4604583231088591477U, 4604505071555817232U,
+ 13827877108410593040U, 4604583231088591477U,
+ 4573724215515480177U, 4607182249242036882U,
+ 13830554286096812690U, 4573724215515480177U,
+ 4607182376410422530U, 4569220649180767418U,
+ 13792592686035543226U, 4607182376410422530U,
+ 4604524701268679793U, 4604563781218984604U,
+ 13827935818073760412U, 4604524701268679793U,
+ 4606486172460753999U, 4600616459743653188U,
+ 13823988496598428996U, 4606486172460753999U,
+ 4600514338912178239U, 4606507322377452870U,
+ 13829879359232228678U, 4600514338912178239U,
+ 4607003915349878877U, 4596305267720071930U,
+ 13819677304574847738U, 4607003915349878877U,
+ 4603156351203636159U, 4605679749231851918U,
+ 13829051786086627726U, 4603156351203636159U,
+ 4605649044311923410U, 4603202304363743346U,
+ 13826574341218519154U, 4605649044311923410U,
+ 4596088445927168004U, 4607014697483910382U,
+ 13830386734338686190U, 4596088445927168004U,
+ 4607136295912168606U, 4591947271803021404U,
+ 13815319308657797212U, 4607136295912168606U,
+ 4603867938232615808U, 4605155376589456981U,
+ 13828527413444232789U, 4603867938232615808U,
+ 4606105796280968177U, 4602212250118051877U,
+ 13825584286972827685U, 4606105796280968177U,
+ 4598848011564831930U, 4606802552898869248U,
+ 13830174589753645056U, 4598848011564831930U,
+ 4606786509620734768U, 4598953786765296928U,
+ 13822325823620072736U, 4606786509620734768U,
+ 4602114767134999006U, 4606131849150971908U,
+ 13829503886005747716U, 4602114767134999006U,
+ 4605120315324767624U, 4603910660507251362U,
+ 13827282697362027170U, 4605120315324767624U,
+ 4591507261658050721U, 4607141713064252300U,
+ 13830513749919028108U, 4591507261658050721U,
+ 4607170170974224083U, 4587673791460508439U,
+ 13811045828315284247U, 4607170170974224083U,
+ 4604203581176243359U, 4604867640218014515U,
+ 13828239677072790323U, 4604203581176243359U,
+ 4606305777984577632U, 4601423692641949331U,
+ 13824795729496725139U, 4606305777984577632U,
+ 4599688422741010356U, 4606665164148251002U,
+ 13830037201003026810U, 4599688422741010356U,
+ 4606905728766014348U, 4598029484874872834U,
+ 13821401521729648642U, 4606905728766014348U,
+ 4602782121393764535U, 4605915122243179241U,
+ 13829287159097955049U, 4602782121393764535U,
+ 4605393374401988274U, 4603562972219549215U,
+ 13826935009074325023U, 4605393374401988274U,
+ 4594345179472540681U, 4607088942243446236U,
+ 13830460979098222044U, 4594345179472540681U,
+ 4607080832832247697U, 4594563856311064231U,
+ 13817935893165840039U, 4607080832832247697U,
+ 4603518581031047189U, 4605426297151190466U,
+ 13828798334005966274U, 4603518581031047189U,
+ 4605886709123365959U, 4602829525820289164U,
+ 13826201562675064972U, 4605886709123365959U,
+ 4597815040470278984U, 4606919157647773535U,
+ 13830291194502549343U, 4597815040470278984U,
+ 4606646545123403481U, 4599792496117920694U,
+ 13823164532972696502U, 4606646545123403481U,
+ 4601323770373937522U, 4606329407841126011U,
+ 13829701444695901819U, 4601323770373937522U,
+ 4604830524903495634U, 4604244531615310815U,
+ 13827616568470086623U, 4604830524903495634U,
+ 4586790578280679046U, 4607172882816799076U,
+ 13830544919671574884U, 4586790578280679046U,
+ 4607178985458280057U, 4583614727651146525U,
+ 13806986764505922333U, 4607178985458280057U,
+ 4604366005771528720U, 4604717681185626434U,
+ 13828089718040402242U, 4604366005771528720U,
+ 4606398451906509788U, 4601022290077223616U,
+ 13824394326931999424U, 4606398451906509788U,
+ 4600103317933788342U, 4606588777269136769U,
+ 13829960814123912577U, 4600103317933788342U,
+ 4606957467106717424U, 4597169786279785693U,
+ 13820541823134561501U, 4606957467106717424U,
+ 4602970680601913687U, 4605799732098147061U,
+ 13829171768952922869U, 4602970680601913687U,
+ 4605523422498301790U, 4603384207141321914U,
+ 13826756243996097722U, 4605523422498301790U,
+ 4595218635031890910U, 4607054494135176056U,
+ 13830426530989951864U, 4595218635031890910U,
+ 4607111255739239816U, 4593688012422887515U,
+ 13817060049277663323U, 4607111255739239816U,
+ 4603694922063032361U, 4605292980606880364U,
+ 13828665017461656172U, 4603694922063032361U,
+ 4605998608960791335U, 4602598930031891166U,
+ 13825970966886666974U, 4605998608960791335U,
+ 4598423001813699022U, 4606863472012527185U,
+ 13830235508867302993U, 4598423001813699022U,
+ 4606719100629313491U, 4599374859150636784U,
+ 13822746896005412592U, 4606719100629313491U,
+ 4601721693286060937U, 4606233055365547081U,
+ 13829605092220322889U, 4601721693286060937U,
+ 4604977468824438271U, 4604079374282302598U,
+ 13827451411137078406U, 4604977468824438271U,
+ 4589744810590291021U, 4607160003989618959U,
+ 13830532040844394767U, 4589744810590291021U,
+ 4607155938267770208U, 4590185751760970393U,
+ 13813557788615746201U, 4607155938267770208U,
+ 4604037525321326463U, 4605013567986435066U,
+ 13828385604841210874U, 4604037525321326463U,
+ 4606208206518262803U, 4601820425647934753U,
+ 13825192462502710561U, 4606208206518262803U,
+ 4599269903251194481U, 4606736437002195879U,
+ 13830108473856971687U, 4599269903251194481U,
+ 4606848731493011465U, 4598529532600161144U,
+ 13821901569454936952U, 4606848731493011465U,
+ 4602502755147763107U, 4606025850160239809U,
+ 13829397887015015617U, 4602502755147763107U,
+ 4605258978359093269U, 4603738491917026584U,
+ 13827110528771802392U, 4605258978359093269U,
+ 4593265590854265407U, 4607118021058468598U,
+ 13830490057913244406U, 4593265590854265407U,
+ 4607045045516813836U, 4595436449949385485U,
+ 13818808486804161293U, 4607045045516813836U,
+ 4603339021357904144U, 4605555245917486022U,
+ 13828927282772261830U, 4603339021357904144U,
+ 4605770164172969910U, 4603017373458244943U,
+ 13826389410313020751U, 4605770164172969910U,
+ 4596954088216812973U, 4606969576261663845U,
+ 13830341613116439653U, 4596954088216812973U,
+ 4606568886807728474U, 4600206446098256018U,
+ 13823578482953031826U, 4606568886807728474U,
+ 4600921238092511730U, 4606420848538580260U,
+ 13829792885393356068U, 4600921238092511730U,
+ 4604679572075463103U, 4604406033021674239U,
+ 13827778069876450047U, 4604679572075463103U,
+ 4581846703643734566U, 4607180341788068727U,
+ 13830552378642844535U, 4581846703643734566U,
+ 4607181359080094673U, 4579996072175835083U,
+ 13803368109030610891U, 4607181359080094673U,
+ 4604445825685214043U, 4604641218080103285U,
+ 13828013254934879093U, 4604445825685214043U,
+ 4606442934727379583U, 4600819913163773071U,
+ 13824191950018548879U, 4606442934727379583U,
+ 4600309328230211502U, 4606548680329491866U,
+ 13829920717184267674U, 4600309328230211502U,
+ 4606981354314050484U, 4596738097012783531U,
+ 13820110133867559339U, 4606981354314050484U,
+ 4603063884010218172U, 4605740310302420207U,
+ 13829112347157196015U, 4603063884010218172U,
+ 4605586791482848547U, 4603293641160266722U,
+ 13826665678015042530U, 4605586791482848547U,
+ 4595654028864046335U, 4607035262954517034U,
+ 13830407299809292842U, 4595654028864046335U,
+ 4607124449686274900U, 4592826452951465409U,
+ 13816198489806241217U, 4607124449686274900U,
+ 4603781852316960384U, 4605224709411790590U,
+ 13828596746266566398U, 4603781852316960384U,
+ 4606052795787882823U, 4602406247776385022U,
+ 13825778284631160830U, 4606052795787882823U,
+ 4598635880488956483U, 4606833664420673202U,
+ 13830205701275449010U, 4598635880488956483U,
+ 4606753451050079834U, 4599164736579548843U,
+ 13822536773434324651U, 4606753451050079834U,
+ 4601918851211878557U, 4606183055233559255U,
+ 13829555092088335063U, 4601918851211878557U,
+ 4605049409688478101U, 4603995455647851249U,
+ 13827367492502627057U, 4605049409688478101U,
+ 4590626485056654602U, 4607151534426937478U,
+ 13830523571281713286U, 4590626485056654602U,
+ 4607163731439411601U, 4589303678145802340U,
+ 13812675715000578148U, 4607163731439411601U,
+ 4604121000955189926U, 4604941113561600762U,
+ 13828313150416376570U, 4604121000955189926U,
+ 4606257600839867033U, 4601622657843474729U,
+ 13824994694698250537U, 4606257600839867033U,
+ 4599479600326345459U, 4606701442584137310U,
+ 13830073479438913118U, 4599479600326345459U,
+ 4606877885424248132U, 4598316292140394014U,
+ 13821688328995169822U, 4606877885424248132U,
+ 4602686793990243041U, 4605971073215153165U,
+ 13829343110069928973U, 4602686793990243041U,
+ 4605326714874986465U, 4603651144395358093U,
+ 13827023181250133901U, 4605326714874986465U,
+ 4593907249284540294U, 4607104153983298999U,
+ 13830476190838074807U, 4593907249284540294U,
+ 4607063608453868552U, 4595000592312171144U,
+ 13818372629166946952U, 4607063608453868552U,
+ 4603429196809300824U, 4605491322423429598U,
+ 13828863359278205406U, 4603429196809300824U,
+ 4605829012964735987U, 4602923807199184054U,
+ 13826295844053959862U, 4605829012964735987U,
+ 4597385183080791534U, 4606945027305114062U,
+ 13830317064159889870U, 4597385183080791534U,
+ 4606608350964852124U, 4599999947619525579U,
+ 13823371984474301387U, 4606608350964852124U,
+ 4601123065313358619U, 4606375745674388705U,
+ 13829747782529164513U, 4601123065313358619U,
+ 4604755543975806820U, 4604325745441780828U,
+ 13827697782296556636U, 4604755543975806820U,
+ 4585023436363055487U, 4607177290141793710U,
+ 13830549326996569518U, 4585023436363055487U,
+ 4607175255902437396U, 4585907115494236537U,
+ 13809279152349012345U, 4607175255902437396U,
+ 4604285253548209224U, 4604793159020491611U,
+ 13828165195875267419U, 4604285253548209224U,
+ 4606352730697093817U, 4601223560006786057U,
+ 13824595596861561865U, 4606352730697093817U,
+ 4599896339047301634U, 4606627607157935956U,
+ 13829999644012711764U, 4599896339047301634U,
+ 4606932257325205256U, 4597600270510262682U,
+ 13820972307365038490U, 4606932257325205256U,
+ 4602876755014813164U, 4605858005670328613U,
+ 13829230042525104421U, 4602876755014813164U,
+ 4605458946901419122U, 4603473988668005304U,
+ 13826846025522781112U, 4605458946901419122U,
+ 4594782329999411347U, 4607072388129742377U,
+ 13830444424984518185U, 4594782329999411347U,
+ 4607096716058023245U, 4594126307716900071U,
+ 13817498344571675879U, 4607096716058023245U,
+ 4603607160562208225U, 4605360179893335444U,
+ 13828732216748111252U, 4603607160562208225U,
+ 4605943243960030558U, 4602734543519989142U,
+ 13826106580374764950U, 4605943243960030558U,
+ 4598209407597805010U, 4606891971185517504U,
+ 13830264008040293312U, 4598209407597805010U,
+ 4606683463531482757U, 4599584122834874440U,
+ 13822956159689650248U, 4606683463531482757U,
+ 4601523323048804569U, 4606281842017099424U,
+ 13829653878871875232U, 4601523323048804569U,
+ 4604904503566677638U, 4604162403772767740U,
+ 13827534440627543548U, 4604904503566677638U,
+ 4588556721781247689U, 4607167120476811757U,
+ 13830539157331587565U, 4588556721781247689U,
+ 4607146792632922887U, 4591066993883984169U,
+ 13814439030738759977U, 4607146792632922887U,
+ 4603953166845776383U, 4605084992581147553U,
+ 13828457029435923361U, 4603953166845776383U,
+ 4606157602458368090U, 4602016966272225497U,
+ 13825389003127001305U, 4606157602458368090U,
+ 4599059363095165615U, 4606770142132396069U,
+ 13830142178987171877U, 4599059363095165615U,
+ 4606818271362779153U, 4598742041476147134U,
+ 13822114078330922942U, 4606818271362779153U,
+ 4602309411551204896U, 4606079444829232727U,
+ 13829451481684008535U, 4602309411551204896U,
+ 4605190175055178825U, 4603825001630339212U,
+ 13827197038485115020U, 4605190175055178825U,
+ 4592387007752762956U, 4607130541380624519U,
+ 13830502578235400327U, 4592387007752762956U,
+ 4607025146816593591U, 4595871363584150300U,
+ 13819243400438926108U, 4607025146816593591U,
+ 4603248068256948438U, 4605618058006716661U,
+ 13828990094861492469U, 4603248068256948438U,
+ 4605710171610479304U, 4603110210506737381U,
+ 13826482247361513189U, 4605710171610479304U,
+ 4596521820799644122U, 4606992800820440327U,
+ 13830364837675216135U, 4596521820799644122U,
+ 4606528158595189433U, 4600411960456200676U,
+ 13823783997310976484U, 4606528158595189433U,
+ 4600718319105833937U, 4606464709641375231U,
+ 13829836746496151039U, 4600718319105833937U,
+ 4604602620643553229U, 4604485382263976838U,
+ 13827857419118752646U, 4604602620643553229U,
+ 4576459225186735875U, 4607182037296057423U,
+ 13830554074150833231U, 4576459225186735875U,
+ 4607182037296057423U, 4576459225186735875U,
+ 13799831262041511683U, 4607182037296057423U,
+ 4604485382263976838U, 4604602620643553229U,
+ 13827974657498329037U, 4604485382263976838U,
+ 4606464709641375231U, 4600718319105833937U,
+ 13824090355960609745U, 4606464709641375231U,
+ 4600411960456200676U, 4606528158595189433U,
+ 13829900195449965241U, 4600411960456200676U,
+ 4606992800820440327U, 4596521820799644122U,
+ 13819893857654419930U, 4606992800820440327U,
+ 4603110210506737381U, 4605710171610479304U,
+ 13829082208465255112U, 4603110210506737381U,
+ 4605618058006716661U, 4603248068256948438U,
+ 13826620105111724246U, 4605618058006716661U,
+ 4595871363584150300U, 4607025146816593591U,
+ 13830397183671369399U, 4595871363584150300U,
+ 4607130541380624519U, 4592387007752762956U,
+ 13815759044607538764U, 4607130541380624519U,
+ 4603825001630339212U, 4605190175055178825U,
+ 13828562211909954633U, 4603825001630339212U,
+ 4606079444829232727U, 4602309411551204896U,
+ 13825681448405980704U, 4606079444829232727U,
+ 4598742041476147134U, 4606818271362779153U,
+ 13830190308217554961U, 4598742041476147134U,
+ 4606770142132396069U, 4599059363095165615U,
+ 13822431399949941423U, 4606770142132396069U,
+ 4602016966272225497U, 4606157602458368090U,
+ 13829529639313143898U, 4602016966272225497U,
+ 4605084992581147553U, 4603953166845776383U,
+ 13827325203700552191U, 4605084992581147553U,
+ 4591066993883984169U, 4607146792632922887U,
+ 13830518829487698695U, 4591066993883984169U,
+ 4607167120476811757U, 4588556721781247689U,
+ 13811928758636023497U, 4607167120476811757U,
+ 4604162403772767740U, 4604904503566677638U,
+ 13828276540421453446U, 4604162403772767740U,
+ 4606281842017099424U, 4601523323048804569U,
+ 13824895359903580377U, 4606281842017099424U,
+ 4599584122834874440U, 4606683463531482757U,
+ 13830055500386258565U, 4599584122834874440U,
+ 4606891971185517504U, 4598209407597805010U,
+ 13821581444452580818U, 4606891971185517504U,
+ 4602734543519989142U, 4605943243960030558U,
+ 13829315280814806366U, 4602734543519989142U,
+ 4605360179893335444U, 4603607160562208225U,
+ 13826979197416984033U, 4605360179893335444U,
+ 4594126307716900071U, 4607096716058023245U,
+ 13830468752912799053U, 4594126307716900071U,
+ 4607072388129742377U, 4594782329999411347U,
+ 13818154366854187155U, 4607072388129742377U,
+ 4603473988668005304U, 4605458946901419122U,
+ 13828830983756194930U, 4603473988668005304U,
+ 4605858005670328613U, 4602876755014813164U,
+ 13826248791869588972U, 4605858005670328613U,
+ 4597600270510262682U, 4606932257325205256U,
+ 13830304294179981064U, 4597600270510262682U,
+ 4606627607157935956U, 4599896339047301634U,
+ 13823268375902077442U, 4606627607157935956U,
+ 4601223560006786057U, 4606352730697093817U,
+ 13829724767551869625U, 4601223560006786057U,
+ 4604793159020491611U, 4604285253548209224U,
+ 13827657290402985032U, 4604793159020491611U,
+ 4585907115494236537U, 4607175255902437396U,
+ 13830547292757213204U, 4585907115494236537U,
+ 4607177290141793710U, 4585023436363055487U,
+ 13808395473217831295U, 4607177290141793710U,
+ 4604325745441780828U, 4604755543975806820U,
+ 13828127580830582628U, 4604325745441780828U,
+ 4606375745674388705U, 4601123065313358619U,
+ 13824495102168134427U, 4606375745674388705U,
+ 4599999947619525579U, 4606608350964852124U,
+ 13829980387819627932U, 4599999947619525579U,
+ 4606945027305114062U, 4597385183080791534U,
+ 13820757219935567342U, 4606945027305114062U,
+ 4602923807199184054U, 4605829012964735987U,
+ 13829201049819511795U, 4602923807199184054U,
+ 4605491322423429598U, 4603429196809300824U,
+ 13826801233664076632U, 4605491322423429598U,
+ 4595000592312171144U, 4607063608453868552U,
+ 13830435645308644360U, 4595000592312171144U,
+ 4607104153983298999U, 4593907249284540294U,
+ 13817279286139316102U, 4607104153983298999U,
+ 4603651144395358093U, 4605326714874986465U,
+ 13828698751729762273U, 4603651144395358093U,
+ 4605971073215153165U, 4602686793990243041U,
+ 13826058830845018849U, 4605971073215153165U,
+ 4598316292140394014U, 4606877885424248132U,
+ 13830249922279023940U, 4598316292140394014U,
+ 4606701442584137310U, 4599479600326345459U,
+ 13822851637181121267U, 4606701442584137310U,
+ 4601622657843474729U, 4606257600839867033U,
+ 13829629637694642841U, 4601622657843474729U,
+ 4604941113561600762U, 4604121000955189926U,
+ 13827493037809965734U, 4604941113561600762U,
+ 4589303678145802340U, 4607163731439411601U,
+ 13830535768294187409U, 4589303678145802340U,
+ 4607151534426937478U, 4590626485056654602U,
+ 13813998521911430410U, 4607151534426937478U,
+ 4603995455647851249U, 4605049409688478101U,
+ 13828421446543253909U, 4603995455647851249U,
+ 4606183055233559255U, 4601918851211878557U,
+ 13825290888066654365U, 4606183055233559255U,
+ 4599164736579548843U, 4606753451050079834U,
+ 13830125487904855642U, 4599164736579548843U,
+ 4606833664420673202U, 4598635880488956483U,
+ 13822007917343732291U, 4606833664420673202U,
+ 4602406247776385022U, 4606052795787882823U,
+ 13829424832642658631U, 4602406247776385022U,
+ 4605224709411790590U, 4603781852316960384U,
+ 13827153889171736192U, 4605224709411790590U,
+ 4592826452951465409U, 4607124449686274900U,
+ 13830496486541050708U, 4592826452951465409U,
+ 4607035262954517034U, 4595654028864046335U,
+ 13819026065718822143U, 4607035262954517034U,
+ 4603293641160266722U, 4605586791482848547U,
+ 13828958828337624355U, 4603293641160266722U,
+ 4605740310302420207U, 4603063884010218172U,
+ 13826435920864993980U, 4605740310302420207U,
+ 4596738097012783531U, 4606981354314050484U,
+ 13830353391168826292U, 4596738097012783531U,
+ 4606548680329491866U, 4600309328230211502U,
+ 13823681365084987310U, 4606548680329491866U,
+ 4600819913163773071U, 4606442934727379583U,
+ 13829814971582155391U, 4600819913163773071U,
+ 4604641218080103285U, 4604445825685214043U,
+ 13827817862539989851U, 4604641218080103285U,
+ 4579996072175835083U, 4607181359080094673U,
+ 13830553395934870481U, 4579996072175835083U,
+ 4607180341788068727U, 4581846703643734566U,
+ 13805218740498510374U, 4607180341788068727U,
+ 4604406033021674239U, 4604679572075463103U,
+ 13828051608930238911U, 4604406033021674239U,
+ 4606420848538580260U, 4600921238092511730U,
+ 13824293274947287538U, 4606420848538580260U,
+ 4600206446098256018U, 4606568886807728474U,
+ 13829940923662504282U, 4600206446098256018U,
+ 4606969576261663845U, 4596954088216812973U,
+ 13820326125071588781U, 4606969576261663845U,
+ 4603017373458244943U, 4605770164172969910U,
+ 13829142201027745718U, 4603017373458244943U,
+ 4605555245917486022U, 4603339021357904144U,
+ 13826711058212679952U, 4605555245917486022U,
+ 4595436449949385485U, 4607045045516813836U,
+ 13830417082371589644U, 4595436449949385485U,
+ 4607118021058468598U, 4593265590854265407U,
+ 13816637627709041215U, 4607118021058468598U,
+ 4603738491917026584U, 4605258978359093269U,
+ 13828631015213869077U, 4603738491917026584U,
+ 4606025850160239809U, 4602502755147763107U,
+ 13825874792002538915U, 4606025850160239809U,
+ 4598529532600161144U, 4606848731493011465U,
+ 13830220768347787273U, 4598529532600161144U,
+ 4606736437002195879U, 4599269903251194481U,
+ 13822641940105970289U, 4606736437002195879U,
+ 4601820425647934753U, 4606208206518262803U,
+ 13829580243373038611U, 4601820425647934753U,
+ 4605013567986435066U, 4604037525321326463U,
+ 13827409562176102271U, 4605013567986435066U,
+ 4590185751760970393U, 4607155938267770208U,
+ 13830527975122546016U, 4590185751760970393U,
+ 4607160003989618959U, 4589744810590291021U,
+ 13813116847445066829U, 4607160003989618959U,
+ 4604079374282302598U, 4604977468824438271U,
+ 13828349505679214079U, 4604079374282302598U,
+ 4606233055365547081U, 4601721693286060937U,
+ 13825093730140836745U, 4606233055365547081U,
+ 4599374859150636784U, 4606719100629313491U,
+ 13830091137484089299U, 4599374859150636784U,
+ 4606863472012527185U, 4598423001813699022U,
+ 13821795038668474830U, 4606863472012527185U,
+ 4602598930031891166U, 4605998608960791335U,
+ 13829370645815567143U, 4602598930031891166U,
+ 4605292980606880364U, 4603694922063032361U,
+ 13827066958917808169U, 4605292980606880364U,
+ 4593688012422887515U, 4607111255739239816U,
+ 13830483292594015624U, 4593688012422887515U,
+ 4607054494135176056U, 4595218635031890910U,
+ 13818590671886666718U, 4607054494135176056U,
+ 4603384207141321914U, 4605523422498301790U,
+ 13828895459353077598U, 4603384207141321914U,
+ 4605799732098147061U, 4602970680601913687U,
+ 13826342717456689495U, 4605799732098147061U,
+ 4597169786279785693U, 4606957467106717424U,
+ 13830329503961493232U, 4597169786279785693U,
+ 4606588777269136769U, 4600103317933788342U,
+ 13823475354788564150U, 4606588777269136769U,
+ 4601022290077223616U, 4606398451906509788U,
+ 13829770488761285596U, 4601022290077223616U,
+ 4604717681185626434U, 4604366005771528720U,
+ 13827738042626304528U, 4604717681185626434U,
+ 4583614727651146525U, 4607178985458280057U,
+ 13830551022313055865U, 4583614727651146525U,
+ 4607172882816799076U, 4586790578280679046U,
+ 13810162615135454854U, 4607172882816799076U,
+ 4604244531615310815U, 4604830524903495634U,
+ 13828202561758271442U, 4604244531615310815U,
+ 4606329407841126011U, 4601323770373937522U,
+ 13824695807228713330U, 4606329407841126011U,
+ 4599792496117920694U, 4606646545123403481U,
+ 13830018581978179289U, 4599792496117920694U,
+ 4606919157647773535U, 4597815040470278984U,
+ 13821187077325054792U, 4606919157647773535U,
+ 4602829525820289164U, 4605886709123365959U,
+ 13829258745978141767U, 4602829525820289164U,
+ 4605426297151190466U, 4603518581031047189U,
+ 13826890617885822997U, 4605426297151190466U,
+ 4594563856311064231U, 4607080832832247697U,
+ 13830452869687023505U, 4594563856311064231U,
+ 4607088942243446236U, 4594345179472540681U,
+ 13817717216327316489U, 4607088942243446236U,
+ 4603562972219549215U, 4605393374401988274U,
+ 13828765411256764082U, 4603562972219549215U,
+ 4605915122243179241U, 4602782121393764535U,
+ 13826154158248540343U, 4605915122243179241U,
+ 4598029484874872834U, 4606905728766014348U,
+ 13830277765620790156U, 4598029484874872834U,
+ 4606665164148251002U, 4599688422741010356U,
+ 13823060459595786164U, 4606665164148251002U,
+ 4601423692641949331U, 4606305777984577632U,
+ 13829677814839353440U, 4601423692641949331U,
+ 4604867640218014515U, 4604203581176243359U,
+ 13827575618031019167U, 4604867640218014515U,
+ 4587673791460508439U, 4607170170974224083U,
+ 13830542207828999891U, 4587673791460508439U,
+ 4607141713064252300U, 4591507261658050721U,
+ 13814879298512826529U, 4607141713064252300U,
+ 4603910660507251362U, 4605120315324767624U,
+ 13828492352179543432U, 4603910660507251362U,
+ 4606131849150971908U, 4602114767134999006U,
+ 13825486803989774814U, 4606131849150971908U,
+ 4598953786765296928U, 4606786509620734768U,
+ 13830158546475510576U, 4598953786765296928U,
+ 4606802552898869248U, 4598848011564831930U,
+ 13822220048419607738U, 4606802552898869248U,
+ 4602212250118051877U, 4606105796280968177U,
+ 13829477833135743985U, 4602212250118051877U,
+ 4605155376589456981U, 4603867938232615808U,
+ 13827239975087391616U, 4605155376589456981U,
+ 4591947271803021404U, 4607136295912168606U,
+ 13830508332766944414U, 4591947271803021404U,
+ 4607014697483910382U, 4596088445927168004U,
+ 13819460482781943812U, 4607014697483910382U,
+ 4603202304363743346U, 4605649044311923410U,
+ 13829021081166699218U, 4603202304363743346U,
+ 4605679749231851918U, 4603156351203636159U,
+ 13826528388058411967U, 4605679749231851918U,
+ 4596305267720071930U, 4607003915349878877U,
+ 13830375952204654685U, 4596305267720071930U,
+ 4606507322377452870U, 4600514338912178239U,
+ 13823886375766954047U, 4606507322377452870U,
+ 4600616459743653188U, 4606486172460753999U,
+ 13829858209315529807U, 4600616459743653188U,
+ 4604563781218984604U, 4604524701268679793U,
+ 13827896738123455601U, 4604563781218984604U,
+ 4569220649180767418U, 4607182376410422530U,
+ 13830554413265198338U, 4569220649180767418U
+};
+
+const fpr fpr_p2_tab[] = {
+ 4611686018427387904U,
+ 4607182418800017408U,
+ 4602678819172646912U,
+ 4598175219545276416U,
+ 4593671619917905920U,
+ 4589168020290535424U,
+ 4584664420663164928U,
+ 4580160821035794432U,
+ 4575657221408423936U,
+ 4571153621781053440U,
+ 4566650022153682944U
+};
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/fpr.h b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/fpr.h
new file mode 100644
index 000000000..fb6830e71
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/fpr.h
@@ -0,0 +1,253 @@
+#ifndef PQCLEAN_FALCON512_CLEAN_FPR_H
+#define PQCLEAN_FALCON512_CLEAN_FPR_H
+
+/*
+ * Floating-point operations.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+/* ====================================================================== */
+/*
+ * Custom floating-point implementation with integer arithmetics. We
+ * use IEEE-754 "binary64" format, with some simplifications:
+ *
+ * - Top bit is s = 1 for negative, 0 for positive.
+ *
+ * - Exponent e uses the next 11 bits (bits 52 to 62, inclusive).
+ *
+ * - Mantissa m uses the 52 low bits.
+ *
+ * Encoded value is, in general: (-1)^s * 2^(e-1023) * (1 + m*2^(-52))
+ * i.e. the mantissa really is a 53-bit number (less than 2.0, but not
+ * less than 1.0), but the top bit (equal to 1 by definition) is omitted
+ * in the encoding.
+ *
+ * In IEEE-754, there are some special values:
+ *
+ * - If e = 2047, then the value is either an infinity (m = 0) or
+ * a NaN (m != 0).
+ *
+ * - If e = 0, then the value is either a zero (m = 0) or a subnormal,
+ * aka "denormalized number" (m != 0).
+ *
+ * Of these, we only need the zeros. The caller is responsible for not
+ * providing operands that would lead to infinites, NaNs or subnormals.
+ * If inputs are such that values go out of range, then indeterminate
+ * values are returned (it would still be deterministic, but no specific
+ * value may be relied upon).
+ *
+ * At the C level, the three parts are stored in a 64-bit unsigned
+ * word.
+ *
+ * One may note that a property of the IEEE-754 format is that order
+ * is preserved for positive values: if two positive floating-point
+ * values x and y are such that x < y, then their respective encodings
+ * as _signed_ 64-bit integers i64(x) and i64(y) will be such that
+ * i64(x) < i64(y). For negative values, order is reversed: if x < 0,
+ * y < 0, and x < y, then i64(x) > i64(y).
+ *
+ * IMPORTANT ASSUMPTIONS:
+ * ======================
+ *
+ * For proper computations, and constant-time behaviour, we assume the
+ * following:
+ *
+ * - 32x32->64 multiplication (unsigned) has an execution time that
+ * is independent of its operands. This is true of most modern
+ * x86 and ARM cores. Notable exceptions are the ARM Cortex M0, M0+
+ * and M3 (in the M0 and M0+, this is done in software, so it depends
+ * on that routine), and the PowerPC cores from the G3/G4 lines.
+ * For more info, see: https://www.bearssl.org/ctmul.html
+ *
+ * - Left-shifts and right-shifts of 32-bit values have an execution
+ * time which does not depend on the shifted value nor on the
+ * shift count. An historical exception is the Pentium IV, but most
+ * modern CPU have barrel shifters. Some small microcontrollers
+ * might have varying-time shifts (not the ARM Cortex M*, though).
+ *
+ * - Right-shift of a signed negative value performs a sign extension.
+ * As per the C standard, this operation returns an
+ * implementation-defined result (this is NOT an "undefined
+ * behaviour"). On most/all systems, an arithmetic shift is
+ * performed, because this is what makes most sense.
+ */
+
+/*
+ * Normally we should declare the 'fpr' type to be a struct or union
+ * around the internal 64-bit value; however, we want to use the
+ * direct 64-bit integer type to enable a lighter call convention on
+ * ARM platforms. This means that direct (invalid) use of operators
+ * such as '*' or '+' will not be caught by the compiler. We rely on
+ * the "normal" (non-emulated) code to detect such instances.
+ */
+typedef uint64_t fpr;
+
+/*
+ * For computations, we split values into an integral mantissa in the
+ * 2^54..2^55 range, and an (adjusted) exponent. The lowest bit is
+ * "sticky" (it is set to 1 if any of the bits below it is 1); when
+ * re-encoding, the low two bits are dropped, but may induce an
+ * increment in the value for proper rounding.
+ */
+
+/*
+ * Right-shift a 64-bit unsigned value by a possibly secret shift count.
+ * We assumed that the underlying architecture had a barrel shifter for
+ * 32-bit shifts, but for 64-bit shifts on a 32-bit system, this will
+ * typically invoke a software routine that is not necessarily
+ * constant-time; hence the function below.
+ *
+ * Shift count n MUST be in the 0..63 range.
+ */
+#define fpr_ursh PQCLEAN_FALCON512_CLEAN_fpr_ursh
+uint64_t fpr_ursh(uint64_t x, int n);
+
+/*
+ * Right-shift a 64-bit signed value by a possibly secret shift count
+ * (see fpr_ursh() for the rationale).
+ *
+ * Shift count n MUST be in the 0..63 range.
+ */
+#define fpr_irsh PQCLEAN_FALCON512_CLEAN_fpr_irsh
+int64_t fpr_irsh(int64_t x, int n);
+
+/*
+ * Left-shift a 64-bit unsigned value by a possibly secret shift count
+ * (see fpr_ursh() for the rationale).
+ *
+ * Shift count n MUST be in the 0..63 range.
+ */
+#define fpr_ulsh PQCLEAN_FALCON512_CLEAN_fpr_ulsh
+uint64_t fpr_ulsh(uint64_t x, int n);
+
+/*
+ * Expectations:
+ * s = 0 or 1
+ * exponent e is "arbitrary" and unbiased
+ * 2^54 <= m < 2^55
+ * Numerical value is (-1)^s * m * 2^e
+ *
+ * Exponents which are too low lead to value zero. If the exponent is
+ * too large, the returned value is indeterminate.
+ *
+ * If m = 0, then a zero is returned (using the provided sign).
+ * If e < -1076, then a zero is returned (regardless of the value of m).
+ * If e >= -1076 and e != 0, m must be within the expected range
+ * (2^54 to 2^55-1).
+ */
+#define FPR PQCLEAN_FALCON512_CLEAN_FPR
+fpr FPR(int s, int e, uint64_t m);
+
+
+#define fpr_scaled PQCLEAN_FALCON512_CLEAN_fpr_scaled
+fpr fpr_scaled(int64_t i, int sc);
+
+#define fpr_of PQCLEAN_FALCON512_CLEAN_fpr_of
+fpr fpr_of(int64_t i);
+
+static const fpr fpr_q = 4667981563525332992;
+static const fpr fpr_inverse_of_q = 4545632735260551042;
+static const fpr fpr_inv_2sqrsigma0 = 4594603506513722306;
+static const fpr fpr_inv_sigma = 4573359825155195350;
+static const fpr fpr_sigma_min_9 = 4608495221497168882;
+static const fpr fpr_sigma_min_10 = 4608586345619182117;
+static const fpr fpr_log2 = 4604418534313441775;
+static const fpr fpr_inv_log2 = 4609176140021203710;
+static const fpr fpr_bnorm_max = 4670353323383631276;
+static const fpr fpr_zero = 0;
+static const fpr fpr_one = 4607182418800017408;
+static const fpr fpr_two = 4611686018427387904;
+static const fpr fpr_onehalf = 4602678819172646912;
+static const fpr fpr_invsqrt2 = 4604544271217802189;
+static const fpr fpr_invsqrt8 = 4600040671590431693;
+static const fpr fpr_ptwo31 = 4746794007248502784;
+static const fpr fpr_ptwo31m1 = 4746794007244308480;
+static const fpr fpr_mtwo31m1 = 13970166044099084288U;
+static const fpr fpr_ptwo63m1 = 4890909195324358656;
+static const fpr fpr_mtwo63m1 = 14114281232179134464U;
+static const fpr fpr_ptwo63 = 4890909195324358656;
+
+#define fpr_rint PQCLEAN_FALCON512_CLEAN_fpr_rint
+int64_t fpr_rint(fpr x);
+
+#define fpr_floor PQCLEAN_FALCON512_CLEAN_fpr_floor
+int64_t fpr_floor(fpr x);
+
+#define fpr_trunc PQCLEAN_FALCON512_CLEAN_fpr_trunc
+int64_t fpr_trunc(fpr x);
+
+#define fpr_add PQCLEAN_FALCON512_CLEAN_fpr_add
+fpr fpr_add(fpr x, fpr y);
+
+#define fpr_sub PQCLEAN_FALCON512_CLEAN_fpr_sub
+fpr fpr_sub(fpr x, fpr y);
+
+#define fpr_neg PQCLEAN_FALCON512_CLEAN_fpr_neg
+fpr fpr_neg(fpr x);
+
+#define fpr_half PQCLEAN_FALCON512_CLEAN_fpr_half
+fpr fpr_half(fpr x);
+
+#define fpr_double PQCLEAN_FALCON512_CLEAN_fpr_double
+fpr fpr_double(fpr x);
+
+#define fpr_mul PQCLEAN_FALCON512_CLEAN_fpr_mul
+fpr fpr_mul(fpr x, fpr y);
+
+#define fpr_sqr PQCLEAN_FALCON512_CLEAN_fpr_sqr
+fpr fpr_sqr(fpr x);
+
+#define fpr_div PQCLEAN_FALCON512_CLEAN_fpr_div
+fpr fpr_div(fpr x, fpr y);
+
+#define fpr_inv PQCLEAN_FALCON512_CLEAN_fpr_inv
+fpr fpr_inv(fpr x);
+
+#define fpr_sqrt PQCLEAN_FALCON512_CLEAN_fpr_sqrt
+fpr fpr_sqrt(fpr x);
+
+#define fpr_lt PQCLEAN_FALCON512_CLEAN_fpr_lt
+int fpr_lt(fpr x, fpr y);
+
+/*
+ * Compute exp(x) for x such that |x| <= ln 2. We want a precision of 50
+ * bits or so.
+ */
+#define fpr_expm_p63 PQCLEAN_FALCON512_CLEAN_fpr_expm_p63
+uint64_t fpr_expm_p63(fpr x, fpr ccs);
+
+#define fpr_gm_tab PQCLEAN_FALCON512_CLEAN_fpr_gm_tab
+extern const fpr fpr_gm_tab[];
+
+#define fpr_p2_tab PQCLEAN_FALCON512_CLEAN_fpr_p2_tab
+extern const fpr fpr_p2_tab[];
+
+/* ====================================================================== */
+#endif
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/inner.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/inner.c
new file mode 100755
index 000000000..dd90bd57e
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/inner.c
@@ -0,0 +1,70 @@
+#include "inner.h"
+
+/*
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ */
+
+unsigned set_fpu_cw(unsigned x) {
+ return x;
+}
+
+
+uint64_t prng_get_u64(prng *p) {
+ size_t u;
+
+ /*
+ * If there are less than 9 bytes in the buffer, we refill it.
+ * This means that we may drop the last few bytes, but this allows
+ * for faster extraction code. Also, it means that we never leave
+ * an empty buffer.
+ */
+ u = p->ptr;
+ if (u >= (sizeof p->buf.d) - 9) {
+ PQCLEAN_FALCON512_CLEAN_prng_refill(p);
+ u = 0;
+ }
+ p->ptr = u + 8;
+
+ return (uint64_t)p->buf.d[u + 0]
+ | ((uint64_t)p->buf.d[u + 1] << 8)
+ | ((uint64_t)p->buf.d[u + 2] << 16)
+ | ((uint64_t)p->buf.d[u + 3] << 24)
+ | ((uint64_t)p->buf.d[u + 4] << 32)
+ | ((uint64_t)p->buf.d[u + 5] << 40)
+ | ((uint64_t)p->buf.d[u + 6] << 48)
+ | ((uint64_t)p->buf.d[u + 7] << 56);
+}
+
+
+unsigned prng_get_u8(prng *p) {
+ unsigned v;
+
+ v = p->buf.d[p->ptr ++];
+ if (p->ptr == sizeof p->buf.d) {
+ PQCLEAN_FALCON512_CLEAN_prng_refill(p);
+ }
+ return v;
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/inner.h b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/inner.h
new file mode 100644
index 000000000..d469c9237
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/inner.h
@@ -0,0 +1,793 @@
+#ifndef PQCLEAN_FALCON512_CLEAN_INNER_H
+#define PQCLEAN_FALCON512_CLEAN_INNER_H
+
+
+/*
+ * Internal functions for Falcon. This is not the API intended to be
+ * used by applications; instead, this internal API provides all the
+ * primitives on which wrappers build to provide external APIs.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+/*
+ * IMPORTANT API RULES
+ * -------------------
+ *
+ * This API has some non-trivial usage rules:
+ *
+ *
+ * - All public functions (i.e. the non-static ones) must be referenced
+ * with the PQCLEAN_FALCON512_CLEAN_ macro (e.g. PQCLEAN_FALCON512_CLEAN_verify_raw for the verify_raw()
+ * function). That macro adds a prefix to the name, which is
+ * configurable with the FALCON_PREFIX macro. This allows compiling
+ * the code into a specific "namespace" and potentially including
+ * several versions of this code into a single application (e.g. to
+ * have an AVX2 and a non-AVX2 variants and select the one to use at
+ * runtime based on availability of AVX2 opcodes).
+ *
+ * - Functions that need temporary buffers expect them as a final
+ * tmp[] array of type uint8_t*, with a size which is documented for
+ * each function. However, most have some alignment requirements,
+ * because they will use the array to store 16-bit, 32-bit or 64-bit
+ * values (e.g. uint64_t or double). The caller must ensure proper
+ * alignment. What happens on unaligned access depends on the
+ * underlying architecture, ranging from a slight time penalty
+ * to immediate termination of the process.
+ *
+ * - Some functions rely on specific rounding rules and precision for
+ * floating-point numbers. On some systems (in particular 32-bit x86
+ * with the 387 FPU), this requires setting a hardware control
+ * word. The caller MUST use set_fpu_cw() to ensure proper precision:
+ *
+ * oldcw = set_fpu_cw(2);
+ * PQCLEAN_FALCON512_CLEAN_sign_dyn(...);
+ * set_fpu_cw(oldcw);
+ *
+ * On systems where the native floating-point precision is already
+ * proper, or integer-based emulation is used, the set_fpu_cw()
+ * function does nothing, so it can be called systematically.
+ */
+#include "fips202.h"
+#include "fpr.h"
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+
+
+
+/*
+ * Some computations with floating-point elements, in particular
+ * rounding to the nearest integer, rely on operations using _exactly_
+ * the precision of IEEE-754 binary64 type (i.e. 52 bits). On 32-bit
+ * x86, the 387 FPU may be used (depending on the target OS) and, in
+ * that case, may use more precision bits (i.e. 64 bits, for an 80-bit
+ * total type length); to prevent miscomputations, we define an explicit
+ * function that modifies the precision in the FPU control word.
+ *
+ * set_fpu_cw() sets the precision to the provided value, and returns
+ * the previously set precision; callers are supposed to restore the
+ * previous precision on exit. The correct (52-bit) precision is
+ * configured with the value "2". On unsupported compilers, or on
+ * targets other than 32-bit x86, or when the native 'double' type is
+ * not used, the set_fpu_cw() function does nothing at all.
+ */
+#define set_fpu_cw PQCLEAN_FALCON512_CLEAN_set_fpu_cw
+unsigned set_fpu_cw(unsigned x);
+
+/* ==================================================================== */
+/*
+ * SHAKE256 implementation (shake.c).
+ *
+ * API is defined to be easily replaced with the fips202.h API defined
+ * as part of PQClean.
+ */
+
+
+
+#define inner_shake256_context shake256incctx
+#define inner_shake256_init(sc) shake256_inc_init(sc)
+#define inner_shake256_inject(sc, in, len) shake256_inc_absorb(sc, in, len)
+#define inner_shake256_flip(sc) shake256_inc_finalize(sc)
+#define inner_shake256_extract(sc, out, len) shake256_inc_squeeze(out, len, sc)
+#define inner_shake256_ctx_release(sc) shake256_inc_ctx_release(sc)
+
+
+/* ==================================================================== */
+/*
+ * Encoding/decoding functions (codec.c).
+ *
+ * Encoding functions take as parameters an output buffer (out) with
+ * a given maximum length (max_out_len); returned value is the actual
+ * number of bytes which have been written. If the output buffer is
+ * not large enough, then 0 is returned (some bytes may have been
+ * written to the buffer). If 'out' is NULL, then 'max_out_len' is
+ * ignored; instead, the function computes and returns the actual
+ * required output length (in bytes).
+ *
+ * Decoding functions take as parameters an input buffer (in) with
+ * its maximum length (max_in_len); returned value is the actual number
+ * of bytes that have been read from the buffer. If the provided length
+ * is too short, then 0 is returned.
+ *
+ * Values to encode or decode are vectors of integers, with N = 2^logn
+ * elements.
+ *
+ * Three encoding formats are defined:
+ *
+ * - modq: sequence of values modulo 12289, each encoded over exactly
+ * 14 bits. The encoder and decoder verify that integers are within
+ * the valid range (0..12288). Values are arrays of uint16.
+ *
+ * - trim: sequence of signed integers, a specified number of bits
+ * each. The number of bits is provided as parameter and includes
+ * the sign bit. Each integer x must be such that |x| < 2^(bits-1)
+ * (which means that the -2^(bits-1) value is forbidden); encode and
+ * decode functions check that property. Values are arrays of
+ * int16_t or int8_t, corresponding to names 'trim_i16' and
+ * 'trim_i8', respectively.
+ *
+ * - comp: variable-length encoding for signed integers; each integer
+ * uses a minimum of 9 bits, possibly more. This is normally used
+ * only for signatures.
+ *
+ */
+
+size_t PQCLEAN_FALCON512_CLEAN_modq_encode(void *out, size_t max_out_len,
+ const uint16_t *x, unsigned logn);
+size_t PQCLEAN_FALCON512_CLEAN_trim_i16_encode(void *out, size_t max_out_len,
+ const int16_t *x, unsigned logn, unsigned bits);
+size_t PQCLEAN_FALCON512_CLEAN_trim_i8_encode(void *out, size_t max_out_len,
+ const int8_t *x, unsigned logn, unsigned bits);
+size_t PQCLEAN_FALCON512_CLEAN_comp_encode(void *out, size_t max_out_len,
+ const int16_t *x, unsigned logn);
+
+size_t PQCLEAN_FALCON512_CLEAN_modq_decode(uint16_t *x, unsigned logn,
+ const void *in, size_t max_in_len);
+size_t PQCLEAN_FALCON512_CLEAN_trim_i16_decode(int16_t *x, unsigned logn, unsigned bits,
+ const void *in, size_t max_in_len);
+size_t PQCLEAN_FALCON512_CLEAN_trim_i8_decode(int8_t *x, unsigned logn, unsigned bits,
+ const void *in, size_t max_in_len);
+size_t PQCLEAN_FALCON512_CLEAN_comp_decode(int16_t *x, unsigned logn,
+ const void *in, size_t max_in_len);
+
+/*
+ * Number of bits for key elements, indexed by logn (1 to 10). This
+ * is at most 8 bits for all degrees, but some degrees may have shorter
+ * elements.
+ */
+extern const uint8_t PQCLEAN_FALCON512_CLEAN_max_fg_bits[];
+extern const uint8_t PQCLEAN_FALCON512_CLEAN_max_FG_bits[];
+
+/*
+ * Maximum size, in bits, of elements in a signature, indexed by logn
+ * (1 to 10). The size includes the sign bit.
+ */
+extern const uint8_t PQCLEAN_FALCON512_CLEAN_max_sig_bits[];
+
+/* ==================================================================== */
+/*
+ * Support functions used for both signature generation and signature
+ * verification (common.c).
+ */
+
+/*
+ * From a SHAKE256 context (must be already flipped), produce a new
+ * point. This is the non-constant-time version, which may leak enough
+ * information to serve as a stop condition on a brute force attack on
+ * the hashed message (provided that the nonce value is known).
+ */
+void PQCLEAN_FALCON512_CLEAN_hash_to_point_vartime(inner_shake256_context *sc,
+ uint16_t *x, unsigned logn);
+
+/*
+ * From a SHAKE256 context (must be already flipped), produce a new
+ * point. The temporary buffer (tmp) must have room for 2*2^logn bytes.
+ * This function is constant-time but is typically more expensive than
+ * PQCLEAN_FALCON512_CLEAN_hash_to_point_vartime().
+ *
+ * tmp[] must have 16-bit alignment.
+ */
+void PQCLEAN_FALCON512_CLEAN_hash_to_point_ct(inner_shake256_context *sc,
+ uint16_t *x, unsigned logn, uint8_t *tmp);
+
+/*
+ * Tell whether a given vector (2N coordinates, in two halves) is
+ * acceptable as a signature. This compares the appropriate norm of the
+ * vector with the acceptance bound. Returned value is 1 on success
+ * (vector is short enough to be acceptable), 0 otherwise.
+ */
+int PQCLEAN_FALCON512_CLEAN_is_short(const int16_t *s1, const int16_t *s2, unsigned logn);
+
+/*
+ * Tell whether a given vector (2N coordinates, in two halves) is
+ * acceptable as a signature. Instead of the first half s1, this
+ * function receives the "saturated squared norm" of s1, i.e. the
+ * sum of the squares of the coordinates of s1 (saturated at 2^32-1
+ * if the sum exceeds 2^31-1).
+ *
+ * Returned value is 1 on success (vector is short enough to be
+ * acceptable), 0 otherwise.
+ */
+int PQCLEAN_FALCON512_CLEAN_is_short_half(uint32_t sqn, const int16_t *s2, unsigned logn);
+
+/* ==================================================================== */
+/*
+ * Signature verification functions (vrfy.c).
+ */
+
+/*
+ * Convert a public key to NTT + Montgomery format. Conversion is done
+ * in place.
+ */
+void PQCLEAN_FALCON512_CLEAN_to_ntt_monty(uint16_t *h, unsigned logn);
+
+/*
+ * Internal signature verification code:
+ * c0[] contains the hashed nonce+message
+ * s2[] is the decoded signature
+ * h[] contains the public key, in NTT + Montgomery format
+ * logn is the degree log
+ * tmp[] temporary, must have at least 2*2^logn bytes
+ * Returned value is 1 on success, 0 on error.
+ *
+ * tmp[] must have 16-bit alignment.
+ */
+int PQCLEAN_FALCON512_CLEAN_verify_raw(const uint16_t *c0, const int16_t *s2,
+ const uint16_t *h, unsigned logn, uint8_t *tmp);
+
+/*
+ * Compute the public key h[], given the private key elements f[] and
+ * g[]. This computes h = g/f mod phi mod q, where phi is the polynomial
+ * modulus. This function returns 1 on success, 0 on error (an error is
+ * reported if f is not invertible mod phi mod q).
+ *
+ * The tmp[] array must have room for at least 2*2^logn elements.
+ * tmp[] must have 16-bit alignment.
+ */
+int PQCLEAN_FALCON512_CLEAN_compute_public(uint16_t *h,
+ const int8_t *f, const int8_t *g, unsigned logn, uint8_t *tmp);
+
+/*
+ * Recompute the fourth private key element. Private key consists in
+ * four polynomials with small coefficients f, g, F and G, which are
+ * such that fG - gF = q mod phi; furthermore, f is invertible modulo
+ * phi and modulo q. This function recomputes G from f, g and F.
+ *
+ * The tmp[] array must have room for at least 4*2^logn bytes.
+ *
+ * Returned value is 1 on success, 0 on error (f not invertible).
+ * tmp[] must have 16-bit alignment.
+ */
+int PQCLEAN_FALCON512_CLEAN_complete_private(int8_t *G,
+ const int8_t *f, const int8_t *g, const int8_t *F,
+ unsigned logn, uint8_t *tmp);
+
+/*
+ * Test whether a given polynomial is invertible modulo phi and q.
+ * Polynomial coefficients are small integers.
+ *
+ * tmp[] must have 16-bit alignment.
+ */
+int PQCLEAN_FALCON512_CLEAN_is_invertible(
+ const int16_t *s2, unsigned logn, uint8_t *tmp);
+
+/*
+ * Count the number of elements of value zero in the NTT representation
+ * of the given polynomial: this is the number of primitive 2n-th roots
+ * of unity (modulo q = 12289) that are roots of the provided polynomial
+ * (taken modulo q).
+ *
+ * tmp[] must have 16-bit alignment.
+ */
+int PQCLEAN_FALCON512_CLEAN_count_nttzero(const int16_t *sig, unsigned logn, uint8_t *tmp);
+
+/*
+ * Internal signature verification with public key recovery:
+ * h[] receives the public key (NOT in NTT/Montgomery format)
+ * c0[] contains the hashed nonce+message
+ * s1[] is the first signature half
+ * s2[] is the second signature half
+ * logn is the degree log
+ * tmp[] temporary, must have at least 2*2^logn bytes
+ * Returned value is 1 on success, 0 on error. Success is returned if
+ * the signature is a short enough vector; in that case, the public
+ * key has been written to h[]. However, the caller must still
+ * verify that h[] is the correct value (e.g. with regards to a known
+ * hash of the public key).
+ *
+ * h[] may not overlap with any of the other arrays.
+ *
+ * tmp[] must have 16-bit alignment.
+ */
+int PQCLEAN_FALCON512_CLEAN_verify_recover(uint16_t *h,
+ const uint16_t *c0, const int16_t *s1, const int16_t *s2,
+ unsigned logn, uint8_t *tmp);
+
+/* ==================================================================== */
+/*
+ * Implementation of floating-point real numbers (fpr.h, fpr.c).
+ */
+
+/*
+ * Real numbers are implemented by an extra header file, included below.
+ * This is meant to support pluggable implementations. The default
+ * implementation relies on the C type 'double'.
+ *
+ * The included file must define the following types, functions and
+ * constants:
+ *
+ * fpr
+ * type for a real number
+ *
+ * fpr fpr_of(int64_t i)
+ * cast an integer into a real number; source must be in the
+ * -(2^63-1)..+(2^63-1) range
+ *
+ * fpr fpr_scaled(int64_t i, int sc)
+ * compute i*2^sc as a real number; source 'i' must be in the
+ * -(2^63-1)..+(2^63-1) range
+ *
+ * fpr fpr_ldexp(fpr x, int e)
+ * compute x*2^e
+ *
+ * int64_t fpr_rint(fpr x)
+ * round x to the nearest integer; x must be in the -(2^63-1)
+ * to +(2^63-1) range
+ *
+ * int64_t fpr_trunc(fpr x)
+ * round to an integer; this rounds towards zero; value must
+ * be in the -(2^63-1) to +(2^63-1) range
+ *
+ * fpr fpr_add(fpr x, fpr y)
+ * compute x + y
+ *
+ * fpr fpr_sub(fpr x, fpr y)
+ * compute x - y
+ *
+ * fpr fpr_neg(fpr x)
+ * compute -x
+ *
+ * fpr fpr_half(fpr x)
+ * compute x/2
+ *
+ * fpr fpr_double(fpr x)
+ * compute x*2
+ *
+ * fpr fpr_mul(fpr x, fpr y)
+ * compute x * y
+ *
+ * fpr fpr_sqr(fpr x)
+ * compute x * x
+ *
+ * fpr fpr_inv(fpr x)
+ * compute 1/x
+ *
+ * fpr fpr_div(fpr x, fpr y)
+ * compute x/y
+ *
+ * fpr fpr_sqrt(fpr x)
+ * compute the square root of x
+ *
+ * int fpr_lt(fpr x, fpr y)
+ * return 1 if x < y, 0 otherwise
+ *
+ * uint64_t fpr_expm_p63(fpr x)
+ * return exp(x), assuming that 0 <= x < log(2). Returned value
+ * is scaled to 63 bits (i.e. it really returns 2^63*exp(-x),
+ * rounded to the nearest integer). Computation should have a
+ * precision of at least 45 bits.
+ *
+ * const fpr fpr_gm_tab[]
+ * array of constants for FFT / iFFT
+ *
+ * const fpr fpr_p2_tab[]
+ * precomputed powers of 2 (by index, 0 to 10)
+ *
+ * Constants of type 'fpr':
+ *
+ * fpr fpr_q 12289
+ * fpr fpr_inverse_of_q 1/12289
+ * fpr fpr_inv_2sqrsigma0 1/(2*(1.8205^2))
+ * fpr fpr_inv_sigma 1/(1.55*sqrt(12289))
+ * fpr fpr_sigma_min_9 1.291500756233514568549480827642
+ * fpr fpr_sigma_min_10 1.311734375905083682667395805765
+ * fpr fpr_log2 log(2)
+ * fpr fpr_inv_log2 1/log(2)
+ * fpr fpr_bnorm_max 16822.4121
+ * fpr fpr_zero 0
+ * fpr fpr_one 1
+ * fpr fpr_two 2
+ * fpr fpr_onehalf 0.5
+ * fpr fpr_ptwo31 2^31
+ * fpr fpr_ptwo31m1 2^31-1
+ * fpr fpr_mtwo31m1 -(2^31-1)
+ * fpr fpr_ptwo63m1 2^63-1
+ * fpr fpr_mtwo63m1 -(2^63-1)
+ * fpr fpr_ptwo63 2^63
+ */
+
+/* ==================================================================== */
+/*
+ * RNG (rng.c).
+ *
+ * A PRNG based on ChaCha20 is implemented; it is seeded from a SHAKE256
+ * context (flipped) and is used for bulk pseudorandom generation.
+ * A system-dependent seed generator is also provided.
+ */
+
+/*
+ * Obtain a random seed from the system RNG.
+ *
+ * Returned value is 1 on success, 0 on error.
+ */
+int PQCLEAN_FALCON512_CLEAN_get_seed(void *seed, size_t seed_len);
+
+/*
+ * Structure for a PRNG. This includes a large buffer so that values
+ * get generated in advance. The 'state' is used to keep the current
+ * PRNG algorithm state (contents depend on the selected algorithm).
+ *
+ * The unions with 'dummy_u64' are there to ensure proper alignment for
+ * 64-bit direct access.
+ */
+typedef struct {
+    /* Buffer of pseudorandom bytes generated in advance. */
+    union {
+        uint8_t d[512]; /* MUST be 512, exactly */
+        uint64_t dummy_u64; /* forces 64-bit alignment of d[] */
+    } buf;
+    /* Index of the next unread byte in buf.d[]. */
+    size_t ptr;
+    /* Algorithm state; contents depend on the selected algorithm. */
+    union {
+        uint8_t d[256];
+        uint64_t dummy_u64; /* forces 64-bit alignment of d[] */
+    } state;
+    /* Identifier of the PRNG algorithm in use. */
+    int type;
+} prng;
+
+/*
+ * Instantiate a PRNG. That PRNG will feed over the provided SHAKE256
+ * context (in "flipped" state) to obtain its initial state.
+ */
+void PQCLEAN_FALCON512_CLEAN_prng_init(prng *p, inner_shake256_context *src);
+
+/*
+ * Refill the PRNG buffer. This is normally invoked automatically, and
+ * is declared here only so that prng_get_u64() may be inlined.
+ */
+void PQCLEAN_FALCON512_CLEAN_prng_refill(prng *p);
+
+/*
+ * Get some bytes from a PRNG.
+ */
+void PQCLEAN_FALCON512_CLEAN_prng_get_bytes(prng *p, void *dst, size_t len);
+
+/*
+ * Get a 64-bit random value from a PRNG.
+ */
+#define prng_get_u64 PQCLEAN_FALCON512_CLEAN_prng_get_u64
+uint64_t prng_get_u64(prng *p);
+
+/*
+ * Get an 8-bit random value from a PRNG.
+ */
+#define prng_get_u8 PQCLEAN_FALCON512_CLEAN_prng_get_u8
+unsigned prng_get_u8(prng *p);
+
+/* ==================================================================== */
+/*
+ * FFT (falcon-fft.c).
+ *
+ * A real polynomial is represented as an array of N 'fpr' elements.
+ * The FFT representation of a real polynomial contains N/2 complex
+ * elements; each is stored as two real numbers, for the real and
+ * imaginary parts, respectively. See falcon-fft.c for details on the
+ * internal representation.
+ */
+
+/*
+ * Compute FFT in-place: the source array should contain a real
+ * polynomial (N coefficients); its storage area is reused to store
+ * the FFT representation of that polynomial (N/2 complex numbers).
+ *
+ * 'logn' MUST lie between 1 and 10 (inclusive).
+ */
+void PQCLEAN_FALCON512_CLEAN_FFT(fpr *f, unsigned logn);
+
+/*
+ * Compute the inverse FFT in-place: the source array should contain the
+ * FFT representation of a real polynomial (N/2 elements); the resulting
+ * real polynomial (N coefficients of type 'fpr') is written over the
+ * array.
+ *
+ * 'logn' MUST lie between 1 and 10 (inclusive).
+ */
+void PQCLEAN_FALCON512_CLEAN_iFFT(fpr *f, unsigned logn);
+
+/*
+ * Add polynomial b to polynomial a. a and b MUST NOT overlap. This
+ * function works in both normal and FFT representations.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_add(fpr *a, const fpr *b, unsigned logn);
+
+/*
+ * Subtract polynomial b from polynomial a. a and b MUST NOT overlap. This
+ * function works in both normal and FFT representations.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_sub(fpr *a, const fpr *b, unsigned logn);
+
+/*
+ * Negate polynomial a. This function works in both normal and FFT
+ * representations.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_neg(fpr *a, unsigned logn);
+
+/*
+ * Compute adjoint of polynomial a. This function works only in FFT
+ * representation.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_adj_fft(fpr *a, unsigned logn);
+
+/*
+ * Multiply polynomial a with polynomial b. a and b MUST NOT overlap.
+ * This function works only in FFT representation.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_mul_fft(fpr *a, const fpr *b, unsigned logn);
+
+/*
+ * Multiply polynomial a with the adjoint of polynomial b. a and b MUST NOT
+ * overlap. This function works only in FFT representation.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_muladj_fft(fpr *a, const fpr *b, unsigned logn);
+
+/*
+ * Multiply polynomial with its own adjoint. This function works only in FFT
+ * representation.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_mulselfadj_fft(fpr *a, unsigned logn);
+
+/*
+ * Multiply polynomial with a real constant. This function works in both
+ * normal and FFT representations.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_mulconst(fpr *a, fpr x, unsigned logn);
+
+/*
+ * Divide polynomial a by polynomial b, modulo X^N+1 (FFT representation).
+ * a and b MUST NOT overlap.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_div_fft(fpr *a, const fpr *b, unsigned logn);
+
+/*
+ * Given f and g (in FFT representation), compute 1/(f*adj(f)+g*adj(g))
+ * (also in FFT representation). Since the result is auto-adjoint, all its
+ * coordinates in FFT representation are real; as such, only the first N/2
+ * values of d[] are filled (the imaginary parts are skipped).
+ *
+ * Array d MUST NOT overlap with either a or b.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_invnorm2_fft(fpr *d,
+ const fpr *a, const fpr *b, unsigned logn);
+
+/*
+ * Given F, G, f and g (in FFT representation), compute F*adj(f)+G*adj(g)
+ * (also in FFT representation). Destination d MUST NOT overlap with
+ * any of the source arrays.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_add_muladj_fft(fpr *d,
+ const fpr *F, const fpr *G,
+ const fpr *f, const fpr *g, unsigned logn);
+
+/*
+ * Multiply polynomial a by polynomial b, where b is autoadjoint. Both
+ * a and b are in FFT representation. Since b is autoadjoint, all its
+ * FFT coefficients are real, and the array b contains only N/2 elements.
+ * a and b MUST NOT overlap.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_mul_autoadj_fft(fpr *a,
+ const fpr *b, unsigned logn);
+
+/*
+ * Divide polynomial a by polynomial b, where b is autoadjoint. Both
+ * a and b are in FFT representation. Since b is autoadjoint, all its
+ * FFT coefficients are real, and the array b contains only N/2 elements.
+ * a and b MUST NOT overlap.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_div_autoadj_fft(fpr *a,
+ const fpr *b, unsigned logn);
+
+/*
+ * Perform an LDL decomposition of an auto-adjoint matrix G, in FFT
+ * representation. On input, g00, g01 and g11 are provided (where the
+ * matrix G = [[g00, g01], [adj(g01), g11]]). On output, the d00, l10
+ * and d11 values are written in g00, g01 and g11, respectively
+ * (with D = [[d00, 0], [0, d11]] and L = [[1, 0], [l10, 1]]).
+ * (In fact, d00 = g00, so the g00 operand is left unmodified.)
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_LDL_fft(const fpr *g00,
+ fpr *g01, fpr *g11, unsigned logn);
+
+/*
+ * Perform an LDL decomposition of an auto-adjoint matrix G, in FFT
+ * representation. This is identical to poly_LDL_fft() except that
+ * g00, g01 and g11 are unmodified; the outputs d11 and l10 are written
+ * in two other separate buffers provided as extra parameters.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_LDLmv_fft(fpr *d11, fpr *l10,
+ const fpr *g00, const fpr *g01,
+ const fpr *g11, unsigned logn);
+
+/*
+ * Apply "split" operation on a polynomial in FFT representation:
+ * f = f0(x^2) + x*f1(x^2), for half-size polynomials f0 and f1
+ * (polynomials modulo X^(N/2)+1). f0, f1 and f MUST NOT overlap.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_split_fft(fpr *f0, fpr *f1,
+ const fpr *f, unsigned logn);
+
+/*
+ * Apply "merge" operation on two polynomials in FFT representation:
+ * given f0 and f1, polynomials modulo X^(N/2)+1, this function computes
+ * f = f0(x^2) + x*f1(x^2), in FFT representation modulo X^N+1.
+ * f MUST NOT overlap with either f0 or f1.
+ */
+void PQCLEAN_FALCON512_CLEAN_poly_merge_fft(fpr *f,
+ const fpr *f0, const fpr *f1, unsigned logn);
+
+/* ==================================================================== */
+/*
+ * Key pair generation.
+ */
+
+/*
+ * Required sizes of the temporary buffer (in bytes).
+ *
+ * This size is 28*2^logn bytes, except for degrees 2 and 4 (logn = 1
+ * or 2) where it is slightly greater.
+ */
+#define FALCON_KEYGEN_TEMP_1 136
+#define FALCON_KEYGEN_TEMP_2 272
+#define FALCON_KEYGEN_TEMP_3 224
+#define FALCON_KEYGEN_TEMP_4 448
+#define FALCON_KEYGEN_TEMP_5 896
+#define FALCON_KEYGEN_TEMP_6 1792
+#define FALCON_KEYGEN_TEMP_7 3584
+#define FALCON_KEYGEN_TEMP_8 7168
+#define FALCON_KEYGEN_TEMP_9 14336
+#define FALCON_KEYGEN_TEMP_10 28672
+
+/*
+ * Generate a new key pair. Randomness is extracted from the provided
+ * SHAKE256 context, which must have already been seeded and flipped.
+ * The tmp[] array must have suitable size (see FALCON_KEYGEN_TEMP_*
+ * macros) and be aligned for the uint32_t, uint64_t and fpr types.
+ *
+ * The private key elements are written in f, g, F and G, and the
+ * public key is written in h. Either or both of G and h may be NULL,
+ * in which case the corresponding element is not returned (they can
+ * be recomputed from f, g and F).
+ *
+ * tmp[] must have 64-bit alignment.
+ * This function uses floating-point rounding (see set_fpu_cw()).
+ */
+void PQCLEAN_FALCON512_CLEAN_keygen(inner_shake256_context *rng,
+ int8_t *f, int8_t *g, int8_t *F, int8_t *G, uint16_t *h,
+ unsigned logn, uint8_t *tmp);
+
+/* ==================================================================== */
+/*
+ * Signature generation.
+ */
+
+/*
+ * Expand a private key into the B0 matrix in FFT representation and
+ * the LDL tree. All the values are written in 'expanded_key', for
+ * a total of (8*logn+40)*2^logn bytes.
+ *
+ * The tmp[] array must have room for at least 48*2^logn bytes.
+ *
+ * tmp[] must have 64-bit alignment.
+ * This function uses floating-point rounding (see set_fpu_cw()).
+ */
+void PQCLEAN_FALCON512_CLEAN_expand_privkey(fpr *expanded_key,
+ const int8_t *f, const int8_t *g, const int8_t *F, const int8_t *G,
+ unsigned logn, uint8_t *tmp);
+
+/*
+ * Compute a signature over the provided hashed message (hm); the
+ * signature value is one short vector. This function uses an
+ * expanded key (as generated by PQCLEAN_FALCON512_CLEAN_expand_privkey()).
+ *
+ * The sig[] and hm[] buffers may overlap.
+ *
+ * On successful output, the start of the tmp[] buffer contains the s1
+ * vector (as int16_t elements).
+ *
+ * The minimal size (in bytes) of tmp[] is 48*2^logn bytes.
+ *
+ * tmp[] must have 64-bit alignment.
+ * This function uses floating-point rounding (see set_fpu_cw()).
+ */
+void PQCLEAN_FALCON512_CLEAN_sign_tree(int16_t *sig, inner_shake256_context *rng,
+ const fpr *expanded_key,
+ const uint16_t *hm, unsigned logn, uint8_t *tmp);
+
+/*
+ * Compute a signature over the provided hashed message (hm); the
+ * signature value is one short vector. This function uses a raw
+ * key and dynamically recompute the B0 matrix and LDL tree; this
+ * saves RAM since there is no need for an expanded key, but
+ * increases the signature cost.
+ *
+ * The sig[] and hm[] buffers may overlap.
+ *
+ * On successful output, the start of the tmp[] buffer contains the s1
+ * vector (as int16_t elements).
+ *
+ * The minimal size (in bytes) of tmp[] is 72*2^logn bytes.
+ *
+ * tmp[] must have 64-bit alignment.
+ * This function uses floating-point rounding (see set_fpu_cw()).
+ */
+void PQCLEAN_FALCON512_CLEAN_sign_dyn(int16_t *sig, inner_shake256_context *rng,
+ const int8_t *f, const int8_t *g,
+ const int8_t *F, const int8_t *G,
+ const uint16_t *hm, unsigned logn, uint8_t *tmp);
+
+/*
+ * Internal sampler engine. Exported for tests.
+ *
+ * sampler_context wraps around a source of random numbers (PRNG) and
+ * the sigma_min value (nominally dependent on the degree).
+ *
+ * sampler() takes as parameters:
+ * ctx pointer to the sampler_context structure
+ * mu center for the distribution
+ * isigma inverse of the distribution standard deviation
+ * It returns an integer sampled along the Gaussian distribution centered
+ * on mu and of standard deviation sigma = 1/isigma.
+ *
+ * gaussian0_sampler() takes as parameter a pointer to a PRNG, and
+ * returns an integer sampled along a half-Gaussian with standard
+ * deviation sigma0 = 1.8205 (center is 0, returned value is
+ * nonnegative).
+ */
+
+typedef struct {
+    prng p;        /* source of random numbers for the sampler */
+    fpr sigma_min; /* minimum sigma; nominally dependent on the degree */
+} sampler_context;
+
+int PQCLEAN_FALCON512_CLEAN_sampler(void *ctx, fpr mu, fpr isigma);
+
+int PQCLEAN_FALCON512_CLEAN_gaussian0_sampler(prng *p);
+
+/* ==================================================================== */
+
+#endif
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/keygen.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/keygen.c
new file mode 100644
index 000000000..f72ecd991
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/keygen.c
@@ -0,0 +1,4231 @@
+#include "inner.h"
+
+/*
+ * Falcon key pair generation.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+#define MKN(logn) ((size_t)1 << (logn))
+
+/* ==================================================================== */
+/*
+ * Modular arithmetics.
+ *
+ * We implement a few functions for computing modulo a small integer p.
+ *
+ * All functions require that 2^30 < p < 2^31. Moreover, operands must
+ * be in the 0..p-1 range.
+ *
+ * Modular addition and subtraction work for all such p.
+ *
+ * Montgomery multiplication requires that p is odd, and must be provided
+ * with an additional value p0i = -1/p mod 2^31. See below for some basics
+ * on Montgomery multiplication.
+ *
+ * Division computes an inverse modulo p by an exponentiation (with
+ * exponent p-2): this works only if p is prime. Multiplication
+ * requirements also apply, i.e. p must be odd and p0i must be provided.
+ *
+ * The NTT and inverse NTT need all of the above, and also that
+ * p = 1 mod 2048.
+ *
+ * -----------------------------------------------------------------------
+ *
+ * We use Montgomery representation with 31-bit values:
+ *
+ * Let R = 2^31 mod p. When 2^30 < p < 2^31, R = 2^31 - p.
+ * Montgomery representation of an integer x modulo p is x*R mod p.
+ *
+ * Montgomery multiplication computes (x*y)/R mod p for
+ * operands x and y. Therefore:
+ *
+ * - if operands are x*R and y*R (Montgomery representations of x and
+ * y), then Montgomery multiplication computes (x*R*y*R)/R = (x*y)*R
+ * mod p, which is the Montgomery representation of the product x*y;
+ *
+ * - if operands are x*R and y (or x and y*R), then Montgomery
+ * multiplication returns x*y mod p: mixed-representation
+ * multiplications yield results in normal representation.
+ *
+ * To convert to Montgomery representation, we multiply by R, which is done
+ * by Montgomery-multiplying by R^2. Stand-alone conversion back from
+ * Montgomery representation is Montgomery-multiplication by 1.
+ */
+
+/*
+ * Precomputed small primes. Each element contains the following:
+ *
+ * p The prime itself.
+ *
+ * g A primitive root of phi = X^N+1 (in field Z_p).
+ *
+ * s The inverse of the product of all previous primes in the array,
+ * computed modulo p and in Montgomery representation.
+ *
+ * All primes are such that p = 1 mod 2048, and are lower than 2^31. They
+ * are listed in decreasing order.
+ */
+
+typedef struct {
+ uint32_t p;
+ uint32_t g;
+ uint32_t s;
+} small_prime;
+
+static const small_prime PRIMES[] = {
+ { 2147473409, 383167813, 10239 },
+ { 2147389441, 211808905, 471403745 },
+ { 2147387393, 37672282, 1329335065 },
+ { 2147377153, 1977035326, 968223422 },
+ { 2147358721, 1067163706, 132460015 },
+ { 2147352577, 1606082042, 598693809 },
+ { 2147346433, 2033915641, 1056257184 },
+ { 2147338241, 1653770625, 421286710 },
+ { 2147309569, 631200819, 1111201074 },
+ { 2147297281, 2038364663, 1042003613 },
+ { 2147295233, 1962540515, 19440033 },
+ { 2147239937, 2100082663, 353296760 },
+ { 2147235841, 1991153006, 1703918027 },
+ { 2147217409, 516405114, 1258919613 },
+ { 2147205121, 409347988, 1089726929 },
+ { 2147196929, 927788991, 1946238668 },
+ { 2147178497, 1136922411, 1347028164 },
+ { 2147100673, 868626236, 701164723 },
+ { 2147082241, 1897279176, 617820870 },
+ { 2147074049, 1888819123, 158382189 },
+ { 2147051521, 25006327, 522758543 },
+ { 2147043329, 327546255, 37227845 },
+ { 2147039233, 766324424, 1133356428 },
+ { 2146988033, 1862817362, 73861329 },
+ { 2146963457, 404622040, 653019435 },
+ { 2146959361, 1936581214, 995143093 },
+ { 2146938881, 1559770096, 634921513 },
+ { 2146908161, 422623708, 1985060172 },
+ { 2146885633, 1751189170, 298238186 },
+ { 2146871297, 578919515, 291810829 },
+ { 2146846721, 1114060353, 915902322 },
+ { 2146834433, 2069565474, 47859524 },
+ { 2146818049, 1552824584, 646281055 },
+ { 2146775041, 1906267847, 1597832891 },
+ { 2146756609, 1847414714, 1228090888 },
+ { 2146744321, 1818792070, 1176377637 },
+ { 2146738177, 1118066398, 1054971214 },
+ { 2146736129, 52057278, 933422153 },
+ { 2146713601, 592259376, 1406621510 },
+ { 2146695169, 263161877, 1514178701 },
+ { 2146656257, 685363115, 384505091 },
+ { 2146650113, 927727032, 537575289 },
+ { 2146646017, 52575506, 1799464037 },
+ { 2146643969, 1276803876, 1348954416 },
+ { 2146603009, 814028633, 1521547704 },
+ { 2146572289, 1846678872, 1310832121 },
+ { 2146547713, 919368090, 1019041349 },
+ { 2146508801, 671847612, 38582496 },
+ { 2146492417, 283911680, 532424562 },
+ { 2146490369, 1780044827, 896447978 },
+ { 2146459649, 327980850, 1327906900 },
+ { 2146447361, 1310561493, 958645253 },
+ { 2146441217, 412148926, 287271128 },
+ { 2146437121, 293186449, 2009822534 },
+ { 2146430977, 179034356, 1359155584 },
+ { 2146418689, 1517345488, 1790248672 },
+ { 2146406401, 1615820390, 1584833571 },
+ { 2146404353, 826651445, 607120498 },
+ { 2146379777, 3816988, 1897049071 },
+ { 2146363393, 1221409784, 1986921567 },
+ { 2146355201, 1388081168, 849968120 },
+ { 2146336769, 1803473237, 1655544036 },
+ { 2146312193, 1023484977, 273671831 },
+ { 2146293761, 1074591448, 467406983 },
+ { 2146283521, 831604668, 1523950494 },
+ { 2146203649, 712865423, 1170834574 },
+ { 2146154497, 1764991362, 1064856763 },
+ { 2146142209, 627386213, 1406840151 },
+ { 2146127873, 1638674429, 2088393537 },
+ { 2146099201, 1516001018, 690673370 },
+ { 2146093057, 1294931393, 315136610 },
+ { 2146091009, 1942399533, 973539425 },
+ { 2146078721, 1843461814, 2132275436 },
+ { 2146060289, 1098740778, 360423481 },
+ { 2146048001, 1617213232, 1951981294 },
+ { 2146041857, 1805783169, 2075683489 },
+ { 2146019329, 272027909, 1753219918 },
+ { 2145986561, 1206530344, 2034028118 },
+ { 2145976321, 1243769360, 1173377644 },
+ { 2145964033, 887200839, 1281344586 },
+ { 2145906689, 1651026455, 906178216 },
+ { 2145875969, 1673238256, 1043521212 },
+ { 2145871873, 1226591210, 1399796492 },
+ { 2145841153, 1465353397, 1324527802 },
+ { 2145832961, 1150638905, 554084759 },
+ { 2145816577, 221601706, 427340863 },
+ { 2145785857, 608896761, 316590738 },
+ { 2145755137, 1712054942, 1684294304 },
+ { 2145742849, 1302302867, 724873116 },
+ { 2145728513, 516717693, 431671476 },
+ { 2145699841, 524575579, 1619722537 },
+ { 2145691649, 1925625239, 982974435 },
+ { 2145687553, 463795662, 1293154300 },
+ { 2145673217, 771716636, 881778029 },
+ { 2145630209, 1509556977, 837364988 },
+ { 2145595393, 229091856, 851648427 },
+ { 2145587201, 1796903241, 635342424 },
+ { 2145525761, 715310882, 1677228081 },
+ { 2145495041, 1040930522, 200685896 },
+ { 2145466369, 949804237, 1809146322 },
+ { 2145445889, 1673903706, 95316881 },
+ { 2145390593, 806941852, 1428671135 },
+ { 2145372161, 1402525292, 159350694 },
+ { 2145361921, 2124760298, 1589134749 },
+ { 2145359873, 1217503067, 1561543010 },
+ { 2145355777, 338341402, 83865711 },
+ { 2145343489, 1381532164, 641430002 },
+ { 2145325057, 1883895478, 1528469895 },
+ { 2145318913, 1335370424, 65809740 },
+ { 2145312769, 2000008042, 1919775760 },
+ { 2145300481, 961450962, 1229540578 },
+ { 2145282049, 910466767, 1964062701 },
+ { 2145232897, 816527501, 450152063 },
+ { 2145218561, 1435128058, 1794509700 },
+ { 2145187841, 33505311, 1272467582 },
+ { 2145181697, 269767433, 1380363849 },
+ { 2145175553, 56386299, 1316870546 },
+ { 2145079297, 2106880293, 1391797340 },
+ { 2145021953, 1347906152, 720510798 },
+ { 2145015809, 206769262, 1651459955 },
+ { 2145003521, 1885513236, 1393381284 },
+ { 2144960513, 1810381315, 31937275 },
+ { 2144944129, 1306487838, 2019419520 },
+ { 2144935937, 37304730, 1841489054 },
+ { 2144894977, 1601434616, 157985831 },
+ { 2144888833, 98749330, 2128592228 },
+ { 2144880641, 1772327002, 2076128344 },
+ { 2144864257, 1404514762, 2029969964 },
+ { 2144827393, 801236594, 406627220 },
+ { 2144806913, 349217443, 1501080290 },
+ { 2144796673, 1542656776, 2084736519 },
+ { 2144778241, 1210734884, 1746416203 },
+ { 2144759809, 1146598851, 716464489 },
+ { 2144757761, 286328400, 1823728177 },
+ { 2144729089, 1347555695, 1836644881 },
+ { 2144727041, 1795703790, 520296412 },
+ { 2144696321, 1302475157, 852964281 },
+ { 2144667649, 1075877614, 504992927 },
+ { 2144573441, 198765808, 1617144982 },
+ { 2144555009, 321528767, 155821259 },
+ { 2144550913, 814139516, 1819937644 },
+ { 2144536577, 571143206, 962942255 },
+ { 2144524289, 1746733766, 2471321 },
+ { 2144512001, 1821415077, 124190939 },
+ { 2144468993, 917871546, 1260072806 },
+ { 2144458753, 378417981, 1569240563 },
+ { 2144421889, 175229668, 1825620763 },
+ { 2144409601, 1699216963, 351648117 },
+ { 2144370689, 1071885991, 958186029 },
+ { 2144348161, 1763151227, 540353574 },
+ { 2144335873, 1060214804, 919598847 },
+ { 2144329729, 663515846, 1448552668 },
+ { 2144327681, 1057776305, 590222840 },
+ { 2144309249, 1705149168, 1459294624 },
+ { 2144296961, 325823721, 1649016934 },
+ { 2144290817, 738775789, 447427206 },
+ { 2144243713, 962347618, 893050215 },
+ { 2144237569, 1655257077, 900860862 },
+ { 2144161793, 242206694, 1567868672 },
+ { 2144155649, 769415308, 1247993134 },
+ { 2144137217, 320492023, 515841070 },
+ { 2144120833, 1639388522, 770877302 },
+ { 2144071681, 1761785233, 964296120 },
+ { 2144065537, 419817825, 204564472 },
+ { 2144028673, 666050597, 2091019760 },
+ { 2144010241, 1413657615, 1518702610 },
+ { 2143952897, 1238327946, 475672271 },
+ { 2143940609, 307063413, 1176750846 },
+ { 2143918081, 2062905559, 786785803 },
+ { 2143899649, 1338112849, 1562292083 },
+ { 2143891457, 68149545, 87166451 },
+ { 2143885313, 921750778, 394460854 },
+ { 2143854593, 719766593, 133877196 },
+ { 2143836161, 1149399850, 1861591875 },
+ { 2143762433, 1848739366, 1335934145 },
+ { 2143756289, 1326674710, 102999236 },
+ { 2143713281, 808061791, 1156900308 },
+ { 2143690753, 388399459, 1926468019 },
+ { 2143670273, 1427891374, 1756689401 },
+ { 2143666177, 1912173949, 986629565 },
+ { 2143645697, 2041160111, 371842865 },
+ { 2143641601, 1279906897, 2023974350 },
+ { 2143635457, 720473174, 1389027526 },
+ { 2143621121, 1298309455, 1732632006 },
+ { 2143598593, 1548762216, 1825417506 },
+ { 2143567873, 620475784, 1073787233 },
+ { 2143561729, 1932954575, 949167309 },
+ { 2143553537, 354315656, 1652037534 },
+ { 2143541249, 577424288, 1097027618 },
+ { 2143531009, 357862822, 478640055 },
+ { 2143522817, 2017706025, 1550531668 },
+ { 2143506433, 2078127419, 1824320165 },
+ { 2143488001, 613475285, 1604011510 },
+ { 2143469569, 1466594987, 502095196 },
+ { 2143426561, 1115430331, 1044637111 },
+ { 2143383553, 9778045, 1902463734 },
+ { 2143377409, 1557401276, 2056861771 },
+ { 2143363073, 652036455, 1965915971 },
+ { 2143260673, 1464581171, 1523257541 },
+ { 2143246337, 1876119649, 764541916 },
+ { 2143209473, 1614992673, 1920672844 },
+ { 2143203329, 981052047, 2049774209 },
+ { 2143160321, 1847355533, 728535665 },
+ { 2143129601, 965558457, 603052992 },
+ { 2143123457, 2140817191, 8348679 },
+ { 2143100929, 1547263683, 694209023 },
+ { 2143092737, 643459066, 1979934533 },
+ { 2143082497, 188603778, 2026175670 },
+ { 2143062017, 1657329695, 377451099 },
+ { 2143051777, 114967950, 979255473 },
+ { 2143025153, 1698431342, 1449196896 },
+ { 2143006721, 1862741675, 1739650365 },
+ { 2142996481, 756660457, 996160050 },
+ { 2142976001, 927864010, 1166847574 },
+ { 2142965761, 905070557, 661974566 },
+ { 2142916609, 40932754, 1787161127 },
+ { 2142892033, 1987985648, 675335382 },
+ { 2142885889, 797497211, 1323096997 },
+ { 2142871553, 2068025830, 1411877159 },
+ { 2142861313, 1217177090, 1438410687 },
+ { 2142830593, 409906375, 1767860634 },
+ { 2142803969, 1197788993, 359782919 },
+ { 2142785537, 643817365, 513932862 },
+ { 2142779393, 1717046338, 218943121 },
+ { 2142724097, 89336830, 416687049 },
+ { 2142707713, 5944581, 1356813523 },
+ { 2142658561, 887942135, 2074011722 },
+ { 2142638081, 151851972, 1647339939 },
+ { 2142564353, 1691505537, 1483107336 },
+ { 2142533633, 1989920200, 1135938817 },
+ { 2142529537, 959263126, 1531961857 },
+ { 2142527489, 453251129, 1725566162 },
+ { 2142502913, 1536028102, 182053257 },
+ { 2142498817, 570138730, 701443447 },
+ { 2142416897, 326965800, 411931819 },
+ { 2142363649, 1675665410, 1517191733 },
+ { 2142351361, 968529566, 1575712703 },
+ { 2142330881, 1384953238, 1769087884 },
+ { 2142314497, 1977173242, 1833745524 },
+ { 2142289921, 95082313, 1714775493 },
+ { 2142283777, 109377615, 1070584533 },
+ { 2142277633, 16960510, 702157145 },
+ { 2142263297, 553850819, 431364395 },
+ { 2142208001, 241466367, 2053967982 },
+ { 2142164993, 1795661326, 1031836848 },
+ { 2142097409, 1212530046, 712772031 },
+ { 2142087169, 1763869720, 822276067 },
+ { 2142078977, 644065713, 1765268066 },
+ { 2142074881, 112671944, 643204925 },
+ { 2142044161, 1387785471, 1297890174 },
+ { 2142025729, 783885537, 1000425730 },
+ { 2142011393, 905662232, 1679401033 },
+ { 2141974529, 799788433, 468119557 },
+ { 2141943809, 1932544124, 449305555 },
+ { 2141933569, 1527403256, 841867925 },
+ { 2141931521, 1247076451, 743823916 },
+ { 2141902849, 1199660531, 401687910 },
+ { 2141890561, 150132350, 1720336972 },
+ { 2141857793, 1287438162, 663880489 },
+ { 2141833217, 618017731, 1819208266 },
+ { 2141820929, 999578638, 1403090096 },
+ { 2141786113, 81834325, 1523542501 },
+ { 2141771777, 120001928, 463556492 },
+ { 2141759489, 122455485, 2124928282 },
+ { 2141749249, 141986041, 940339153 },
+ { 2141685761, 889088734, 477141499 },
+ { 2141673473, 324212681, 1122558298 },
+ { 2141669377, 1175806187, 1373818177 },
+ { 2141655041, 1113654822, 296887082 },
+ { 2141587457, 991103258, 1585913875 },
+ { 2141583361, 1401451409, 1802457360 },
+ { 2141575169, 1571977166, 712760980 },
+ { 2141546497, 1107849376, 1250270109 },
+ { 2141515777, 196544219, 356001130 },
+ { 2141495297, 1733571506, 1060744866 },
+ { 2141483009, 321552363, 1168297026 },
+ { 2141458433, 505818251, 733225819 },
+ { 2141360129, 1026840098, 948342276 },
+ { 2141325313, 945133744, 2129965998 },
+ { 2141317121, 1871100260, 1843844634 },
+ { 2141286401, 1790639498, 1750465696 },
+ { 2141267969, 1376858592, 186160720 },
+ { 2141255681, 2129698296, 1876677959 },
+ { 2141243393, 2138900688, 1340009628 },
+ { 2141214721, 1933049835, 1087819477 },
+ { 2141212673, 1898664939, 1786328049 },
+ { 2141202433, 990234828, 940682169 },
+ { 2141175809, 1406392421, 993089586 },
+ { 2141165569, 1263518371, 289019479 },
+ { 2141073409, 1485624211, 507864514 },
+ { 2141052929, 1885134788, 311252465 },
+ { 2141040641, 1285021247, 280941862 },
+ { 2141028353, 1527610374, 375035110 },
+ { 2141011969, 1400626168, 164696620 },
+ { 2140999681, 632959608, 966175067 },
+ { 2140997633, 2045628978, 1290889438 },
+ { 2140993537, 1412755491, 375366253 },
+ { 2140942337, 719477232, 785367828 },
+ { 2140925953, 45224252, 836552317 },
+ { 2140917761, 1157376588, 1001839569 },
+ { 2140887041, 278480752, 2098732796 },
+ { 2140837889, 1663139953, 924094810 },
+ { 2140788737, 802501511, 2045368990 },
+ { 2140766209, 1820083885, 1800295504 },
+ { 2140764161, 1169561905, 2106792035 },
+ { 2140696577, 127781498, 1885987531 },
+ { 2140684289, 16014477, 1098116827 },
+ { 2140653569, 665960598, 1796728247 },
+ { 2140594177, 1043085491, 377310938 },
+ { 2140579841, 1732838211, 1504505945 },
+ { 2140569601, 302071939, 358291016 },
+ { 2140567553, 192393733, 1909137143 },
+ { 2140557313, 406595731, 1175330270 },
+ { 2140549121, 1748850918, 525007007 },
+ { 2140477441, 499436566, 1031159814 },
+ { 2140469249, 1886004401, 1029951320 },
+ { 2140426241, 1483168100, 1676273461 },
+ { 2140420097, 1779917297, 846024476 },
+ { 2140413953, 522948893, 1816354149 },
+ { 2140383233, 1931364473, 1296921241 },
+ { 2140366849, 1917356555, 147196204 },
+ { 2140354561, 16466177, 1349052107 },
+ { 2140348417, 1875366972, 1860485634 },
+ { 2140323841, 456498717, 1790256483 },
+ { 2140321793, 1629493973, 150031888 },
+ { 2140315649, 1904063898, 395510935 },
+ { 2140280833, 1784104328, 831417909 },
+ { 2140250113, 256087139, 697349101 },
+ { 2140229633, 388553070, 243875754 },
+ { 2140223489, 747459608, 1396270850 },
+ { 2140200961, 507423743, 1895572209 },
+ { 2140162049, 580106016, 2045297469 },
+ { 2140149761, 712426444, 785217995 },
+ { 2140137473, 1441607584, 536866543 },
+ { 2140119041, 346538902, 1740434653 },
+ { 2140090369, 282642885, 21051094 },
+ { 2140076033, 1407456228, 319910029 },
+ { 2140047361, 1619330500, 1488632070 },
+ { 2140041217, 2089408064, 2012026134 },
+ { 2140008449, 1705524800, 1613440760 },
+ { 2139924481, 1846208233, 1280649481 },
+ { 2139906049, 989438755, 1185646076 },
+ { 2139867137, 1522314850, 372783595 },
+ { 2139842561, 1681587377, 216848235 },
+ { 2139826177, 2066284988, 1784999464 },
+ { 2139824129, 480888214, 1513323027 },
+ { 2139789313, 847937200, 858192859 },
+ { 2139783169, 1642000434, 1583261448 },
+ { 2139770881, 940699589, 179702100 },
+ { 2139768833, 315623242, 964612676 },
+ { 2139666433, 331649203, 764666914 },
+ { 2139641857, 2118730799, 1313764644 },
+ { 2139635713, 519149027, 519212449 },
+ { 2139598849, 1526413634, 1769667104 },
+ { 2139574273, 551148610, 820739925 },
+ { 2139568129, 1386800242, 472447405 },
+ { 2139549697, 813760130, 1412328531 },
+ { 2139537409, 1615286260, 1609362979 },
+ { 2139475969, 1352559299, 1696720421 },
+ { 2139455489, 1048691649, 1584935400 },
+ { 2139432961, 836025845, 950121150 },
+ { 2139424769, 1558281165, 1635486858 },
+ { 2139406337, 1728402143, 1674423301 },
+ { 2139396097, 1727715782, 1483470544 },
+ { 2139383809, 1092853491, 1741699084 },
+ { 2139369473, 690776899, 1242798709 },
+ { 2139351041, 1768782380, 2120712049 },
+ { 2139334657, 1739968247, 1427249225 },
+ { 2139332609, 1547189119, 623011170 },
+ { 2139310081, 1346827917, 1605466350 },
+ { 2139303937, 369317948, 828392831 },
+ { 2139301889, 1560417239, 1788073219 },
+ { 2139283457, 1303121623, 595079358 },
+ { 2139248641, 1354555286, 573424177 },
+ { 2139240449, 60974056, 885781403 },
+ { 2139222017, 355573421, 1221054839 },
+ { 2139215873, 566477826, 1724006500 },
+ { 2139150337, 871437673, 1609133294 },
+ { 2139144193, 1478130914, 1137491905 },
+ { 2139117569, 1854880922, 964728507 },
+ { 2139076609, 202405335, 756508944 },
+ { 2139062273, 1399715741, 884826059 },
+ { 2139045889, 1051045798, 1202295476 },
+ { 2139033601, 1707715206, 632234634 },
+ { 2139006977, 2035853139, 231626690 },
+ { 2138951681, 183867876, 838350879 },
+ { 2138945537, 1403254661, 404460202 },
+ { 2138920961, 310865011, 1282911681 },
+ { 2138910721, 1328496553, 103472415 },
+ { 2138904577, 78831681, 993513549 },
+ { 2138902529, 1319697451, 1055904361 },
+ { 2138816513, 384338872, 1706202469 },
+ { 2138810369, 1084868275, 405677177 },
+ { 2138787841, 401181788, 1964773901 },
+ { 2138775553, 1850532988, 1247087473 },
+ { 2138767361, 874261901, 1576073565 },
+ { 2138757121, 1187474742, 993541415 },
+ { 2138748929, 1782458888, 1043206483 },
+ { 2138744833, 1221500487, 800141243 },
+ { 2138738689, 413465368, 1450660558 },
+ { 2138695681, 739045140, 342611472 },
+ { 2138658817, 1355845756, 672674190 },
+ { 2138644481, 608379162, 1538874380 },
+ { 2138632193, 1444914034, 686911254 },
+ { 2138607617, 484707818, 1435142134 },
+ { 2138591233, 539460669, 1290458549 },
+ { 2138572801, 2093538990, 2011138646 },
+ { 2138552321, 1149786988, 1076414907 },
+ { 2138546177, 840688206, 2108985273 },
+ { 2138533889, 209669619, 198172413 },
+ { 2138523649, 1975879426, 1277003968 },
+ { 2138490881, 1351891144, 1976858109 },
+ { 2138460161, 1817321013, 1979278293 },
+ { 2138429441, 1950077177, 203441928 },
+ { 2138400769, 908970113, 628395069 },
+ { 2138398721, 219890864, 758486760 },
+ { 2138376193, 1306654379, 977554090 },
+ { 2138351617, 298822498, 2004708503 },
+ { 2138337281, 441457816, 1049002108 },
+ { 2138320897, 1517731724, 1442269609 },
+ { 2138290177, 1355911197, 1647139103 },
+ { 2138234881, 531313247, 1746591962 },
+ { 2138214401, 1899410930, 781416444 },
+ { 2138202113, 1813477173, 1622508515 },
+ { 2138191873, 1086458299, 1025408615 },
+ { 2138183681, 1998800427, 827063290 },
+ { 2138173441, 1921308898, 749670117 },
+ { 2138103809, 1620902804, 2126787647 },
+ { 2138099713, 828647069, 1892961817 },
+ { 2138085377, 179405355, 1525506535 },
+ { 2138060801, 615683235, 1259580138 },
+ { 2138044417, 2030277840, 1731266562 },
+ { 2138042369, 2087222316, 1627902259 },
+ { 2138032129, 126388712, 1108640984 },
+ { 2138011649, 715026550, 1017980050 },
+ { 2137993217, 1693714349, 1351778704 },
+ { 2137888769, 1289762259, 1053090405 },
+ { 2137853953, 199991890, 1254192789 },
+ { 2137833473, 941421685, 896995556 },
+ { 2137817089, 750416446, 1251031181 },
+ { 2137792513, 798075119, 368077456 },
+ { 2137786369, 878543495, 1035375025 },
+ { 2137767937, 9351178, 1156563902 },
+ { 2137755649, 1382297614, 1686559583 },
+ { 2137724929, 1345472850, 1681096331 },
+ { 2137704449, 834666929, 630551727 },
+ { 2137673729, 1646165729, 1892091571 },
+ { 2137620481, 778943821, 48456461 },
+ { 2137618433, 1730837875, 1713336725 },
+ { 2137581569, 805610339, 1378891359 },
+ { 2137538561, 204342388, 1950165220 },
+ { 2137526273, 1947629754, 1500789441 },
+ { 2137516033, 719902645, 1499525372 },
+ { 2137491457, 230451261, 556382829 },
+ { 2137440257, 979573541, 412760291 },
+ { 2137374721, 927841248, 1954137185 },
+ { 2137362433, 1243778559, 861024672 },
+ { 2137313281, 1341338501, 980638386 },
+ { 2137311233, 937415182, 1793212117 },
+ { 2137255937, 795331324, 1410253405 },
+ { 2137243649, 150756339, 1966999887 },
+ { 2137182209, 163346914, 1939301431 },
+ { 2137171969, 1952552395, 758913141 },
+ { 2137159681, 570788721, 218668666 },
+ { 2137147393, 1896656810, 2045670345 },
+ { 2137141249, 358493842, 518199643 },
+ { 2137139201, 1505023029, 674695848 },
+ { 2137133057, 27911103, 830956306 },
+ { 2137122817, 439771337, 1555268614 },
+ { 2137116673, 790988579, 1871449599 },
+ { 2137110529, 432109234, 811805080 },
+ { 2137102337, 1357900653, 1184997641 },
+ { 2137098241, 515119035, 1715693095 },
+ { 2137090049, 408575203, 2085660657 },
+ { 2137085953, 2097793407, 1349626963 },
+ { 2137055233, 1556739954, 1449960883 },
+ { 2137030657, 1545758650, 1369303716 },
+ { 2136987649, 332602570, 103875114 },
+ { 2136969217, 1499989506, 1662964115 },
+ { 2136924161, 857040753, 4738842 },
+ { 2136895489, 1948872712, 570436091 },
+ { 2136893441, 58969960, 1568349634 },
+ { 2136887297, 2127193379, 273612548 },
+ { 2136850433, 111208983, 1181257116 },
+ { 2136809473, 1627275942, 1680317971 },
+ { 2136764417, 1574888217, 14011331 },
+ { 2136741889, 14011055, 1129154251 },
+ { 2136727553, 35862563, 1838555253 },
+ { 2136721409, 310235666, 1363928244 },
+ { 2136698881, 1612429202, 1560383828 },
+ { 2136649729, 1138540131, 800014364 },
+ { 2136606721, 602323503, 1433096652 },
+ { 2136563713, 182209265, 1919611038 },
+ { 2136555521, 324156477, 165591039 },
+ { 2136549377, 195513113, 217165345 },
+ { 2136526849, 1050768046, 939647887 },
+ { 2136508417, 1886286237, 1619926572 },
+ { 2136477697, 609647664, 35065157 },
+ { 2136471553, 679352216, 1452259468 },
+ { 2136457217, 128630031, 824816521 },
+ { 2136422401, 19787464, 1526049830 },
+ { 2136420353, 698316836, 1530623527 },
+ { 2136371201, 1651862373, 1804812805 },
+ { 2136334337, 326596005, 336977082 },
+ { 2136322049, 63253370, 1904972151 },
+ { 2136297473, 312176076, 172182411 },
+ { 2136248321, 381261841, 369032670 },
+ { 2136242177, 358688773, 1640007994 },
+ { 2136229889, 512677188, 75585225 },
+ { 2136219649, 2095003250, 1970086149 },
+ { 2136207361, 1909650722, 537760675 },
+ { 2136176641, 1334616195, 1533487619 },
+ { 2136158209, 2096285632, 1793285210 },
+ { 2136143873, 1897347517, 293843959 },
+ { 2136133633, 923586222, 1022655978 },
+ { 2136096769, 1464868191, 1515074410 },
+ { 2136094721, 2020679520, 2061636104 },
+ { 2136076289, 290798503, 1814726809 },
+ { 2136041473, 156415894, 1250757633 },
+ { 2135996417, 297459940, 1132158924 },
+ { 2135955457, 538755304, 1688831340 },
+ { 0, 0, 0 }
+};
+
/*
 * Reduce a small signed integer modulo a small prime. The source
 * value x MUST be such that -p < x < p.
 */
static inline uint32_t
modp_set(int32_t x, uint32_t p) {
    /*
     * Seen as an unsigned 32-bit value, a negative x has its top bit
     * set; in that case, add p back. Branchless, hence constant-time.
     */
    uint32_t v = (uint32_t)x;
    uint32_t neg_mask = -(v >> 31);   /* all-ones iff x < 0 */
    return v + (p & neg_mask);
}
+
/*
 * Normalize a modular integer around 0: map x in 0..p-1 to the
 * signed representative in -(p-1)/2..+(p-1)/2.
 */
static inline int32_t
modp_norm(uint32_t x, uint32_t p) {
    uint32_t half = (p + 1) >> 1;
    /* mask is all-ones when x >= (p+1)/2, zero otherwise */
    uint32_t mask = ((x - half) >> 31) - 1;
    return (int32_t)(x - (p & mask));
}
+
/*
 * Compute -1/p mod 2^31. This works for all odd integers p that fit
 * on 31 bits.
 */
static uint32_t
modp_ninv31(uint32_t p) {
    int i;
    uint32_t y;

    /*
     * Newton-Raphson over the 2-adic integers: y = 2 - p is an
     * inverse of p modulo 2^2, and each iteration doubles the
     * number of correct low bits, so four iterations suffice for
     * 2^32 (hence also 2^31).
     */
    y = 2 - p;
    for (i = 0; i < 4; i ++) {
        y *= 2 - p * y;
    }
    return (uint32_t)0x7FFFFFFF & -y;
}
+
/*
 * Compute R = 2^31 mod p.
 */
static inline uint32_t
modp_R(uint32_t p) {
    /*
     * p lies strictly between 2^30 and 2^31, hence 2^31 mod p is
     * exactly 2^31 - p.
     */
    return (uint32_t)0x80000000 - p;
}
+
/*
 * Addition modulo p.
 */
static inline uint32_t
modp_add(uint32_t a, uint32_t b, uint32_t p) {
    /*
     * Compute a + b - p; if that wrapped below zero (top bit set),
     * add p back. Branchless, hence constant-time.
     */
    uint32_t t = a + b - p;
    return t + (p & -(t >> 31));
}
+
/*
 * Subtraction modulo p.
 */
static inline uint32_t
modp_sub(uint32_t a, uint32_t b, uint32_t p) {
    /*
     * Compute a - b; if that wrapped below zero (top bit set),
     * add p back. Branchless, hence constant-time.
     */
    uint32_t t = a - b;
    return t + (p & -(t >> 31));
}
+
+/*
+ * Halving modulo p.
+ */
+/* unused
+static inline uint32_t
+modp_half(uint32_t a, uint32_t p)
+{
+ a += p & -(a & 1);
+ return a >> 1;
+}
+*/
+
/*
 * Montgomery multiplication modulo p: returns (a*b)/R mod p, with
 * R = 2^31. The 'p0i' value is -1/p mod 2^31. It is required that
 * p is an odd integer.
 */
static inline uint32_t
modp_montymul(uint32_t a, uint32_t b, uint32_t p, uint32_t p0i) {
    uint64_t t, m;
    uint32_t r;

    t = (uint64_t)a * (uint64_t)b;
    /* m*p cancels the low 31 bits of t, making t + m divisible by 2^31 */
    m = ((t * p0i) & (uint64_t)0x7FFFFFFF) * p;
    r = (uint32_t)((t + m) >> 31) - p;
    /* add p back if the subtraction above wrapped around */
    r += p & -(r >> 31);
    return r;
}
+
/*
 * Compute R2 = 2^62 mod p.
 */
static uint32_t
modp_R2(uint32_t p, uint32_t p0i) {
    int i;
    uint32_t z;

    /*
     * Start from 2*R mod p, i.e. the value 2 in Montgomery
     * representation (equivalently, 2^32 mod p).
     */
    z = modp_add(modp_R(p), modp_R(p), p);

    /*
     * Each Montgomery squaring doubles the exponent: five squarings
     * turn rep(2) into rep(2^32), i.e. 2^63 mod p.
     */
    for (i = 0; i < 5; i ++) {
        z = modp_montymul(z, z, p, p0i);
    }

    /*
     * Halve the value mod p (adding p first when it is odd) to
     * obtain 2^62 mod p.
     */
    return (z + (p & -(z & 1))) >> 1;
}
+
/*
 * Compute 2^(31*x) modulo p. This works for integers x up to 2^11.
 * p must be prime such that 2^30 < p < 2^31; p0i must be equal to
 * -1/p mod 2^31; R2 must be equal to 2^62 mod p.
 */
static inline uint32_t
modp_Rx(unsigned x, uint32_t p, uint32_t p0i, uint32_t R2) {
    unsigned e;
    uint32_t acc, sq;

    /*
     * 2^(31*x) = (2^31)^(x-1) * 2^31. R2 = 2^62 mod p is the
     * Montgomery representation of 2^31, and modp_R(p) that of 1;
     * a standard square-and-multiply over e = x-1 then yields the
     * result, already converted out of Montgomery representation.
     */
    e = x - 1;
    acc = modp_R(p);
    sq = R2;
    while (e != 0) {
        if ((e & 1) != 0) {
            acc = modp_montymul(acc, sq, p, p0i);
        }
        sq = modp_montymul(sq, sq, p, p0i);
        e >>= 1;
    }
    return acc;
}
+
/*
 * Division modulo p. If the divisor (b) is 0, then 0 is returned.
 * This function computes proper results only when p is prime.
 * Parameters:
 *   a     dividend
 *   b     divisor
 *   p     odd prime modulus
 *   p0i   -1/p mod 2^31
 *   R     2^31 mod p
 */
static uint32_t
modp_div(uint32_t a, uint32_t b, uint32_t p, uint32_t p0i, uint32_t R) {
    uint32_t z, e;
    int i;

    /*
     * Compute 1/b as b^(p-2) mod p (Fermat's little theorem), with
     * a constant-time square-and-multiply over the bits of e = p-2.
     */
    e = p - 2;
    z = R;
    for (i = 30; i >= 0; i --) {
        uint32_t z2;

        z = modp_montymul(z, z, p, p0i);
        z2 = modp_montymul(z, b, p, p0i);
        /* constant-time select: keep z2 only when bit i of e is 1 */
        z ^= (z ^ z2) & -(uint32_t)((e >> i) & 1);
    }

    /*
     * The loop above just assumed that b was in Montgomery
     * representation, i.e. really contained b*R; under that
     * assumption, it returns 1/b in Montgomery representation,
     * which is R/b. But we gave it b in normal representation,
     * so the loop really returned R/(b/R) = R^2/b.
     *
     * We want a/b, so we need one Montgomery multiplication with a,
     * which also removes one of the R factors, and another such
     * multiplication to remove the second R factor.
     */
    z = modp_montymul(z, 1, p, p0i);
    return modp_montymul(a, z, p, p0i);
}
+
+/*
+ * Bit-reversal index table.
+ */
+static const uint16_t REV10[] = {
+ 0, 512, 256, 768, 128, 640, 384, 896, 64, 576, 320, 832,
+ 192, 704, 448, 960, 32, 544, 288, 800, 160, 672, 416, 928,
+ 96, 608, 352, 864, 224, 736, 480, 992, 16, 528, 272, 784,
+ 144, 656, 400, 912, 80, 592, 336, 848, 208, 720, 464, 976,
+ 48, 560, 304, 816, 176, 688, 432, 944, 112, 624, 368, 880,
+ 240, 752, 496, 1008, 8, 520, 264, 776, 136, 648, 392, 904,
+ 72, 584, 328, 840, 200, 712, 456, 968, 40, 552, 296, 808,
+ 168, 680, 424, 936, 104, 616, 360, 872, 232, 744, 488, 1000,
+ 24, 536, 280, 792, 152, 664, 408, 920, 88, 600, 344, 856,
+ 216, 728, 472, 984, 56, 568, 312, 824, 184, 696, 440, 952,
+ 120, 632, 376, 888, 248, 760, 504, 1016, 4, 516, 260, 772,
+ 132, 644, 388, 900, 68, 580, 324, 836, 196, 708, 452, 964,
+ 36, 548, 292, 804, 164, 676, 420, 932, 100, 612, 356, 868,
+ 228, 740, 484, 996, 20, 532, 276, 788, 148, 660, 404, 916,
+ 84, 596, 340, 852, 212, 724, 468, 980, 52, 564, 308, 820,
+ 180, 692, 436, 948, 116, 628, 372, 884, 244, 756, 500, 1012,
+ 12, 524, 268, 780, 140, 652, 396, 908, 76, 588, 332, 844,
+ 204, 716, 460, 972, 44, 556, 300, 812, 172, 684, 428, 940,
+ 108, 620, 364, 876, 236, 748, 492, 1004, 28, 540, 284, 796,
+ 156, 668, 412, 924, 92, 604, 348, 860, 220, 732, 476, 988,
+ 60, 572, 316, 828, 188, 700, 444, 956, 124, 636, 380, 892,
+ 252, 764, 508, 1020, 2, 514, 258, 770, 130, 642, 386, 898,
+ 66, 578, 322, 834, 194, 706, 450, 962, 34, 546, 290, 802,
+ 162, 674, 418, 930, 98, 610, 354, 866, 226, 738, 482, 994,
+ 18, 530, 274, 786, 146, 658, 402, 914, 82, 594, 338, 850,
+ 210, 722, 466, 978, 50, 562, 306, 818, 178, 690, 434, 946,
+ 114, 626, 370, 882, 242, 754, 498, 1010, 10, 522, 266, 778,
+ 138, 650, 394, 906, 74, 586, 330, 842, 202, 714, 458, 970,
+ 42, 554, 298, 810, 170, 682, 426, 938, 106, 618, 362, 874,
+ 234, 746, 490, 1002, 26, 538, 282, 794, 154, 666, 410, 922,
+ 90, 602, 346, 858, 218, 730, 474, 986, 58, 570, 314, 826,
+ 186, 698, 442, 954, 122, 634, 378, 890, 250, 762, 506, 1018,
+ 6, 518, 262, 774, 134, 646, 390, 902, 70, 582, 326, 838,
+ 198, 710, 454, 966, 38, 550, 294, 806, 166, 678, 422, 934,
+ 102, 614, 358, 870, 230, 742, 486, 998, 22, 534, 278, 790,
+ 150, 662, 406, 918, 86, 598, 342, 854, 214, 726, 470, 982,
+ 54, 566, 310, 822, 182, 694, 438, 950, 118, 630, 374, 886,
+ 246, 758, 502, 1014, 14, 526, 270, 782, 142, 654, 398, 910,
+ 78, 590, 334, 846, 206, 718, 462, 974, 46, 558, 302, 814,
+ 174, 686, 430, 942, 110, 622, 366, 878, 238, 750, 494, 1006,
+ 30, 542, 286, 798, 158, 670, 414, 926, 94, 606, 350, 862,
+ 222, 734, 478, 990, 62, 574, 318, 830, 190, 702, 446, 958,
+ 126, 638, 382, 894, 254, 766, 510, 1022, 1, 513, 257, 769,
+ 129, 641, 385, 897, 65, 577, 321, 833, 193, 705, 449, 961,
+ 33, 545, 289, 801, 161, 673, 417, 929, 97, 609, 353, 865,
+ 225, 737, 481, 993, 17, 529, 273, 785, 145, 657, 401, 913,
+ 81, 593, 337, 849, 209, 721, 465, 977, 49, 561, 305, 817,
+ 177, 689, 433, 945, 113, 625, 369, 881, 241, 753, 497, 1009,
+ 9, 521, 265, 777, 137, 649, 393, 905, 73, 585, 329, 841,
+ 201, 713, 457, 969, 41, 553, 297, 809, 169, 681, 425, 937,
+ 105, 617, 361, 873, 233, 745, 489, 1001, 25, 537, 281, 793,
+ 153, 665, 409, 921, 89, 601, 345, 857, 217, 729, 473, 985,
+ 57, 569, 313, 825, 185, 697, 441, 953, 121, 633, 377, 889,
+ 249, 761, 505, 1017, 5, 517, 261, 773, 133, 645, 389, 901,
+ 69, 581, 325, 837, 197, 709, 453, 965, 37, 549, 293, 805,
+ 165, 677, 421, 933, 101, 613, 357, 869, 229, 741, 485, 997,
+ 21, 533, 277, 789, 149, 661, 405, 917, 85, 597, 341, 853,
+ 213, 725, 469, 981, 53, 565, 309, 821, 181, 693, 437, 949,
+ 117, 629, 373, 885, 245, 757, 501, 1013, 13, 525, 269, 781,
+ 141, 653, 397, 909, 77, 589, 333, 845, 205, 717, 461, 973,
+ 45, 557, 301, 813, 173, 685, 429, 941, 109, 621, 365, 877,
+ 237, 749, 493, 1005, 29, 541, 285, 797, 157, 669, 413, 925,
+ 93, 605, 349, 861, 221, 733, 477, 989, 61, 573, 317, 829,
+ 189, 701, 445, 957, 125, 637, 381, 893, 253, 765, 509, 1021,
+ 3, 515, 259, 771, 131, 643, 387, 899, 67, 579, 323, 835,
+ 195, 707, 451, 963, 35, 547, 291, 803, 163, 675, 419, 931,
+ 99, 611, 355, 867, 227, 739, 483, 995, 19, 531, 275, 787,
+ 147, 659, 403, 915, 83, 595, 339, 851, 211, 723, 467, 979,
+ 51, 563, 307, 819, 179, 691, 435, 947, 115, 627, 371, 883,
+ 243, 755, 499, 1011, 11, 523, 267, 779, 139, 651, 395, 907,
+ 75, 587, 331, 843, 203, 715, 459, 971, 43, 555, 299, 811,
+ 171, 683, 427, 939, 107, 619, 363, 875, 235, 747, 491, 1003,
+ 27, 539, 283, 795, 155, 667, 411, 923, 91, 603, 347, 859,
+ 219, 731, 475, 987, 59, 571, 315, 827, 187, 699, 443, 955,
+ 123, 635, 379, 891, 251, 763, 507, 1019, 7, 519, 263, 775,
+ 135, 647, 391, 903, 71, 583, 327, 839, 199, 711, 455, 967,
+ 39, 551, 295, 807, 167, 679, 423, 935, 103, 615, 359, 871,
+ 231, 743, 487, 999, 23, 535, 279, 791, 151, 663, 407, 919,
+ 87, 599, 343, 855, 215, 727, 471, 983, 55, 567, 311, 823,
+ 183, 695, 439, 951, 119, 631, 375, 887, 247, 759, 503, 1015,
+ 15, 527, 271, 783, 143, 655, 399, 911, 79, 591, 335, 847,
+ 207, 719, 463, 975, 47, 559, 303, 815, 175, 687, 431, 943,
+ 111, 623, 367, 879, 239, 751, 495, 1007, 31, 543, 287, 799,
+ 159, 671, 415, 927, 95, 607, 351, 863, 223, 735, 479, 991,
+ 63, 575, 319, 831, 191, 703, 447, 959, 127, 639, 383, 895,
+ 255, 767, 511, 1023
+};
+
+/*
+ * Compute the roots for NTT and inverse NTT (binary case). Input
+ * parameter g is a primitive 2048-th root of 1 modulo p (i.e. g^1024 =
+ * -1 mod p). This fills gm[] and igm[] with powers of g and 1/g:
+ * gm[rev(i)] = g^i mod p
+ * igm[rev(i)] = (1/g)^i mod p
+ * where rev() is the "bit reversal" function over 10 bits. It fills
+ * the arrays only up to N = 2^logn values.
+ *
+ * The values stored in gm[] and igm[] are in Montgomery representation.
+ *
+ * p must be a prime such that p = 1 mod 2048.
+ */
+static void
+modp_mkgm2(uint32_t *gm, uint32_t *igm, unsigned logn,
+ uint32_t g, uint32_t p, uint32_t p0i) {
+ size_t u, n;
+ unsigned k;
+ uint32_t ig, x1, x2, R2;
+
+ n = (size_t)1 << logn;
+
+ /*
+ * We want g such that g^(2N) = 1 mod p, but the provided
+ * generator has order 2048. We must square it a few times.
+ */
+ R2 = modp_R2(p, p0i);
+ /* Convert g to Montgomery representation (multiply by R2 = R^2). */
+ g = modp_montymul(g, R2, p, p0i);
+ /* Each squaring halves the order; after 10-logn squarings, g has order 2N. */
+ for (k = logn; k < 10; k ++) {
+ g = modp_montymul(g, g, p, p0i);
+ }
+
+ /* ig = 1/g, in Montgomery representation (R2 / g, with R = modp_R(p)). */
+ ig = modp_div(R2, g, p, p0i, modp_R(p));
+ k = 10 - logn;
+ /* x1 = g^0, x2 = (1/g)^0 = 1 in Montgomery representation. */
+ x1 = x2 = modp_R(p);
+ for (u = 0; u < n; u ++) {
+ size_t v;
+
+ /*
+ * REV10 is a 10-bit bit-reversal table; scaling the index
+ * by 2^k restricts it to the n = 2^logn entries in use.
+ */
+ v = REV10[u << k];
+ gm[v] = x1;
+ igm[v] = x2;
+ x1 = modp_montymul(x1, g, p, p0i);
+ x2 = modp_montymul(x2, ig, p, p0i);
+ }
+}
+
+/*
+ * Compute the NTT over a polynomial (binary case). Polynomial elements
+ * are a[0], a[stride], a[2 * stride]...
+ */
+static void
+modp_NTT2_ext(uint32_t *a, size_t stride, const uint32_t *gm, unsigned logn,
+ uint32_t p, uint32_t p0i) {
+ size_t t, m, n;
+
+ if (logn == 0) {
+ return;
+ }
+ n = (size_t)1 << logn;
+ /*
+ * Iterative FFT-style NTT: m counts the sub-transforms (doubles
+ * each pass) while t is the distance between butterfly inputs
+ * (halves each pass, starting at n).
+ */
+ t = n;
+ for (m = 1; m < n; m <<= 1) {
+ size_t ht, u, v1;
+
+ ht = t >> 1;
+ for (u = 0, v1 = 0; u < m; u ++, v1 += t) {
+ uint32_t s;
+ size_t v;
+ uint32_t *r1, *r2;
+
+ /* Twiddle factor (Montgomery representation, bit-reversed order). */
+ s = gm[m + u];
+ r1 = a + v1 * stride;
+ r2 = r1 + ht * stride;
+ /* Butterfly: (x, y) -> (x + s*y, x - s*y) mod p. */
+ for (v = 0; v < ht; v ++, r1 += stride, r2 += stride) {
+ uint32_t x, y;
+
+ x = *r1;
+ y = modp_montymul(*r2, s, p, p0i);
+ *r1 = modp_add(x, y, p);
+ *r2 = modp_sub(x, y, p);
+ }
+ }
+ t = ht;
+ }
+}
+
+/*
+ * Compute the inverse NTT over a polynomial (binary case).
+ */
+static void
+modp_iNTT2_ext(uint32_t *a, size_t stride, const uint32_t *igm, unsigned logn,
+ uint32_t p, uint32_t p0i) {
+ size_t t, m, n, k;
+ uint32_t ni;
+ uint32_t *r;
+
+ if (logn == 0) {
+ return;
+ }
+ n = (size_t)1 << logn;
+ /*
+ * Mirror of modp_NTT2_ext(): t (butterfly distance) doubles each
+ * pass while m (number of sub-transforms) halves.
+ */
+ t = 1;
+ for (m = n; m > 1; m >>= 1) {
+ size_t hm, dt, u, v1;
+
+ hm = m >> 1;
+ dt = t << 1;
+ for (u = 0, v1 = 0; u < hm; u ++, v1 += dt) {
+ uint32_t s;
+ size_t v;
+ uint32_t *r1, *r2;
+
+ /* Inverse twiddle factor (Montgomery, bit-reversed order). */
+ s = igm[hm + u];
+ r1 = a + v1 * stride;
+ r2 = r1 + t * stride;
+ /* Inverse butterfly: (x, y) -> (x + y, s*(x - y)) mod p. */
+ for (v = 0; v < t; v ++, r1 += stride, r2 += stride) {
+ uint32_t x, y;
+
+ x = *r1;
+ y = *r2;
+ *r1 = modp_add(x, y, p);
+ *r2 = modp_montymul(
+ modp_sub(x, y, p), s, p, p0i);
+ }
+ }
+ t = dt;
+ }
+
+ /*
+ * We need 1/n in Montgomery representation, i.e. R/n. Since
+ * 1 <= logn <= 10, R/n is an integer; moreover, R/n <= 2^30 < p,
+ * thus a simple shift will do.
+ */
+ ni = (uint32_t)1 << (31 - logn);
+ for (k = 0, r = a; k < n; k ++, r += stride) {
+ *r = modp_montymul(*r, ni, p, p0i);
+ }
+}
+
+/*
+ * Simplified macros for NTT and iNTT (binary case) when the elements
+ * are consecutive in RAM.
+ */
+#define modp_NTT2(a, gm, logn, p, p0i) modp_NTT2_ext(a, 1, gm, logn, p, p0i)
+#define modp_iNTT2(a, igm, logn, p, p0i) modp_iNTT2_ext(a, 1, igm, logn, p, p0i)
+
+/*
+ * Given polynomial f in NTT representation modulo p, compute f' of degree
+ * less than N/2 such that f' = f0^2 - X*f1^2, where f0 and f1 are
+ * polynomials of degree less than N/2 such that f = f0(X^2) + X*f1(X^2).
+ *
+ * The new polynomial is written "in place" over the first N/2 elements
+ * of f.
+ *
+ * If applied logn times successively on a given polynomial, the resulting
+ * degree-0 polynomial is the resultant of f and X^N+1 modulo p.
+ *
+ * This function applies only to the binary case; it is invoked from
+ * solve_NTRU_binary_depth1().
+ */
+static void
+modp_poly_rec_res(uint32_t *f, unsigned logn,
+ uint32_t p, uint32_t p0i, uint32_t R2) {
+ size_t hn, u;
+
+ hn = (size_t)1 << (logn - 1);
+ for (u = 0; u < hn; u ++) {
+ uint32_t w0, w1;
+
+ /*
+ * In NTT representation, entries 2u and 2u+1 are the
+ * evaluations of f at a root w and its negation -w; their
+ * product is the evaluation of f0^2 - X*f1^2 at w^2. The
+ * extra multiplication by R2 compensates the Montgomery
+ * factor consumed by the first montymul.
+ */
+ w0 = f[(u << 1) + 0];
+ w1 = f[(u << 1) + 1];
+ f[u] = modp_montymul(modp_montymul(w0, w1, p, p0i), R2, p, p0i);
+ }
+}
+
+/* ==================================================================== */
+/*
+ * Custom bignum implementation.
+ *
+ * This is a very reduced set of functionalities. We need to do the
+ * following operations:
+ *
+ * - Rebuild the resultant and the polynomial coefficients from their
+ * values modulo small primes (of length 31 bits each).
+ *
+ * - Compute an extended GCD between the two computed resultants.
+ *
+ * - Extract top bits and add scaled values during the successive steps
+ * of Babai rounding.
+ *
+ * When rebuilding values using CRT, we must also recompute the product
+ * of the small prime factors. We always do it one small factor at a
+ * time, so the "complicated" operations can be done modulo the small
+ * prime with the modp_* functions. CRT coefficients (inverses) are
+ * precomputed.
+ *
+ * All values are positive until the last step: when the polynomial
+ * coefficients have been rebuilt, we normalize them around 0. But then,
+ * only additions and subtractions on the upper few bits are needed
+ * afterwards.
+ *
+ * We keep big integers as arrays of 31-bit words (in uint32_t values);
+ * the top bit of each uint32_t is kept equal to 0. Using 31-bit words
+ * makes it easier to keep track of carries. When negative values are
+ * used, two's complement is used.
+ */
+
+/*
+ * Subtract integer b from integer a. Both integers are supposed to have
+ * the same size. The carry (0 or 1) is returned. Source arrays a and b
+ * MUST be distinct.
+ *
+ * The operation is performed as described above if ctl = 1. If
+ * ctl = 0, the value a[] is unmodified, but all memory accesses are
+ * still performed, and the carry is computed and returned.
+ */
+static uint32_t
+zint_sub(uint32_t *a, const uint32_t *b, size_t len,
+ uint32_t ctl) {
+ size_t u;
+ uint32_t cc, m;
+
+ cc = 0;
+ /* m = all-ones if ctl = 1, all-zeros if ctl = 0 (constant-time mask). */
+ m = -ctl;
+ for (u = 0; u < len; u ++) {
+ uint32_t aw, w;
+
+ aw = a[u];
+ w = aw - b[u] - cc;
+ /* Borrow: words are 31-bit, so bit 31 of w is set on underflow. */
+ cc = w >> 31;
+ /* Write back the 31-bit result only when ctl = 1; always access memory. */
+ aw ^= ((w & 0x7FFFFFFF) ^ aw) & m;
+ a[u] = aw;
+ }
+ return cc;
+}
+
+/*
+ * Multiply the provided big integer m with a small value x.
+ * This function assumes that x < 2^31. The carry word is returned.
+ */
+static uint32_t
+zint_mul_small(uint32_t *m, size_t mlen, uint32_t x) {
+ size_t u;
+ uint32_t cc;
+
+ cc = 0;
+ for (u = 0; u < mlen; u ++) {
+ uint64_t z;
+
+ /*
+ * 31-bit word times x < 2^31, plus carry: fits in 64 bits.
+ * Low 31 bits are the new word, the rest is the next carry.
+ */
+ z = (uint64_t)m[u] * (uint64_t)x + cc;
+ m[u] = (uint32_t)z & 0x7FFFFFFF;
+ cc = (uint32_t)(z >> 31);
+ }
+ return cc;
+}
+
+/*
+ * Reduce a big integer d modulo a small integer p.
+ * Rules:
+ * d is unsigned
+ * p is prime
+ * 2^30 < p < 2^31
+ * p0i = -(1/p) mod 2^31
+ * R2 = 2^62 mod p
+ */
+static uint32_t
+zint_mod_small_unsigned(const uint32_t *d, size_t dlen,
+ uint32_t p, uint32_t p0i, uint32_t R2) {
+ uint32_t x;
+ size_t u;
+
+ /*
+ * Algorithm: we inject words one by one, starting with the high
+ * word. Each step is:
+ * - multiply x by 2^31
+ * - add new word
+ */
+ x = 0;
+ u = dlen;
+ while (u -- > 0) {
+ uint32_t w;
+
+ /* montymul by R2 = 2^62 mod p multiplies x by 2^31 mod p. */
+ x = modp_montymul(x, R2, p, p0i);
+ /* Reduce the 31-bit word into [0, p-1] (add p back if it underflowed). */
+ w = d[u] - p;
+ w += p & -(w >> 31);
+ x = modp_add(x, w, p);
+ }
+ return x;
+}
+
+/*
+ * Similar to zint_mod_small_unsigned(), except that d may be signed.
+ * Extra parameter is Rx = 2^(31*dlen) mod p.
+ */
+static uint32_t
+zint_mod_small_signed(const uint32_t *d, size_t dlen,
+ uint32_t p, uint32_t p0i, uint32_t R2, uint32_t Rx) {
+ uint32_t z;
+
+ if (dlen == 0) {
+ return 0;
+ }
+ /*
+ * Interpret d as unsigned, then correct: if the sign bit (bit 30
+ * of the top 31-bit word) is set, the two's complement value is
+ * the unsigned value minus 2^(31*dlen), i.e. minus Rx mod p.
+ */
+ z = zint_mod_small_unsigned(d, dlen, p, p0i, R2);
+ z = modp_sub(z, Rx & -(d[dlen - 1] >> 30), p);
+ return z;
+}
+
+/*
+ * Add y*s to x. x and y initially have length 'len' words; the new x
+ * has length 'len+1' words. 's' must fit on 31 bits. x[] and y[] must
+ * not overlap.
+ */
+static void
+zint_add_mul_small(uint32_t *x,
+ const uint32_t *y, size_t len, uint32_t s) {
+ size_t u;
+ uint32_t cc;
+
+ cc = 0;
+ for (u = 0; u < len; u ++) {
+ uint32_t xw, yw;
+ uint64_t z;
+
+ /*
+ * y word (31 bits) times s (31 bits) plus x word plus
+ * carry: the sum fits in 64 bits without overflow.
+ */
+ xw = x[u];
+ yw = y[u];
+ z = (uint64_t)yw * (uint64_t)s + (uint64_t)xw + (uint64_t)cc;
+ x[u] = (uint32_t)z & 0x7FFFFFFF;
+ cc = (uint32_t)(z >> 31);
+ }
+ /* Final carry becomes the new top word (x grows to len+1 words). */
+ x[len] = cc;
+}
+
+/*
+ * Normalize a modular integer around 0: if x > p/2, then x is replaced
+ * with x - p (signed encoding with two's complement); otherwise, x is
+ * untouched. The two integers x and p are encoded over the same length.
+ */
+static void
+zint_norm_zero(uint32_t *x, const uint32_t *p, size_t len) {
+ size_t u;
+ uint32_t r, bb;
+
+ /*
+ * Compare x with p/2. We use the shifted version of p, and p
+ * is odd, so we really compare with (p-1)/2; we want to perform
+ * the subtraction if and only if x > (p-1)/2.
+ */
+ r = 0;
+ bb = 0;
+ u = len;
+ /* Scan from the most significant word down (constant-time compare). */
+ while (u -- > 0) {
+ uint32_t wx, wp, cc;
+
+ /*
+ * Get the two words to compare in wx and wp (both over
+ * 31 bits exactly).
+ */
+ wx = x[u];
+ /* bb carries the bit shifted out of the previous (higher) word of p. */
+ wp = (p[u] >> 1) | (bb << 30);
+ bb = p[u] & 1;
+
+ /*
+ * We set cc to -1, 0 or 1, depending on whether wp is
+ * lower than, equal to, or greater than wx.
+ */
+ cc = wp - wx;
+ cc = ((-cc) >> 31) | -(cc >> 31);
+
+ /*
+ * If r != 0 then it is either 1 or -1, and we keep its
+ * value. Otherwise, if r = 0, then we replace it with cc.
+ */
+ r |= cc & ((r & 1) - 1);
+ }
+
+ /*
+ * At this point, r = -1, 0 or 1, depending on whether (p-1)/2
+ * is lower than, equal to, or greater than x. We thus want to
+ * do the subtraction only if r = -1.
+ */
+ zint_sub(x, p, len, r >> 31);
+}
+
+/*
+ * Rebuild integers from their RNS representation. There are 'num'
+ * integers, and each consists in 'xlen' words. 'xx' points at that
+ * first word of the first integer; subsequent integers are accessed
+ * by adding 'xstride' repeatedly.
+ *
+ * The words of an integer are the RNS representation of that integer,
+ * using the provided 'primes' as moduli. This function replaces
+ * each integer with its multi-word value (little-endian order).
+ *
+ * If "normalize_signed" is non-zero, then the returned value is
+ * normalized to the -m/2..m/2 interval (where m is the product of all
+ * small prime moduli); two's complement is used for negative values.
+ */
+static void
+zint_rebuild_CRT(uint32_t *xx, size_t xlen, size_t xstride,
+ size_t num, const small_prime *primes, int normalize_signed,
+ uint32_t *tmp) {
+ size_t u;
+ uint32_t *x;
+
+ /* tmp[] accumulates the product of the processed prime moduli. */
+ tmp[0] = primes[0].p;
+ for (u = 1; u < xlen; u ++) {
+ /*
+ * At the entry of each loop iteration:
+ * - the first u words of each array have been
+ * reassembled;
+ * - the first u words of tmp[] contains the
+ * product of the prime moduli processed so far.
+ *
+ * We call 'q' the product of all previous primes.
+ */
+ uint32_t p, p0i, s, R2;
+ size_t v;
+
+ p = primes[u].p;
+ /* s = 1/q mod p (precomputed CRT coefficient). */
+ s = primes[u].s;
+ p0i = modp_ninv31(p);
+ R2 = modp_R2(p, p0i);
+
+ for (v = 0, x = xx; v < num; v ++, x += xstride) {
+ uint32_t xp, xq, xr;
+ /*
+ * xp = the integer x modulo the prime p for this
+ * iteration
+ * xq = (x mod q) mod p
+ */
+ xp = x[u];
+ xq = zint_mod_small_unsigned(x, u, p, p0i, R2);
+
+ /*
+ * New value is (x mod q) + q * (s * (xp - xq) mod p)
+ */
+ xr = modp_montymul(s, modp_sub(xp, xq, p), p, p0i);
+ zint_add_mul_small(x, tmp, u, xr);
+ }
+
+ /*
+ * Update product of primes in tmp[].
+ */
+ tmp[u] = zint_mul_small(tmp, u, p);
+ }
+
+ /*
+ * Normalize the reconstructed values around 0.
+ */
+ if (normalize_signed) {
+ for (u = 0, x = xx; u < num; u ++, x += xstride) {
+ zint_norm_zero(x, tmp, xlen);
+ }
+ }
+}
+
+/*
+ * Negate a big integer conditionally: value a is replaced with -a if
+ * and only if ctl = 1. Control value ctl must be 0 or 1.
+ */
+static void
+zint_negate(uint32_t *a, size_t len, uint32_t ctl) {
+ size_t u;
+ uint32_t cc, m;
+
+ /*
+ * If ctl = 1 then we flip the bits of a by XORing with
+ * 0x7FFFFFFF, and we add 1 to the value. If ctl = 0 then we XOR
+ * with 0 and add 0, which leaves the value unchanged.
+ */
+ cc = ctl;
+ /* m = 0x7FFFFFFF if ctl = 1, 0 if ctl = 0. */
+ m = -ctl >> 1;
+ for (u = 0; u < len; u ++) {
+ uint32_t aw;
+
+ aw = a[u];
+ aw = (aw ^ m) + cc;
+ a[u] = aw & 0x7FFFFFFF;
+ /* Propagate the carry of the +1 through the 31-bit words. */
+ cc = aw >> 31;
+ }
+}
+
+/*
+ * Replace a with (a*xa+b*xb)/(2^31) and b with (a*ya+b*yb)/(2^31).
+ * The low bits are dropped (the caller should compute the coefficients
+ * such that these dropped bits are all zeros). If either or both
+ * yields a negative value, then the value is negated.
+ *
+ * Returned value is:
+ * 0 both values were positive
+ * 1 new a had to be negated
+ * 2 new b had to be negated
+ * 3 both new a and new b had to be negated
+ *
+ * Coefficients xa, xb, ya and yb may use the full signed 32-bit range.
+ */
+static uint32_t
+zint_co_reduce(uint32_t *a, uint32_t *b, size_t len,
+ int64_t xa, int64_t xb, int64_t ya, int64_t yb) {
+ size_t u;
+ int64_t cca, ccb;
+ uint32_t nega, negb;
+
+ cca = 0;
+ ccb = 0;
+ for (u = 0; u < len; u ++) {
+ uint32_t wa, wb;
+ uint64_t za, zb;
+
+ wa = a[u];
+ wb = b[u];
+ /* Products are computed in unsigned 64-bit (wrap-around semantics). */
+ za = wa * (uint64_t)xa + wb * (uint64_t)xb + (uint64_t)cca;
+ zb = wa * (uint64_t)ya + wb * (uint64_t)yb + (uint64_t)ccb;
+ /*
+ * The division by 2^31 shows as writing word u's low 31
+ * bits into slot u-1 (the low word is simply dropped).
+ */
+ if (u > 0) {
+ a[u - 1] = (uint32_t)za & 0x7FFFFFFF;
+ b[u - 1] = (uint32_t)zb & 0x7FFFFFFF;
+ }
+ /*
+ * Reinterpret as signed to get an arithmetic (sign-
+ * preserving) shift for the carry; signed/unsigned
+ * variants of the same type may alias per C11 6.5p7.
+ */
+ cca = *(int64_t *)&za >> 31;
+ ccb = *(int64_t *)&zb >> 31;
+ }
+ a[len - 1] = (uint32_t)cca;
+ b[len - 1] = (uint32_t)ccb;
+
+ /* Negative results (sign bit of final carry) are negated. */
+ nega = (uint32_t)((uint64_t)cca >> 63);
+ negb = (uint32_t)((uint64_t)ccb >> 63);
+ zint_negate(a, len, nega);
+ zint_negate(b, len, negb);
+ return nega | (negb << 1);
+}
+
+/*
+ * Finish modular reduction. Rules on input parameters:
+ *
+ * if neg = 1, then -m <= a < 0
+ * if neg = 0, then 0 <= a < 2*m
+ *
+ * If neg = 0, then the top word of a[] is allowed to use 32 bits.
+ *
+ * Modulus m must be odd.
+ */
+static void
+zint_finish_mod(uint32_t *a, size_t len, const uint32_t *m, uint32_t neg) {
+ size_t u;
+ uint32_t cc, xm, ym;
+
+ /*
+ * First pass: compare a (assumed nonnegative) with m. Note that
+ * if the top word uses 32 bits, subtracting m must yield a
+ * value less than 2^31 since a < 2*m.
+ */
+ cc = 0;
+ for (u = 0; u < len; u ++) {
+ /* cc ends up as the final borrow: 1 if a < m, 0 otherwise. */
+ cc = (a[u] - m[u] - cc) >> 31;
+ }
+
+ /*
+ * If neg = 1 then we must add m (regardless of cc)
+ * If neg = 0 and cc = 0 then we must subtract m
+ * If neg = 0 and cc = 1 then we must do nothing
+ *
+ * In the loop below, we conditionally subtract either m or -m
+ * from a. Word xm is a word of m (if neg = 0) or -m (if neg = 1);
+ * but if neg = 0 and cc = 1, then ym = 0 and it forces mw to 0.
+ */
+ xm = -neg >> 1;
+ ym = -(neg | (1 - cc));
+ /* Adding -m in two's complement needs the initial +1, hence cc = neg. */
+ cc = neg;
+ for (u = 0; u < len; u ++) {
+ uint32_t aw, mw;
+
+ aw = a[u];
+ mw = (m[u] ^ xm) & ym;
+ aw = aw - mw - cc;
+ a[u] = aw & 0x7FFFFFFF;
+ cc = aw >> 31;
+ }
+}
+
+/*
+ * Replace a with (a*xa+b*xb)/(2^31) mod m, and b with
+ * (a*ya+b*yb)/(2^31) mod m. Modulus m must be odd; m0i = -1/m[0] mod 2^31.
+ */
+static void
+zint_co_reduce_mod(uint32_t *a, uint32_t *b, const uint32_t *m, size_t len,
+ uint32_t m0i, int64_t xa, int64_t xb, int64_t ya, int64_t yb) {
+ size_t u;
+ int64_t cca, ccb;
+ uint32_t fa, fb;
+
+ /*
+ * These are actually four combined Montgomery multiplications.
+ */
+ cca = 0;
+ ccb = 0;
+ /*
+ * fa, fb are the Montgomery reduction factors: chosen so that
+ * adding fa*m (resp. fb*m) makes the low 31 bits of the result
+ * zero, allowing the exact division by 2^31.
+ */
+ fa = ((a[0] * (uint32_t)xa + b[0] * (uint32_t)xb) * m0i) & 0x7FFFFFFF;
+ fb = ((a[0] * (uint32_t)ya + b[0] * (uint32_t)yb) * m0i) & 0x7FFFFFFF;
+ for (u = 0; u < len; u ++) {
+ uint32_t wa, wb;
+ uint64_t za, zb;
+
+ wa = a[u];
+ wb = b[u];
+ za = wa * (uint64_t)xa + wb * (uint64_t)xb
+ + m[u] * (uint64_t)fa + (uint64_t)cca;
+ zb = wa * (uint64_t)ya + wb * (uint64_t)yb
+ + m[u] * (uint64_t)fb + (uint64_t)ccb;
+ if (u > 0) {
+ a[u - 1] = (uint32_t)za & 0x7FFFFFFF;
+ b[u - 1] = (uint32_t)zb & 0x7FFFFFFF;
+ }
+ /* Signed reinterpretation yields an arithmetic shift for the carry. */
+ cca = *(int64_t *)&za >> 31;
+ ccb = *(int64_t *)&zb >> 31;
+ }
+ a[len - 1] = (uint32_t)cca;
+ b[len - 1] = (uint32_t)ccb;
+
+ /*
+ * At this point:
+ * -m <= a < 2*m
+ * -m <= b < 2*m
+ * (this is a case of Montgomery reduction)
+ * The top words of 'a' and 'b' may have a 32-th bit set.
+ * We want to add or subtract the modulus, as required.
+ */
+ zint_finish_mod(a, len, m, (uint32_t)((uint64_t)cca >> 63));
+ zint_finish_mod(b, len, m, (uint32_t)((uint64_t)ccb >> 63));
+}
+
+/*
+ * Compute a GCD between two positive big integers x and y. The two
+ * integers must be odd. Returned value is 1 if the GCD is 1, 0
+ * otherwise. When 1 is returned, arrays u and v are filled with values
+ * such that:
+ * 0 <= u <= y
+ * 0 <= v <= x
+ * x*u - y*v = 1
+ * x[] and y[] are unmodified. Both input values must have the same
+ * encoded length. Temporary array must be large enough to accommodate 4
+ * extra values of that length. Arrays u, v and tmp may not overlap with
+ * each other, or with either x or y.
+ */
+static int
+zint_bezout(uint32_t *u, uint32_t *v,
+ const uint32_t *x, const uint32_t *y,
+ size_t len, uint32_t *tmp) {
+ /*
+ * Algorithm is an extended binary GCD. We maintain 6 values
+ * a, b, u0, u1, v0 and v1 with the following invariants:
+ *
+ * a = x*u0 - y*v0
+ * b = x*u1 - y*v1
+ * 0 <= a <= x
+ * 0 <= b <= y
+ * 0 <= u0 < y
+ * 0 <= v0 < x
+ * 0 <= u1 <= y
+ * 0 <= v1 < x
+ *
+ * Initial values are:
+ *
+ * a = x u0 = 1 v0 = 0
+ * b = y u1 = y v1 = x-1
+ *
+ * Each iteration reduces either a or b, and maintains the
+ * invariants. Algorithm stops when a = b, at which point their
+ * common value is GCD(a,b) and (u0,v0) (or (u1,v1)) contains
+ * the values (u,v) we want to return.
+ *
+ * The formal definition of the algorithm is a sequence of steps:
+ *
+ * - If a is even, then:
+ * a <- a/2
+ * u0 <- u0/2 mod y
+ * v0 <- v0/2 mod x
+ *
+ * - Otherwise, if b is even, then:
+ * b <- b/2
+ * u1 <- u1/2 mod y
+ * v1 <- v1/2 mod x
+ *
+ * - Otherwise, if a > b, then:
+ * a <- (a-b)/2
+ * u0 <- (u0-u1)/2 mod y
+ * v0 <- (v0-v1)/2 mod x
+ *
+ * - Otherwise:
+ * b <- (b-a)/2
+ * u1 <- (u1-u0)/2 mod y
+ * v1 <- (v1-v0)/2 mod x
+ *
+ * We can show that the operations above preserve the invariants:
+ *
+ * - If a is even, then u0 and v0 are either both even or both
+ * odd (since a = x*u0 - y*v0, and x and y are both odd).
+ * If u0 and v0 are both even, then (u0,v0) <- (u0/2,v0/2).
+ * Otherwise, (u0,v0) <- ((u0+y)/2,(v0+x)/2). Either way,
+ * the a = x*u0 - y*v0 invariant is preserved.
+ *
+ * - The same holds for the case where b is even.
+ *
+ * - If a and b are odd, and a > b, then:
+ *
+ * a-b = x*(u0-u1) - y*(v0-v1)
+ *
+ * In that situation, if u0 < u1, then x*(u0-u1) < 0, but
+ * a-b > 0; therefore, it must be that v0 < v1, and the
+ * first part of the update is: (u0,v0) <- (u0-u1+y,v0-v1+x),
+ * which preserves the invariants. Otherwise, if u0 > u1,
+ * then u0-u1 >= 1, thus x*(u0-u1) >= x. But a <= x and
+ * b >= 0, hence a-b <= x. It follows that, in that case,
+ * v0-v1 >= 0. The first part of the update is then:
+ * (u0,v0) <- (u0-u1,v0-v1), which again preserves the
+ * invariants.
+ *
+ * Either way, once the subtraction is done, the new value of
+ * a, which is the difference of two odd values, is even,
+ * and the remaining of this step is a subcase of the
+ * first algorithm case (i.e. when a is even).
+ *
+ * - If a and b are odd, and b > a, then a similar
+ * argument holds.
+ *
+ * The values a and b start at x and y, respectively. Since x
+ * and y are odd, their GCD is odd, and it is easily seen that
+ * all steps conserve the GCD (GCD(a-b,b) = GCD(a, b);
+ * GCD(a/2,b) = GCD(a,b) if GCD(a,b) is odd). Moreover, either a
+ * or b is reduced by at least one bit at each iteration, so
+ * the algorithm necessarily converges on the case a = b, at
+ * which point the common value is the GCD.
+ *
+ * In the algorithm expressed above, when a = b, the fourth case
+ * applies, and sets b = 0. Since a contains the GCD of x and y,
+ * which are both odd, a must be odd, and subsequent iterations
+ * (if any) will simply divide b by 2 repeatedly, which has no
+ * consequence. Thus, the algorithm can run for more iterations
+ * than necessary; the final GCD will be in a, and the (u,v)
+ * coefficients will be (u0,v0).
+ *
+ *
+ * The presentation above is bit-by-bit. It can be sped up by
+ * noticing that all decisions are taken based on the low bits
+ * and high bits of a and b. We can extract the two top words
+ * and low word of each of a and b, and compute reduction
+ * parameters pa, pb, qa and qb such that the new values for
+ * a and b are:
+ * a' = (a*pa + b*pb) / (2^31)
+ * b' = (a*qa + b*qb) / (2^31)
+ * the two divisions being exact. The coefficients are obtained
+ * just from the extracted words, and may be slightly off, requiring
+ * an optional correction: if a' < 0, then we replace pa with -pa
+ * and pb with -pb. Each such step will reduce the total length
+ * (sum of lengths of a and b) by at least 30 bits at each
+ * iteration.
+ */
+ uint32_t *u0, *u1, *v0, *v1, *a, *b;
+ uint32_t x0i, y0i;
+ uint32_t num, rc;
+ size_t j;
+
+ if (len == 0) {
+ return 0;
+ }
+
+ /*
+ * u0 and v0 are the u and v result buffers; the four other
+ * values (u1, v1, a and b) are taken from tmp[].
+ */
+ u0 = u;
+ v0 = v;
+ u1 = tmp;
+ v1 = u1 + len;
+ a = v1 + len;
+ b = a + len;
+
+ /*
+ * We'll need the Montgomery reduction coefficients.
+ */
+ x0i = modp_ninv31(x[0]);
+ y0i = modp_ninv31(y[0]);
+
+ /*
+ * Initialize a, b, u0, u1, v0 and v1.
+ * a = x u0 = 1 v0 = 0
+ * b = y u1 = y v1 = x-1
+ * Note that x is odd, so computing x-1 is easy.
+ */
+ memcpy(a, x, len * sizeof * x);
+ memcpy(b, y, len * sizeof * y);
+ u0[0] = 1;
+ memset(u0 + 1, 0, (len - 1) * sizeof * u0);
+ memset(v0, 0, len * sizeof * v0);
+ memcpy(u1, y, len * sizeof * u1);
+ memcpy(v1, x, len * sizeof * v1);
+ v1[0] --;
+
+ /*
+ * Each input operand may be as large as 31*len bits, and we
+ * reduce the total length by at least 30 bits at each iteration.
+ */
+ for (num = 62 * (uint32_t)len + 30; num >= 30; num -= 30) {
+ uint32_t c0, c1;
+ uint32_t a0, a1, b0, b1;
+ uint64_t a_hi, b_hi;
+ uint32_t a_lo, b_lo;
+ int64_t pa, pb, qa, qb;
+ int i;
+ uint32_t r;
+
+ /*
+ * Extract the top words of a and b. If j is the highest
+ * index >= 1 such that a[j] != 0 or b[j] != 0, then we
+ * want (a[j] << 31) + a[j-1] and (b[j] << 31) + b[j-1].
+ * If a and b are down to one word each, then we use
+ * a[0] and b[0].
+ */
+ c0 = (uint32_t) -1;
+ c1 = (uint32_t) -1;
+ a0 = 0;
+ a1 = 0;
+ b0 = 0;
+ b1 = 0;
+ j = len;
+ /*
+ * Constant-time scan from the top: c0/c1 are all-ones masks
+ * that turn off one word after the first nonzero word pair
+ * is seen, so (a0,a1) and (b0,b1) latch the two top words.
+ */
+ while (j -- > 0) {
+ uint32_t aw, bw;
+
+ aw = a[j];
+ bw = b[j];
+ a0 ^= (a0 ^ aw) & c0;
+ a1 ^= (a1 ^ aw) & c1;
+ b0 ^= (b0 ^ bw) & c0;
+ b1 ^= (b1 ^ bw) & c1;
+ c1 = c0;
+ c0 &= (((aw | bw) + 0x7FFFFFFF) >> 31) - (uint32_t)1;
+ }
+
+ /*
+ * If c1 = 0, then we grabbed two words for a and b.
+ * If c1 != 0 but c0 = 0, then we grabbed one word. It
+ * is not possible that c1 != 0 and c0 != 0, because that
+ * would mean that both integers are zero.
+ */
+ a1 |= a0 & c1;
+ a0 &= ~c1;
+ b1 |= b0 & c1;
+ b0 &= ~c1;
+ a_hi = ((uint64_t)a0 << 31) + a1;
+ b_hi = ((uint64_t)b0 << 31) + b1;
+ a_lo = a[0];
+ b_lo = b[0];
+
+ /*
+ * Compute reduction factors:
+ *
+ * a' = a*pa + b*pb
+ * b' = a*qa + b*qb
+ *
+ * such that a' and b' are both multiple of 2^31, but are
+ * only marginally larger than a and b.
+ */
+ pa = 1;
+ pb = 0;
+ qa = 0;
+ qb = 1;
+ for (i = 0; i < 31; i ++) {
+ /*
+ * At each iteration:
+ *
+ * a <- (a-b)/2 if: a is odd, b is odd, a_hi > b_hi
+ * b <- (b-a)/2 if: a is odd, b is odd, a_hi <= b_hi
+ * a <- a/2 if: a is even
+ * b <- b/2 if: a is odd, b is even
+ *
+ * We multiply a_lo and b_lo by 2 at each
+ * iteration, thus a division by 2 really is a
+ * non-multiplication by 2.
+ */
+ uint32_t rt, oa, ob, cAB, cBA, cA;
+ uint64_t rz;
+
+ /*
+ * rt = 1 if a_hi > b_hi, 0 otherwise.
+ */
+ rz = b_hi - a_hi;
+ rt = (uint32_t)((rz ^ ((a_hi ^ b_hi)
+ & (a_hi ^ rz))) >> 63);
+
+ /*
+ * cAB = 1 if b must be subtracted from a
+ * cBA = 1 if a must be subtracted from b
+ * cA = 1 if a must be divided by 2
+ *
+ * Rules:
+ *
+ * cAB and cBA cannot both be 1.
+ * If a is not divided by 2, b is.
+ */
+ oa = (a_lo >> i) & 1;
+ ob = (b_lo >> i) & 1;
+ cAB = oa & ob & rt;
+ cBA = oa & ob & ~rt;
+ cA = cAB | (oa ^ 1);
+
+ /*
+ * Conditional subtractions.
+ */
+ a_lo -= b_lo & -cAB;
+ a_hi -= b_hi & -(uint64_t)cAB;
+ pa -= qa & -(int64_t)cAB;
+ pb -= qb & -(int64_t)cAB;
+ b_lo -= a_lo & -cBA;
+ b_hi -= a_hi & -(uint64_t)cBA;
+ qa -= pa & -(int64_t)cBA;
+ qb -= pb & -(int64_t)cBA;
+
+ /*
+ * Shifting.
+ */
+ a_lo += a_lo & (cA - 1);
+ pa += pa & ((int64_t)cA - 1);
+ pb += pb & ((int64_t)cA - 1);
+ a_hi ^= (a_hi ^ (a_hi >> 1)) & -(uint64_t)cA;
+ b_lo += b_lo & -cA;
+ qa += qa & -(int64_t)cA;
+ qb += qb & -(int64_t)cA;
+ b_hi ^= (b_hi ^ (b_hi >> 1)) & ((uint64_t)cA - 1);
+ }
+
+ /*
+ * Apply the computed parameters to our values. We
+ * may have to correct pa and pb depending on the
+ * returned value of zint_co_reduce() (when a and/or b
+ * had to be negated).
+ */
+ r = zint_co_reduce(a, b, len, pa, pb, qa, qb);
+ pa -= (pa + pa) & -(int64_t)(r & 1);
+ pb -= (pb + pb) & -(int64_t)(r & 1);
+ qa -= (qa + qa) & -(int64_t)(r >> 1);
+ qb -= (qb + qb) & -(int64_t)(r >> 1);
+ zint_co_reduce_mod(u0, u1, y, len, y0i, pa, pb, qa, qb);
+ zint_co_reduce_mod(v0, v1, x, len, x0i, pa, pb, qa, qb);
+ }
+
+ /*
+ * At that point, array a[] should contain the GCD, and the
+ * results (u,v) should already be set. We check that the GCD
+ * is indeed 1. We also check that the two operands x and y
+ * are odd.
+ */
+ rc = a[0] ^ 1;
+ for (j = 1; j < len; j ++) {
+ rc |= a[j];
+ }
+ /* rc = 0 iff a = 1; (rc | -rc) >> 31 is 1 iff rc != 0. */
+ return (int)((1 - ((rc | -rc) >> 31)) & x[0] & y[0]);
+}
+
+/*
+ * Add k*y*2^sc to x. The result is assumed to fit in the array of
+ * size xlen (truncation is applied if necessary).
+ * Scale factor 'sc' is provided as sch and scl, such that:
+ * sch = sc / 31
+ * scl = sc % 31
+ * xlen MUST NOT be lower than ylen.
+ *
+ * x[] and y[] are both signed integers, using two's complement for
+ * negative values.
+ */
+static void
+zint_add_scaled_mul_small(uint32_t *x, size_t xlen,
+ const uint32_t *y, size_t ylen, int32_t k,
+ uint32_t sch, uint32_t scl) {
+ size_t u;
+ uint32_t ysign, tw;
+ int32_t cc;
+
+ if (ylen == 0) {
+ return;
+ }
+
+ /* Sign extension word for y: all-ones (31-bit) if y < 0, else 0. */
+ ysign = -(y[ylen - 1] >> 30) >> 1;
+ tw = 0;
+ cc = 0;
+ for (u = sch; u < xlen; u ++) {
+ size_t v;
+ uint32_t wy, wys, ccu;
+ uint64_t z;
+
+ /*
+ * Get the next word of y (scaled).
+ */
+ v = u - sch;
+ if (v < ylen) {
+ wy = y[v];
+ } else {
+ wy = ysign;
+ }
+ /* tw carries the bits shifted out of the previous word (scl shift). */
+ wys = ((wy << scl) & 0x7FFFFFFF) | tw;
+ tw = wy >> (31 - scl);
+
+ /*
+ * The expression below does not overflow.
+ */
+ z = (uint64_t)((int64_t)wys * (int64_t)k + (int64_t)x[u] + cc);
+ x[u] = (uint32_t)z & 0x7FFFFFFF;
+
+ /*
+ * Right-shifting the signed value z would yield
+ * implementation-defined results (arithmetic shift is
+ * not guaranteed). However, we can cast to unsigned,
+ * and get the next carry as an unsigned word. We can
+ * then convert it back to signed by using the guaranteed
+ * fact that 'int32_t' uses two's complement with no
+ * trap representation or padding bit, and with a layout
+ * compatible with that of 'uint32_t'.
+ */
+ ccu = (uint32_t)(z >> 31);
+ cc = *(int32_t *)&ccu;
+ }
+}
+
+/*
+ * Subtract y*2^sc from x. The result is assumed to fit in the array of
+ * size xlen (truncation is applied if necessary).
+ * Scale factor 'sc' is provided as sch and scl, such that:
+ * sch = sc / 31
+ * scl = sc % 31
+ * xlen MUST NOT be lower than ylen.
+ *
+ * x[] and y[] are both signed integers, using two's complement for
+ * negative values.
+ */
+static void
+zint_sub_scaled(uint32_t *x, size_t xlen,
+ const uint32_t *y, size_t ylen, uint32_t sch, uint32_t scl) {
+ size_t u;
+ uint32_t ysign, tw;
+ uint32_t cc;
+
+ if (ylen == 0) {
+ return;
+ }
+
+ /* Sign extension word for y: all-ones (31-bit) if y < 0, else 0. */
+ ysign = -(y[ylen - 1] >> 30) >> 1;
+ tw = 0;
+ cc = 0;
+ /* Start at word sch: multiplying by 2^(31*sch) shifts y by sch words. */
+ for (u = sch; u < xlen; u ++) {
+ size_t v;
+ uint32_t w, wy, wys;
+
+ /*
+ * Get the next word of y (scaled).
+ */
+ v = u - sch;
+ if (v < ylen) {
+ wy = y[v];
+ } else {
+ wy = ysign;
+ }
+ /* tw carries the bits shifted out of the previous word (scl shift). */
+ wys = ((wy << scl) & 0x7FFFFFFF) | tw;
+ tw = wy >> (31 - scl);
+
+ /* Subtract with borrow over 31-bit words. */
+ w = x[u] - wys - cc;
+ x[u] = w & 0x7FFFFFFF;
+ cc = w >> 31;
+ }
+}
+
+/*
+ * Convert a one-word signed big integer into a signed value.
+ */
+static inline int32_t
+zint_one_to_plain(const uint32_t *x) {
+ uint32_t w;
+
+ w = x[0];
+ /* Propagate the 31-bit sign (bit 30) into bit 31 to sign-extend. */
+ w |= (w & 0x40000000) << 1;
+ /* Reinterpret as two's-complement int32_t (same-type aliasing is OK). */
+ return *(int32_t *)&w;
+}
+
+/* ==================================================================== */
+
+/*
+ * Convert a polynomial to floating-point values.
+ *
+ * Each coefficient has length flen words, and starts fstride words after
+ * the previous.
+ *
+ * IEEE-754 binary64 values can represent values in a finite range,
+ * roughly 2^(-1023) to 2^(+1023); thus, if coefficients are too large,
+ * they should be "trimmed" by pointing not to the lowest word of each,
+ * but upper.
+ */
+static void
+poly_big_to_fp(fpr *d, const uint32_t *f, size_t flen, size_t fstride,
+ unsigned logn) {
+ size_t n, u;
+
+ n = MKN(logn);
+ /* Zero-length coefficients convert to an all-zero polynomial. */
+ if (flen == 0) {
+ for (u = 0; u < n; u ++) {
+ d[u] = fpr_zero;
+ }
+ return;
+ }
+ for (u = 0; u < n; u ++, f += fstride) {
+ size_t v;
+ uint32_t neg, cc, xm;
+ fpr x, fsc;
+
+ /*
+ * Get sign of the integer; if it is negative, then we
+ * will load its absolute value instead, and negate the
+ * result.
+ */
+ neg = -(f[flen - 1] >> 30);
+ xm = neg >> 1;
+ cc = neg & 1;
+ x = fpr_zero;
+ /* fsc walks through 2^0, 2^31, 2^62, ... (one factor per word). */
+ fsc = fpr_one;
+ for (v = 0; v < flen; v ++, fsc = fpr_mul(fsc, fpr_ptwo31)) {
+ uint32_t w;
+
+ /* Two's complement negation (XOR + carry) when neg is set. */
+ w = (f[v] ^ xm) + cc;
+ cc = w >> 31;
+ w &= 0x7FFFFFFF;
+ /* Re-apply the sign to the 31-bit word before conversion. */
+ w -= (w << 1) & neg;
+ x = fpr_add(x, fpr_mul(fpr_of(*(int32_t *)&w), fsc));
+ }
+ d[u] = x;
+ }
+}
+
+/*
+ * Convert a polynomial to small integers. Source values are supposed
+ * to be one-word integers, signed over 31 bits. Returned value is 0
+ * if any of the coefficients exceeds the provided limit (in absolute
+ * value), or 1 on success.
+ *
+ * This is not constant-time; this is not a problem here, because on
+ * any failure, the NTRU-solving process will be deemed to have failed
+ * and the (f,g) polynomials will be discarded.
+ */
+static int
+poly_big_to_small(int8_t *d, const uint32_t *s, int lim, unsigned logn) {
+ size_t n, u;
+
+ n = MKN(logn);
+ for (u = 0; u < n; u ++) {
+ int32_t z;
+
+ /* Coefficients are one word each, hence stride 1. */
+ z = zint_one_to_plain(s + u);
+ /* lim is an inclusive bound: |z| == lim is accepted. */
+ if (z < -lim || z > lim) {
+ return 0;
+ }
+ d[u] = (int8_t)z;
+ }
+ return 1;
+}
+
+/*
+ * Subtract k*f from F, where F, f and k are polynomials modulo X^N+1.
+ * Coefficients of polynomial k are small integers (signed values in the
+ * -2^31..2^31 range) scaled by 2^sc. Value sc is provided as sch = sc / 31
+ * and scl = sc % 31.
+ *
+ * This function implements the basic quadratic multiplication algorithm,
+ * which is efficient in space (no extra buffer needed) but slow at
+ * high degree.
+ */
+static void
+poly_sub_scaled(uint32_t *F, size_t Flen, size_t Fstride,
+ const uint32_t *f, size_t flen, size_t fstride,
+ const int32_t *k, uint32_t sch, uint32_t scl, unsigned logn) {
+ size_t n, u;
+
+ n = MKN(logn);
+ for (u = 0; u < n; u ++) {
+ int32_t kf;
+ size_t v;
+ uint32_t *x;
+ const uint32_t *y;
+
+ /*
+ * kf starts as -k[u] because we subtract k*f (the helper
+ * below adds a scaled multiple).
+ */
+ kf = -k[u];
+ x = F + u * Fstride;
+ y = f;
+ for (v = 0; v < n; v ++) {
+ zint_add_scaled_mul_small(
+ x, Flen, y, flen, kf, sch, scl);
+ if (u + v == n - 1) {
+ /*
+ * Wrap-around: arithmetic is modulo X^N+1,
+ * so X^N = -1; continue at F[0] with the
+ * sign of the multiplier flipped.
+ */
+ x = F;
+ kf = -kf;
+ } else {
+ x += Fstride;
+ }
+ y += fstride;
+ }
+ }
+}
+
+/*
+ * Subtract k*f from F. Coefficients of polynomial k are small integers
+ * (signed values in the -2^31..2^31 range) scaled by 2^sc. This function
+ * assumes that the degree is large, and integers relatively small.
+ * The value sc is provided as sch = sc / 31 and scl = sc % 31.
+ */
+static void
+poly_sub_scaled_ntt(uint32_t *F, size_t Flen, size_t Fstride,
+ const uint32_t *f, size_t flen, size_t fstride,
+ const int32_t *k, uint32_t sch, uint32_t scl, unsigned logn,
+ uint32_t *tmp) {
+ uint32_t *gm, *igm, *fk, *t1, *x;
+ const uint32_t *y;
+ size_t n, u, tlen;
+ const small_prime *primes;
+
+ n = MKN(logn);
+ /*
+ * One extra word (tlen = flen + 1) to accommodate the growth of
+ * the product when multiplying by the 32-bit coefficients of k.
+ */
+ tlen = flen + 1;
+ /*
+ * tmp layout: gm, igm = NTT twiddle tables (n words each);
+ * fk = k*f with tlen words per coefficient; t1 = scratch (n words).
+ */
+ gm = tmp;
+ igm = gm + MKN(logn);
+ fk = igm + MKN(logn);
+ t1 = fk + n * tlen;
+
+ primes = PRIMES;
+
+ /*
+ * Compute k*f in fk[], in RNS notation.
+ */
+ for (u = 0; u < tlen; u ++) {
+ uint32_t p, p0i, R2, Rx;
+ size_t v;
+
+ p = primes[u].p;
+ p0i = modp_ninv31(p);
+ R2 = modp_R2(p, p0i);
+ Rx = modp_Rx((unsigned)flen, p, p0i, R2);
+ modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i);
+
+ /* Reduce k modulo p and bring it to NTT representation. */
+ for (v = 0; v < n; v ++) {
+ t1[v] = modp_set(k[v], p);
+ }
+ modp_NTT2(t1, gm, logn, p, p0i);
+ for (v = 0, y = f, x = fk + u;
+ v < n; v ++, y += fstride, x += tlen) {
+ *x = zint_mod_small_signed(y, flen, p, p0i, R2, Rx);
+ }
+ modp_NTT2_ext(fk + u, tlen, gm, logn, p, p0i);
+ /*
+ * Pointwise product in the NTT domain; the extra Montgomery
+ * multiplication by R2 cancels the 1/R factor introduced by
+ * modp_montymul.
+ */
+ for (v = 0, x = fk + u; v < n; v ++, x += tlen) {
+ *x = modp_montymul(
+ modp_montymul(t1[v], *x, p, p0i), R2, p, p0i);
+ }
+ modp_iNTT2_ext(fk + u, tlen, igm, logn, p, p0i);
+ }
+
+ /*
+ * Rebuild k*f.
+ */
+ zint_rebuild_CRT(fk, tlen, tlen, n, primes, 1, t1);
+
+ /*
+ * Subtract k*f, scaled, from F.
+ */
+ for (u = 0, x = F, y = fk; u < n; u ++, x += Fstride, y += tlen) {
+ zint_sub_scaled(x, Flen, y, tlen, sch, scl);
+ }
+}
+
+/* ==================================================================== */
+
+
+/* RNG used throughout keygen is the SHAKE-256 based PRNG. */
+#define RNG_CONTEXT inner_shake256_context
+
+/*
+ * Get a random 8-byte integer from a SHAKE-based RNG. This function
+ * ensures consistent interpretation of the SHAKE output so that
+ * the same values will be obtained over different platforms, in case
+ * a known seed is used.
+ */
+static inline uint64_t
+get_rng_u64(inner_shake256_context *rng) {
+ /*
+ * We enforce little-endian representation.
+ */
+
+ uint8_t tmp[8];
+
+ inner_shake256_extract(rng, tmp, sizeof tmp);
+ return (uint64_t)tmp[0]
+ | ((uint64_t)tmp[1] << 8)
+ | ((uint64_t)tmp[2] << 16)
+ | ((uint64_t)tmp[3] << 24)
+ | ((uint64_t)tmp[4] << 32)
+ | ((uint64_t)tmp[5] << 40)
+ | ((uint64_t)tmp[6] << 48)
+ | ((uint64_t)tmp[7] << 56);
+}
+
+/*
+ * Table below incarnates a discrete Gaussian distribution:
+ * D(x) = exp(-(x^2)/(2*sigma^2))
+ * where sigma = 1.17*sqrt(q/(2*N)), q = 12289, and N = 1024.
+ * Element 0 of the table is P(x = 0).
+ * For k > 0, element k is P(x >= k+1 | x > 0).
+ * Probabilities are scaled up by 2^63.
+ *
+ * mkgauss() below scans the whole table on every sample, for
+ * constant-time operation.
+ */
+static const uint64_t gauss_1024_12289[] = {
+ 1283868770400643928u, 6416574995475331444u, 4078260278032692663u,
+ 2353523259288686585u, 1227179971273316331u, 575931623374121527u,
+ 242543240509105209u, 91437049221049666u, 30799446349977173u,
+ 9255276791179340u, 2478152334826140u, 590642893610164u,
+ 125206034929641u, 23590435911403u, 3948334035941u,
+ 586753615614u, 77391054539u, 9056793210u,
+ 940121950u, 86539696u, 7062824u,
+ 510971u, 32764u, 1862u,
+ 94u, 4u, 0u
+};
+
+/*
+ * Generate a random value with a Gaussian distribution centered on 0.
+ * The RNG must be ready for extraction (already flipped).
+ *
+ * Distribution has standard deviation 1.17*sqrt(q/(2*N)). The
+ * precomputed table is for N = 1024. Since the sum of two independent
+ * values of standard deviation sigma has standard deviation
+ * sigma*sqrt(2), then we can just generate more values and add them
+ * together for lower dimensions.
+ */
+static int
+mkgauss(RNG_CONTEXT *rng, unsigned logn) {
+ unsigned u, g;
+ int val;
+
+ /*
+ * Number of samples to sum: 2^(10 - logn), since the table is
+ * tuned for logn = 10 (N = 1024). Requires logn <= 10.
+ */
+ g = 1U << (10 - logn);
+ val = 0;
+ for (u = 0; u < g; u ++) {
+ /*
+ * Each iteration generates one value with the
+ * Gaussian distribution for N = 1024.
+ *
+ * We use two random 64-bit values. First value
+ * decides on whether the generated value is 0, and,
+ * if not, the sign of the value. Second random 64-bit
+ * word is used to generate the non-zero value.
+ *
+ * For constant-time code we have to read the complete
+ * table. This has negligible cost, compared with the
+ * remainder of the keygen process (solving the NTRU
+ * equation).
+ */
+ uint64_t r;
+ uint32_t f, v, k, neg;
+
+ /*
+ * First value:
+ * - flag 'neg' is randomly selected to be 0 or 1.
+ * - flag 'f' is set to 1 if the generated value is zero,
+ * or set to 0 otherwise.
+ */
+ r = get_rng_u64(rng);
+ neg = (uint32_t)(r >> 63);
+ r &= ~((uint64_t)1 << 63);
+ f = (uint32_t)((r - gauss_1024_12289[0]) >> 63);
+
+ /*
+ * We produce a new random 63-bit integer r, and go over
+ * the array, starting at index 1. We store in v the
+ * index of the first array element which is not greater
+ * than r, unless the flag f was already 1.
+ */
+ v = 0;
+ r = get_rng_u64(rng);
+ r &= ~((uint64_t)1 << 63);
+ for (k = 1; k < (uint32_t)((sizeof gauss_1024_12289)
+ / (sizeof gauss_1024_12289[0])); k ++) {
+ uint32_t t;
+
+ /*
+ * t = 1 if r >= table[k] (borrow-free), else 0.
+ * v records k only the first time t becomes 1
+ * (i.e. while f is still 0); f then latches to 1.
+ */
+ t = (uint32_t)((r - gauss_1024_12289[k]) >> 63) ^ 1;
+ v |= k & -(t & (f ^ 1));
+ f |= t;
+ }
+
+ /*
+ * We apply the sign ('neg' flag). If the value is zero,
+ * the sign has no effect. This is a constant-time
+ * conditional negation: (v ^ -1) + 1 == -v.
+ */
+ v = (v ^ -neg) + neg;
+
+ /*
+ * Generated value is added to val. The pointer cast
+ * reinterprets the two's complement bits as signed
+ * (counterpart-type access is well-defined).
+ */
+ val += *(int32_t *)&v;
+ }
+ return val;
+}
+
+/*
+ * The MAX_BL_SMALL[] and MAX_BL_LARGE[] contain the lengths, in 31-bit
+ * words, of intermediate values in the computation:
+ *
+ * MAX_BL_SMALL[depth]: length for the input f and g at that depth
+ * MAX_BL_LARGE[depth]: length for the unreduced F and G at that depth
+ *
+ * Rules:
+ *
+ * - Within an array, values grow.
+ *
+ * - The 'SMALL' array must have an entry for maximum depth, corresponding
+ * to the size of values used in the binary GCD. There is no such value
+ * for the 'LARGE' array (the binary GCD yields already reduced
+ * coefficients).
+ *
+ * - MAX_BL_LARGE[depth] >= MAX_BL_SMALL[depth + 1].
+ *
+ * - Values must be large enough to handle the common cases, with some
+ * margins.
+ *
+ * - Values must not be "too large" either because we will convert some
+ * integers into floating-point values by considering the top 10 words,
+ * i.e. 310 bits; hence, for values of length more than 10 words, we
+ * should take care to have the length centered on the expected size.
+ *
+ * The following average lengths, in bits, have been measured on thousands
+ * of random keys (fg = max length of the absolute value of coefficients
+ * of f and g at that depth; FG = idem for the unreduced F and G; for the
+ * maximum depth, F and G are the output of binary GCD, multiplied by q;
+ * for each value, the average and standard deviation are provided).
+ *
+ * Binary case:
+ * depth: 10 fg: 6307.52 (24.48) FG: 6319.66 (24.51)
+ * depth: 9 fg: 3138.35 (12.25) FG: 9403.29 (27.55)
+ * depth: 8 fg: 1576.87 ( 7.49) FG: 4703.30 (14.77)
+ * depth: 7 fg: 794.17 ( 4.98) FG: 2361.84 ( 9.31)
+ * depth: 6 fg: 400.67 ( 3.10) FG: 1188.68 ( 6.04)
+ * depth: 5 fg: 202.22 ( 1.87) FG: 599.81 ( 3.87)
+ * depth: 4 fg: 101.62 ( 1.02) FG: 303.49 ( 2.38)
+ * depth: 3 fg: 50.37 ( 0.53) FG: 153.65 ( 1.39)
+ * depth: 2 fg: 24.07 ( 0.25) FG: 78.20 ( 0.73)
+ * depth: 1 fg: 10.99 ( 0.08) FG: 39.82 ( 0.41)
+ * depth: 0 fg: 4.00 ( 0.00) FG: 19.61 ( 0.49)
+ *
+ * Integers are actually represented either in binary notation over
+ * 31-bit words (signed, using two's complement), or in RNS, modulo
+ * many small primes. These small primes are close to, but slightly
+ * lower than, 2^31. Use of RNS loses less than two bits, even for
+ * the largest values.
+ *
+ * IMPORTANT: if these values are modified, then the temporary buffer
+ * sizes (FALCON_KEYGEN_TEMP_*, in inner.h) must be recomputed
+ * accordingly.
+ */
+
+/* Indexed by recursion depth (0 = top level). */
+static const size_t MAX_BL_SMALL[] = {
+ 1, 1, 2, 2, 4, 7, 14, 27, 53, 106, 209
+};
+
+/* Indexed by recursion depth; no entry for the maximum depth. */
+static const size_t MAX_BL_LARGE[] = {
+ 2, 2, 5, 7, 12, 21, 40, 78, 157, 308
+};
+
+/*
+ * Average and standard deviation for the maximum size (in bits) of
+ * coefficients of (f,g), depending on depth. These values are used
+ * to compute bounds for Babai's reduction.
+ */
+static const struct {
+ int avg;
+ int std;
+} BITLENGTH[] = {
+ { 4, 0 },
+ { 11, 1 },
+ { 24, 1 },
+ { 50, 1 },
+ { 102, 1 },
+ { 202, 2 },
+ { 401, 4 },
+ { 794, 5 },
+ { 1577, 8 },
+ { 3138, 13 },
+ { 6308, 25 }
+};
+
+/*
+ * Minimal recursion depth at which we rebuild intermediate values
+ * when reconstructing f and g.
+ */
+#define DEPTH_INT_FG 4
+
+/*
+ * Compute squared norm of a short vector. Returned value is saturated to
+ * 2^32-1 if it is not lower than 2^31.
+ */
+static uint32_t
+poly_small_sqnorm(const int8_t *f, unsigned logn) {
+ size_t n, u;
+ uint32_t s, ng;
+
+ n = MKN(logn);
+ s = 0;
+ ng = 0;
+ for (u = 0; u < n; u ++) {
+ int32_t z;
+
+ z = f[u];
+ s += (uint32_t)(z * z);
+ /*
+ * ng accumulates all bits of the partial sums; its top bit
+ * ends up set if any partial sum reached 2^31.
+ */
+ ng |= s;
+ }
+ /* If overflow was recorded, saturate to all-ones (2^32-1). */
+ return s | -(ng >> 31);
+}
+
+/*
+ * Align (upwards) the provided 'data' pointer with regards to 'base'
+ * so that the offset is a multiple of the size of 'fpr'.
+ */
+static fpr *
+align_fpr(void *base, void *data) {
+ uint8_t *cb, *cd;
+ size_t k, km;
+
+ cb = base;
+ cd = data;
+ /* Byte offset of data from base, rounded up to sizeof(fpr). */
+ k = (size_t)(cd - cb);
+ km = k % sizeof(fpr);
+ if (km) {
+ k += (sizeof(fpr)) - km;
+ }
+ return (fpr *)(cb + k);
+}
+
+/*
+ * Align (upwards) the provided 'data' pointer with regards to 'base'
+ * so that the offset is a multiple of the size of 'uint32_t'.
+ */
+static uint32_t *
+align_u32(void *base, void *data) {
+ uint8_t *cb, *cd;
+ size_t k, km;
+
+ cb = base;
+ cd = data;
+ /* Byte offset of data from base, rounded up to sizeof(uint32_t). */
+ k = (size_t)(cd - cb);
+ km = k % sizeof(uint32_t);
+ if (km) {
+ k += (sizeof(uint32_t)) - km;
+ }
+ return (uint32_t *)(cb + k);
+}
+
+/*
+ * Convert a small vector to floating point.
+ *
+ * x receives n = 2^logn fpr values; f provides the n source
+ * coefficients (8-bit signed).
+ */
+static void
+poly_small_to_fp(fpr *x, const int8_t *f, unsigned logn) {
+ size_t n, u;
+
+ n = MKN(logn);
+ for (u = 0; u < n; u ++) {
+ x[u] = fpr_of(f[u]);
+ }
+}
+
+/*
+ * Input: f,g of degree N = 2^logn; 'depth' is used only to get their
+ * individual length.
+ *
+ * Output: f',g' of degree N/2, with the length for 'depth+1'.
+ *
+ * Values are in RNS; input and/or output may also be in NTT.
+ */
+static void
+make_fg_step(uint32_t *data, unsigned logn, unsigned depth,
+ int in_ntt, int out_ntt) {
+ size_t n, hn, u;
+ size_t slen, tlen;
+ uint32_t *fd, *gd, *fs, *gs, *gm, *igm, *t1;
+ const small_prime *primes;
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ slen = MAX_BL_SMALL[depth];
+ tlen = MAX_BL_SMALL[depth + 1];
+ primes = PRIMES;
+
+ /*
+ * Prepare room for the result.
+ *
+ * Layout: fd,gd receive the outputs (hn coefficients of tlen
+ * words each); fs,gs hold the inputs (moved out of the way by
+ * the memmove below); gm,igm are the NTT twiddle tables; t1 is
+ * per-prime scratch (n words).
+ */
+ fd = data;
+ gd = fd + hn * tlen;
+ fs = gd + hn * tlen;
+ gs = fs + n * slen;
+ gm = gs + n * slen;
+ igm = gm + n;
+ t1 = igm + n;
+ memmove(fs, data, 2 * n * slen * sizeof * data);
+
+ /*
+ * First slen words: we use the input values directly, and apply
+ * inverse NTT as we go.
+ */
+ for (u = 0; u < slen; u ++) {
+ uint32_t p, p0i, R2;
+ size_t v;
+ uint32_t *x;
+
+ p = primes[u].p;
+ p0i = modp_ninv31(p);
+ R2 = modp_R2(p, p0i);
+ modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i);
+
+ for (v = 0, x = fs + u; v < n; v ++, x += slen) {
+ t1[v] = *x;
+ }
+ if (!in_ntt) {
+ modp_NTT2(t1, gm, logn, p, p0i);
+ }
+ /*
+ * f'(x^2) = f(x)*f(-x): in this NTT ordering, the two
+ * roots that share the same square are adjacent, so each
+ * output coefficient is the product of two adjacent NTT
+ * entries. The extra Montgomery multiplication by R2
+ * cancels the 1/R factor of modp_montymul.
+ */
+ for (v = 0, x = fd + u; v < hn; v ++, x += tlen) {
+ uint32_t w0, w1;
+
+ w0 = t1[(v << 1) + 0];
+ w1 = t1[(v << 1) + 1];
+ *x = modp_montymul(
+ modp_montymul(w0, w1, p, p0i), R2, p, p0i);
+ }
+ if (in_ntt) {
+ modp_iNTT2_ext(fs + u, slen, igm, logn, p, p0i);
+ }
+
+ for (v = 0, x = gs + u; v < n; v ++, x += slen) {
+ t1[v] = *x;
+ }
+ if (!in_ntt) {
+ modp_NTT2(t1, gm, logn, p, p0i);
+ }
+ for (v = 0, x = gd + u; v < hn; v ++, x += tlen) {
+ uint32_t w0, w1;
+
+ w0 = t1[(v << 1) + 0];
+ w1 = t1[(v << 1) + 1];
+ *x = modp_montymul(
+ modp_montymul(w0, w1, p, p0i), R2, p, p0i);
+ }
+ if (in_ntt) {
+ modp_iNTT2_ext(gs + u, slen, igm, logn, p, p0i);
+ }
+
+ if (!out_ntt) {
+ modp_iNTT2_ext(fd + u, tlen, igm, logn - 1, p, p0i);
+ modp_iNTT2_ext(gd + u, tlen, igm, logn - 1, p, p0i);
+ }
+ }
+
+ /*
+ * Since the fs and gs words have been de-NTTized, we can use the
+ * CRT to rebuild the values. (gm doubles as scratch here.)
+ */
+ zint_rebuild_CRT(fs, slen, slen, n, primes, 1, gm);
+ zint_rebuild_CRT(gs, slen, slen, n, primes, 1, gm);
+
+ /*
+ * Remaining words: use modular reductions to extract the values.
+ */
+ for (u = slen; u < tlen; u ++) {
+ uint32_t p, p0i, R2, Rx;
+ size_t v;
+ uint32_t *x;
+
+ p = primes[u].p;
+ p0i = modp_ninv31(p);
+ R2 = modp_R2(p, p0i);
+ Rx = modp_Rx((unsigned)slen, p, p0i, R2);
+ modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i);
+ for (v = 0, x = fs; v < n; v ++, x += slen) {
+ t1[v] = zint_mod_small_signed(x, slen, p, p0i, R2, Rx);
+ }
+ modp_NTT2(t1, gm, logn, p, p0i);
+ for (v = 0, x = fd + u; v < hn; v ++, x += tlen) {
+ uint32_t w0, w1;
+
+ w0 = t1[(v << 1) + 0];
+ w1 = t1[(v << 1) + 1];
+ *x = modp_montymul(
+ modp_montymul(w0, w1, p, p0i), R2, p, p0i);
+ }
+ for (v = 0, x = gs; v < n; v ++, x += slen) {
+ t1[v] = zint_mod_small_signed(x, slen, p, p0i, R2, Rx);
+ }
+ modp_NTT2(t1, gm, logn, p, p0i);
+ for (v = 0, x = gd + u; v < hn; v ++, x += tlen) {
+ uint32_t w0, w1;
+
+ w0 = t1[(v << 1) + 0];
+ w1 = t1[(v << 1) + 1];
+ *x = modp_montymul(
+ modp_montymul(w0, w1, p, p0i), R2, p, p0i);
+ }
+
+ if (!out_ntt) {
+ modp_iNTT2_ext(fd + u, tlen, igm, logn - 1, p, p0i);
+ modp_iNTT2_ext(gd + u, tlen, igm, logn - 1, p, p0i);
+ }
+ }
+}
+
+/*
+ * Compute f and g at a specific depth, in RNS notation.
+ *
+ * Returned values are stored in the data[] array, at slen words per integer.
+ *
+ * Conditions:
+ * 0 <= depth <= logn
+ *
+ * Space use in data[]: enough room for any two successive values (f', g',
+ * f and g).
+ */
+static void
+make_fg(uint32_t *data, const int8_t *f, const int8_t *g,
+ unsigned logn, unsigned depth, int out_ntt) {
+ size_t n, u;
+ uint32_t *ft, *gt, p0;
+ unsigned d;
+ const small_prime *primes;
+
+ n = MKN(logn);
+ ft = data;
+ gt = ft + n;
+ primes = PRIMES;
+ p0 = primes[0].p;
+ /*
+ * At depth 0, values are one word long: reduce the small
+ * coefficients modulo the first prime.
+ */
+ for (u = 0; u < n; u ++) {
+ ft[u] = modp_set(f[u], p0);
+ gt[u] = modp_set(g[u], p0);
+ }
+
+ /*
+ * Special case: top level with NTT output requested; apply the
+ * NTT directly, no degree-halving step is needed.
+ */
+ if (depth == 0 && out_ntt) {
+ uint32_t *gm, *igm;
+ uint32_t p, p0i;
+
+ p = primes[0].p;
+ p0i = modp_ninv31(p);
+ gm = gt + n;
+ igm = gm + MKN(logn);
+ modp_mkgm2(gm, igm, logn, primes[0].g, p, p0i);
+ modp_NTT2(ft, gm, logn, p, p0i);
+ modp_NTT2(gt, gm, logn, p, p0i);
+ return;
+ }
+
+ if (depth == 0) {
+ return;
+ }
+ if (depth == 1) {
+ make_fg_step(data, logn, 0, 0, out_ntt);
+ return;
+ }
+ /*
+ * General case: chain the degree-halving steps, keeping values
+ * in NTT representation between steps to avoid redundant
+ * conversions; only the last step honors out_ntt.
+ */
+ make_fg_step(data, logn, 0, 0, 1);
+ for (d = 1; d + 1 < depth; d ++) {
+ make_fg_step(data, logn - d, d, 1, 1);
+ }
+ make_fg_step(data, logn - depth + 1, depth - 1, 1, out_ntt);
+}
+
+/*
+ * Solving the NTRU equation, deepest level: compute the resultants of
+ * f and g with X^N+1, and use binary GCD. The F and G values are
+ * returned in tmp[].
+ *
+ * Returned value: 1 on success, 0 on error.
+ */
+static int
+solve_NTRU_deepest(unsigned logn_top,
+ const int8_t *f, const int8_t *g, uint32_t *tmp) {
+ size_t len;
+ uint32_t *Fp, *Gp, *fp, *gp, *t1, q;
+ const small_prime *primes;
+
+ len = MAX_BL_SMALL[logn_top];
+ primes = PRIMES;
+
+ /*
+ * tmp layout: Fp, Gp receive the solution; fp, gp hold the
+ * resultants computed by make_fg() at maximum depth (two
+ * single-coefficient big integers, len words each); t1 is
+ * scratch.
+ */
+ Fp = tmp;
+ Gp = Fp + len;
+ fp = Gp + len;
+ gp = fp + len;
+ t1 = gp + len;
+
+ make_fg(fp, f, g, logn_top, logn_top, 0);
+
+ /*
+ * We use the CRT to rebuild the resultants as big integers.
+ * There are two such big integers. The resultants are always
+ * nonnegative.
+ */
+ zint_rebuild_CRT(fp, len, len, 2, primes, 0, t1);
+
+ /*
+ * Apply the binary GCD. The zint_bezout() function works only
+ * if both inputs are odd.
+ *
+ * We can test on the result and return 0 because that would
+ * imply failure of the NTRU solving equation, and the (f,g)
+ * values will be abandoned in that case.
+ */
+ if (!zint_bezout(Gp, Fp, fp, gp, len, t1)) {
+ return 0;
+ }
+
+ /*
+ * Multiply the two values by the target value q. Values must
+ * fit in the destination arrays.
+ * We can again test on the returned words: a non-zero output
+ * of zint_mul_small() means that we exceeded our array
+ * capacity, and that implies failure and rejection of (f,g).
+ */
+ q = 12289;
+ if (zint_mul_small(Fp, len, q) != 0
+ || zint_mul_small(Gp, len, q) != 0) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Solving the NTRU equation, intermediate level. Upon entry, the F and G
+ * from the previous level should be in the tmp[] array.
+ * This function MAY be invoked for the top-level (in which case depth = 0).
+ *
+ * Returned value: 1 on success, 0 on error.
+ */
+static int
+solve_NTRU_intermediate(unsigned logn_top,
+ const int8_t *f, const int8_t *g, unsigned depth, uint32_t *tmp) {
+ /*
+ * In this function, 'logn' is the log2 of the degree for
+ * this step. If N = 2^logn, then:
+ * - the F and G values already in fk->tmp (from the deeper
+ * levels) have degree N/2;
+ * - this function should return F and G of degree N.
+ */
+ unsigned logn;
+ size_t n, hn, slen, dlen, llen, rlen, FGlen, u;
+ uint32_t *Fd, *Gd, *Ft, *Gt, *ft, *gt, *t1;
+ fpr *rt1, *rt2, *rt3, *rt4, *rt5;
+ int scale_fg, minbl_fg, maxbl_fg, maxbl_FG, scale_k;
+ uint32_t *x, *y;
+ int32_t *k;
+ const small_prime *primes;
+
+ logn = logn_top - depth;
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+
+ /*
+ * slen = size for our input f and g; also size of the reduced
+ * F and G we return (degree N)
+ *
+ * dlen = size of the F and G obtained from the deeper level
+ * (degree N/2 or N/3)
+ *
+ * llen = size for intermediary F and G before reduction (degree N)
+ *
+ * We build our non-reduced F and G as two independent halves each,
+ * of degree N/2 (F = F0 + X*F1, G = G0 + X*G1).
+ */
+ slen = MAX_BL_SMALL[depth];
+ dlen = MAX_BL_SMALL[depth + 1];
+ llen = MAX_BL_LARGE[depth];
+ primes = PRIMES;
+
+ /*
+ * Fd and Gd are the F and G from the deeper level.
+ */
+ Fd = tmp;
+ Gd = Fd + dlen * hn;
+
+ /*
+ * Compute the input f and g for this level. Note that we get f
+ * and g in RNS + NTT representation.
+ */
+ ft = Gd + dlen * hn;
+ make_fg(ft, f, g, logn_top, depth, 1);
+
+ /*
+ * Move the newly computed f and g to make room for our candidate
+ * F and G (unreduced).
+ */
+ Ft = tmp;
+ Gt = Ft + n * llen;
+ t1 = Gt + n * llen;
+ memmove(t1, ft, 2 * n * slen * sizeof * ft);
+ ft = t1;
+ gt = ft + slen * n;
+ t1 = gt + slen * n;
+
+ /*
+ * Move Fd and Gd _after_ f and g.
+ */
+ memmove(t1, Fd, 2 * hn * dlen * sizeof * Fd);
+ Fd = t1;
+ Gd = Fd + hn * dlen;
+
+ /*
+ * We reduce Fd and Gd modulo all the small primes we will need,
+ * and store the values in Ft and Gt (only n/2 values in each).
+ */
+ for (u = 0; u < llen; u ++) {
+ uint32_t p, p0i, R2, Rx;
+ size_t v;
+ uint32_t *xs, *ys, *xd, *yd;
+
+ p = primes[u].p;
+ p0i = modp_ninv31(p);
+ R2 = modp_R2(p, p0i);
+ Rx = modp_Rx((unsigned)dlen, p, p0i, R2);
+ for (v = 0, xs = Fd, ys = Gd, xd = Ft + u, yd = Gt + u;
+ v < hn;
+ v ++, xs += dlen, ys += dlen, xd += llen, yd += llen) {
+ *xd = zint_mod_small_signed(xs, dlen, p, p0i, R2, Rx);
+ *yd = zint_mod_small_signed(ys, dlen, p, p0i, R2, Rx);
+ }
+ }
+
+ /*
+ * We do not need Fd and Gd after that point.
+ */
+
+ /*
+ * Compute our F and G modulo sufficiently many small primes.
+ */
+ for (u = 0; u < llen; u ++) {
+ uint32_t p, p0i, R2;
+ uint32_t *gm, *igm, *fx, *gx, *Fp, *Gp;
+ size_t v;
+
+ /*
+ * All computations are done modulo p.
+ */
+ p = primes[u].p;
+ p0i = modp_ninv31(p);
+ R2 = modp_R2(p, p0i);
+
+ /*
+ * If we processed slen words, then f and g have been
+ * de-NTTized, and are in RNS; we can rebuild them.
+ */
+ if (u == slen) {
+ zint_rebuild_CRT(ft, slen, slen, n, primes, 1, t1);
+ zint_rebuild_CRT(gt, slen, slen, n, primes, 1, t1);
+ }
+
+ gm = t1;
+ igm = gm + n;
+ fx = igm + n;
+ gx = fx + n;
+
+ modp_mkgm2(gm, igm, logn, primes[u].g, p, p0i);
+
+ if (u < slen) {
+ for (v = 0, x = ft + u, y = gt + u;
+ v < n; v ++, x += slen, y += slen) {
+ fx[v] = *x;
+ gx[v] = *y;
+ }
+ modp_iNTT2_ext(ft + u, slen, igm, logn, p, p0i);
+ modp_iNTT2_ext(gt + u, slen, igm, logn, p, p0i);
+ } else {
+ uint32_t Rx;
+
+ Rx = modp_Rx((unsigned)slen, p, p0i, R2);
+ for (v = 0, x = ft, y = gt;
+ v < n; v ++, x += slen, y += slen) {
+ fx[v] = zint_mod_small_signed(x, slen,
+ p, p0i, R2, Rx);
+ gx[v] = zint_mod_small_signed(y, slen,
+ p, p0i, R2, Rx);
+ }
+ modp_NTT2(fx, gm, logn, p, p0i);
+ modp_NTT2(gx, gm, logn, p, p0i);
+ }
+
+ /*
+ * Get F' and G' modulo p and in NTT representation
+ * (they have degree n/2). These values were computed in
+ * a previous step, and stored in Ft and Gt.
+ */
+ Fp = gx + n;
+ Gp = Fp + hn;
+ for (v = 0, x = Ft + u, y = Gt + u;
+ v < hn; v ++, x += llen, y += llen) {
+ Fp[v] = *x;
+ Gp[v] = *y;
+ }
+ modp_NTT2(Fp, gm, logn - 1, p, p0i);
+ modp_NTT2(Gp, gm, logn - 1, p, p0i);
+
+ /*
+ * Compute our F and G modulo p.
+ *
+ * General case:
+ *
+ * we divide degree by d = 2 or 3
+ * f'(x^d) = N(f)(x^d) = f * adj(f)
+ * g'(x^d) = N(g)(x^d) = g * adj(g)
+ * f'*G' - g'*F' = q
+ * F = F'(x^d) * adj(g)
+ * G = G'(x^d) * adj(f)
+ *
+ * We compute things in the NTT. We group roots of phi
+ * such that all roots x in a group share the same x^d.
+ * If the roots in a group are x_1, x_2... x_d, then:
+ *
+ * N(f)(x_1^d) = f(x_1)*f(x_2)*...*f(x_d)
+ *
+ * Thus, we have:
+ *
+ * G(x_1) = f(x_2)*f(x_3)*...*f(x_d)*G'(x_1^d)
+ * G(x_2) = f(x_1)*f(x_3)*...*f(x_d)*G'(x_1^d)
+ * ...
+ * G(x_d) = f(x_1)*f(x_2)*...*f(x_{d-1})*G'(x_1^d)
+ *
+ * In all cases, we can thus compute F and G in NTT
+ * representation by a few simple multiplications.
+ * Moreover, in our chosen NTT representation, roots
+ * from the same group are consecutive in RAM.
+ */
+ for (v = 0, x = Ft + u, y = Gt + u; v < hn;
+ v ++, x += (llen << 1), y += (llen << 1)) {
+ uint32_t ftA, ftB, gtA, gtB;
+ uint32_t mFp, mGp;
+
+ ftA = fx[(v << 1) + 0];
+ ftB = fx[(v << 1) + 1];
+ gtA = gx[(v << 1) + 0];
+ gtB = gx[(v << 1) + 1];
+ mFp = modp_montymul(Fp[v], R2, p, p0i);
+ mGp = modp_montymul(Gp[v], R2, p, p0i);
+ x[0] = modp_montymul(gtB, mFp, p, p0i);
+ x[llen] = modp_montymul(gtA, mFp, p, p0i);
+ y[0] = modp_montymul(ftB, mGp, p, p0i);
+ y[llen] = modp_montymul(ftA, mGp, p, p0i);
+ }
+ modp_iNTT2_ext(Ft + u, llen, igm, logn, p, p0i);
+ modp_iNTT2_ext(Gt + u, llen, igm, logn, p, p0i);
+ }
+
+ /*
+ * Rebuild F and G with the CRT.
+ */
+ zint_rebuild_CRT(Ft, llen, llen, n, primes, 1, t1);
+ zint_rebuild_CRT(Gt, llen, llen, n, primes, 1, t1);
+
+ /*
+ * At that point, Ft, Gt, ft and gt are consecutive in RAM (in that
+ * order).
+ */
+
+ /*
+ * Apply Babai reduction to bring back F and G to size slen.
+ *
+ * We use the FFT to compute successive approximations of the
+ * reduction coefficient. We first isolate the top bits of
+ * the coefficients of f and g, and convert them to floating
+ * point; with the FFT, we compute adj(f), adj(g), and
+ * 1/(f*adj(f)+g*adj(g)).
+ *
+ * Then, we repeatedly apply the following:
+ *
+ * - Get the top bits of the coefficients of F and G into
+ * floating point, and use the FFT to compute:
+ * (F*adj(f)+G*adj(g))/(f*adj(f)+g*adj(g))
+ *
+ * - Convert back that value into normal representation, and
+ * round it to the nearest integers, yielding a polynomial k.
+ * Proper scaling is applied to f, g, F and G so that the
+ * coefficients fit on 32 bits (signed).
+ *
+ * - Subtract k*f from F and k*g from G.
+ *
+ * Under normal conditions, this process reduces the size of F
+ * and G by some bits at each iteration. For constant-time
+ * operation, we do not want to measure the actual length of
+ * F and G; instead, we do the following:
+ *
+ * - f and g are converted to floating-point, with some scaling
+ * if necessary to keep values in the representable range.
+ *
+ * - For each iteration, we _assume_ a maximum size for F and G,
+ * and use the values at that size. If we overreach, then
+ * we get zeros, which is harmless: the resulting coefficients
+ * of k will be 0 and the value won't be reduced.
+ *
+ * - We conservatively assume that F and G will be reduced by
+ * at least 25 bits at each iteration.
+ *
+ * Even when reaching the bottom of the reduction, reduction
+ * coefficient will remain low. If it goes out-of-range, then
+ * something wrong occurred and the whole NTRU solving fails.
+ */
+
+ /*
+ * Memory layout:
+ * - We need to compute and keep adj(f), adj(g), and
+ * 1/(f*adj(f)+g*adj(g)) (sizes N, N and N/2 fp numbers,
+ * respectively).
+ * - At each iteration we need two extra fp buffer (N fp values),
+ * and produce a k (N 32-bit words). k will be shared with one
+ * of the fp buffers.
+ * - To compute k*f and k*g efficiently (with the NTT), we need
+ * some extra room; we reuse the space of the temporary buffers.
+ *
+ * Arrays of 'fpr' are obtained from the temporary array itself.
+ * We ensure that the base is at a properly aligned offset (the
+ * source array tmp[] is supposed to be already aligned).
+ */
+
+ rt3 = align_fpr(tmp, t1);
+ rt4 = rt3 + n;
+ rt5 = rt4 + n;
+ rt1 = rt5 + (n >> 1);
+ k = (int32_t *)align_u32(tmp, rt1);
+ rt2 = align_fpr(tmp, k + n);
+ if (rt2 < (rt1 + n)) {
+ rt2 = rt1 + n;
+ }
+ t1 = (uint32_t *)k + n;
+
+ /*
+ * Get f and g into rt3 and rt4 as floating-point approximations.
+ *
+ * We need to "scale down" the floating-point representation of
+ * coefficients when they are too big. We want to keep the value
+ * below 2^310 or so. Thus, when values are larger than 10 words,
+ * we consider only the top 10 words. Array lengths have been
+ * computed so that average maximum length will fall in the
+ * middle or the upper half of these top 10 words.
+ */
+ rlen = slen;
+ if (rlen > 10) {
+ rlen = 10;
+ }
+ poly_big_to_fp(rt3, ft + slen - rlen, rlen, slen, logn);
+ poly_big_to_fp(rt4, gt + slen - rlen, rlen, slen, logn);
+
+ /*
+ * Values in rt3 and rt4 are downscaled by 2^(scale_fg).
+ */
+ scale_fg = 31 * (int)(slen - rlen);
+
+ /*
+ * Estimated boundaries for the maximum size (in bits) of the
+ * coefficients of (f,g). We use the measured average, and
+ * allow for a deviation of at most six times the standard
+ * deviation.
+ */
+ minbl_fg = BITLENGTH[depth].avg - 6 * BITLENGTH[depth].std;
+ maxbl_fg = BITLENGTH[depth].avg + 6 * BITLENGTH[depth].std;
+
+ /*
+ * Compute 1/(f*adj(f)+g*adj(g)) in rt5. We also keep adj(f)
+ * and adj(g) in rt3 and rt4, respectively.
+ */
+ PQCLEAN_FALCON512_CLEAN_FFT(rt3, logn);
+ PQCLEAN_FALCON512_CLEAN_FFT(rt4, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_invnorm2_fft(rt5, rt3, rt4, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_adj_fft(rt3, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_adj_fft(rt4, logn);
+
+ /*
+ * Reduce F and G repeatedly.
+ *
+ * The expected maximum bit length of coefficients of F and G
+ * is kept in maxbl_FG, with the corresponding word length in
+ * FGlen.
+ */
+ FGlen = llen;
+ maxbl_FG = 31 * (int)llen;
+
+ /*
+ * Each reduction operation computes the reduction polynomial
+ * "k". We need that polynomial to have coefficients that fit
+ * on 32-bit signed integers, with some scaling; thus, we use
+ * a descending sequence of scaling values, down to zero.
+ *
+ * The size of the coefficients of k is (roughly) the difference
+ * between the size of the coefficients of (F,G) and the size
+ * of the coefficients of (f,g). Thus, the maximum size of the
+ * coefficients of k is, at the start, maxbl_FG - minbl_fg;
+ * this is our starting scale value for k.
+ *
+ * We need to estimate the size of (F,G) during the execution of
+ * the algorithm; we are allowed some overestimation but not too
+ * much (poly_big_to_fp() uses a 310-bit window). Generally
+ * speaking, after applying a reduction with k scaled to
+ * scale_k, the size of (F,G) will be size(f,g) + scale_k + dd,
+ * where 'dd' is a few bits to account for the fact that the
+ * reduction is never perfect (intuitively, dd is on the order
+ * of sqrt(N), so at most 5 bits; we here allow for 10 extra
+ * bits).
+ *
+ * The size of (f,g) is not known exactly, but maxbl_fg is an
+ * upper bound.
+ */
+ scale_k = maxbl_FG - minbl_fg;
+
+ for (;;) {
+ int scale_FG, dc, new_maxbl_FG;
+ uint32_t scl, sch;
+ fpr pdc, pt;
+
+ /*
+ * Convert current F and G into floating-point. We apply
+ * scaling if the current length is more than 10 words.
+ */
+ rlen = FGlen;
+ if (rlen > 10) {
+ rlen = 10;
+ }
+ scale_FG = 31 * (int)(FGlen - rlen);
+ poly_big_to_fp(rt1, Ft + FGlen - rlen, rlen, llen, logn);
+ poly_big_to_fp(rt2, Gt + FGlen - rlen, rlen, llen, logn);
+
+ /*
+ * Compute (F*adj(f)+G*adj(g))/(f*adj(f)+g*adj(g)) in rt2.
+ */
+ PQCLEAN_FALCON512_CLEAN_FFT(rt1, logn);
+ PQCLEAN_FALCON512_CLEAN_FFT(rt2, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(rt1, rt3, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(rt2, rt4, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_add(rt2, rt1, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_autoadj_fft(rt2, rt5, logn);
+ PQCLEAN_FALCON512_CLEAN_iFFT(rt2, logn);
+
+ /*
+ * (f,g) are scaled by 'scale_fg', meaning that the
+ * numbers in rt3/rt4 should be multiplied by 2^(scale_fg)
+ * to have their true mathematical value.
+ *
+ * (F,G) are similarly scaled by 'scale_FG'. Therefore,
+ * the value we computed in rt2 is scaled by
+ * 'scale_FG-scale_fg'.
+ *
+ * We want that value to be scaled by 'scale_k', hence we
+ * apply a corrective scaling. After scaling, the values
+ * should fit in -2^31-1..+2^31-1.
+ */
+ dc = scale_k - scale_FG + scale_fg;
+
+ /*
+ * We will need to multiply values by 2^(-dc). The value
+ * 'dc' is not secret, so we can compute 2^(-dc) with a
+ * non-constant-time process.
+ * (We could use ldexp(), but we prefer to avoid any
+ * dependency on libm. When using FP emulation, we could
+ * use our fpr_ldexp(), which is constant-time.)
+ */
+ if (dc < 0) {
+ dc = -dc;
+ pt = fpr_two;
+ } else {
+ pt = fpr_onehalf;
+ }
+ pdc = fpr_one;
+ while (dc != 0) {
+ if ((dc & 1) != 0) {
+ pdc = fpr_mul(pdc, pt);
+ }
+ dc >>= 1;
+ pt = fpr_sqr(pt);
+ }
+
+ for (u = 0; u < n; u ++) {
+ fpr xv;
+
+ xv = fpr_mul(rt2[u], pdc);
+
+ /*
+ * Sometimes the values can be out-of-bounds if
+ * the algorithm fails; we must not call
+ * fpr_rint() (and cast to int32_t) if the value
+ * is not in-bounds. Note that the test does not
+ * break constant-time discipline, since any
+ * failure here implies that we discard the current
+ * secret key (f,g).
+ */
+ if (!fpr_lt(fpr_mtwo31m1, xv)
+ || !fpr_lt(xv, fpr_ptwo31m1)) {
+ return 0;
+ }
+ k[u] = (int32_t)fpr_rint(xv);
+ }
+
+ /*
+ * Values in k[] are integers. They really are scaled
+ * down by maxbl_FG - minbl_fg bits.
+ *
+ * If we are at low depth, then we use the NTT to
+ * compute k*f and k*g.
+ */
+ sch = (uint32_t)(scale_k / 31);
+ scl = (uint32_t)(scale_k % 31);
+ if (depth <= DEPTH_INT_FG) {
+ poly_sub_scaled_ntt(Ft, FGlen, llen, ft, slen, slen,
+ k, sch, scl, logn, t1);
+ poly_sub_scaled_ntt(Gt, FGlen, llen, gt, slen, slen,
+ k, sch, scl, logn, t1);
+ } else {
+ poly_sub_scaled(Ft, FGlen, llen, ft, slen, slen,
+ k, sch, scl, logn);
+ poly_sub_scaled(Gt, FGlen, llen, gt, slen, slen,
+ k, sch, scl, logn);
+ }
+
+ /*
+ * We compute the new maximum size of (F,G), assuming that
+ * (f,g) has _maximal_ length (i.e. that reduction is
+ * "late" instead of "early". We also adjust FGlen
+ * accordingly.
+ */
+ new_maxbl_FG = scale_k + maxbl_fg + 10;
+ if (new_maxbl_FG < maxbl_FG) {
+ maxbl_FG = new_maxbl_FG;
+ if ((int)FGlen * 31 >= maxbl_FG + 31) {
+ FGlen --;
+ }
+ }
+
+ /*
+ * We suppose that scaling down achieves a reduction by
+ * at least 25 bits per iteration. We stop when we have
+ * done the loop with an unscaled k.
+ */
+ if (scale_k <= 0) {
+ break;
+ }
+ scale_k -= 25;
+ if (scale_k < 0) {
+ scale_k = 0;
+ }
+ }
+
+ /*
+ * If (F,G) length was lowered below 'slen', then we must take
+ * care to re-extend the sign.
+ */
+ if (FGlen < slen) {
+ for (u = 0; u < n; u ++, Ft += llen, Gt += llen) {
+ size_t v;
+ uint32_t sw;
+
+ sw = -(Ft[FGlen - 1] >> 30) >> 1;
+ for (v = FGlen; v < slen; v ++) {
+ Ft[v] = sw;
+ }
+ sw = -(Gt[FGlen - 1] >> 30) >> 1;
+ for (v = FGlen; v < slen; v ++) {
+ Gt[v] = sw;
+ }
+ }
+ }
+
+ /*
+ * Compress encoding of all values to 'slen' words (this is the
+ * expected output format).
+ */
+ for (u = 0, x = tmp, y = tmp;
+ u < (n << 1); u ++, x += slen, y += llen) {
+ memmove(x, y, slen * sizeof * y);
+ }
+ return 1;
+}
+
+/*
+ * Solving the NTRU equation, binary case, depth = 1. Upon entry, the
+ * F and G from the previous level should be in the tmp[] array.
+ *
+ * Returned value: 1 on success, 0 on error.
+ */
+static int
+solve_NTRU_binary_depth1(unsigned logn_top,
+    const int8_t *f, const int8_t *g, uint32_t *tmp) {
+    /*
+     * The first half of this function is a copy of the corresponding
+     * part in solve_NTRU_intermediate(), for the reconstruction of
+     * the unreduced F and G. The second half (Babai reduction) is
+     * done differently, because the unreduced F and G fit in 53 bits
+     * of precision, allowing a much simpler process with lower RAM
+     * usage.
+     */
+    unsigned depth, logn;
+    size_t n_top, n, hn, slen, dlen, llen, u;
+    uint32_t *Fd, *Gd, *Ft, *Gt, *ft, *gt, *t1;
+    fpr *rt1, *rt2, *rt3, *rt4, *rt5, *rt6;
+    uint32_t *x, *y;
+
+    depth = 1;
+    n_top = (size_t)1 << logn_top;
+    logn = logn_top - depth;
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+
+    /*
+     * Equations are:
+     *
+     *   f' = f0^2 - X^2*f1^2
+     *   g' = g0^2 - X^2*g1^2
+     *   F' and G' are a solution to f'G' - g'F' = q (from deeper levels)
+     *   F = F'*(g0 - X*g1)
+     *   G = G'*(f0 - X*f1)
+     *
+     * f0, f1, g0, g1, f', g', F' and G' are all "compressed" to
+     * degree N/2 (their odd-indexed coefficients are all zero).
+     */
+
+    /*
+     * slen = size for our input f and g; also size of the reduced
+     *        F and G we return (degree N)
+     *
+     * dlen = size of the F and G obtained from the deeper level
+     *        (degree N/2)
+     *
+     * llen = size for intermediary F and G before reduction (degree N)
+     *
+     * We build our non-reduced F and G as two independent halves each,
+     * of degree N/2 (F = F0 + X*F1, G = G0 + X*G1).
+     */
+    slen = MAX_BL_SMALL[depth];
+    dlen = MAX_BL_SMALL[depth + 1];
+    llen = MAX_BL_LARGE[depth];
+
+    /*
+     * Fd and Gd are the F and G from the deeper level. Ft and Gt
+     * are the destination arrays for the unreduced F and G.
+     */
+    Fd = tmp;
+    Gd = Fd + dlen * hn;
+    Ft = Gd + dlen * hn;
+    Gt = Ft + llen * n;
+
+    /*
+     * We reduce Fd and Gd modulo all the small primes we will need,
+     * and store the values in Ft and Gt.
+     */
+    for (u = 0; u < llen; u ++) {
+        uint32_t p, p0i, R2, Rx;
+        size_t v;
+        uint32_t *xs, *ys, *xd, *yd;
+
+        p = PRIMES[u].p;
+        p0i = modp_ninv31(p);
+        R2 = modp_R2(p, p0i);
+        Rx = modp_Rx((unsigned)dlen, p, p0i, R2);
+        for (v = 0, xs = Fd, ys = Gd, xd = Ft + u, yd = Gt + u;
+                v < hn;
+                v ++, xs += dlen, ys += dlen, xd += llen, yd += llen) {
+            *xd = zint_mod_small_signed(xs, dlen, p, p0i, R2, Rx);
+            *yd = zint_mod_small_signed(ys, dlen, p, p0i, R2, Rx);
+        }
+    }
+
+    /*
+     * Now Fd and Gd are not needed anymore; we can squeeze them out.
+     */
+    memmove(tmp, Ft, llen * n * sizeof(uint32_t));
+    Ft = tmp;
+    memmove(Ft + llen * n, Gt, llen * n * sizeof(uint32_t));
+    Gt = Ft + llen * n;
+    ft = Gt + llen * n;
+    gt = ft + slen * n;
+
+    t1 = gt + slen * n;
+
+    /*
+     * Compute our F and G modulo sufficiently many small primes.
+     */
+    for (u = 0; u < llen; u ++) {
+        uint32_t p, p0i, R2;
+        uint32_t *gm, *igm, *fx, *gx, *Fp, *Gp;
+        unsigned e;
+        size_t v;
+
+        /*
+         * All computations are done modulo p.
+         */
+        p = PRIMES[u].p;
+        p0i = modp_ninv31(p);
+        R2 = modp_R2(p, p0i);
+
+        /*
+         * We recompute things from the source f and g, of full
+         * degree. However, we will need only the n first elements
+         * of the inverse NTT table (igm); the call to modp_mkgm()
+         * below will fill n_top elements in igm[] (thus overflowing
+         * into fx[]) but later code will overwrite these extra
+         * elements.
+         */
+        gm = t1;
+        igm = gm + n_top;
+        fx = igm + n;
+        gx = fx + n_top;
+        modp_mkgm2(gm, igm, logn_top, PRIMES[u].g, p, p0i);
+
+        /*
+         * Set ft and gt to f and g modulo p, respectively.
+         */
+        for (v = 0; v < n_top; v ++) {
+            fx[v] = modp_set(f[v], p);
+            gx[v] = modp_set(g[v], p);
+        }
+
+        /*
+         * Convert to NTT and compute our f and g.
+         */
+        modp_NTT2(fx, gm, logn_top, p, p0i);
+        modp_NTT2(gx, gm, logn_top, p, p0i);
+        for (e = logn_top; e > logn; e --) {
+            modp_poly_rec_res(fx, e, p, p0i, R2);
+            modp_poly_rec_res(gx, e, p, p0i, R2);
+        }
+
+        /*
+         * From that point onward, we only need tables for
+         * degree n, so we can save some space.
+         */
+        if (depth > 0) { /* always true */
+            memmove(gm + n, igm, n * sizeof * igm);
+            igm = gm + n;
+            memmove(igm + n, fx, n * sizeof * ft);
+            fx = igm + n;
+            memmove(fx + n, gx, n * sizeof * gt);
+            gx = fx + n;
+        }
+
+        /*
+         * Get F' and G' modulo p and in NTT representation
+         * (they have degree n/2). These values were computed
+         * in a previous step, and stored in Ft and Gt.
+         */
+        Fp = gx + n;
+        Gp = Fp + hn;
+        for (v = 0, x = Ft + u, y = Gt + u;
+                v < hn; v ++, x += llen, y += llen) {
+            Fp[v] = *x;
+            Gp[v] = *y;
+        }
+        modp_NTT2(Fp, gm, logn - 1, p, p0i);
+        modp_NTT2(Gp, gm, logn - 1, p, p0i);
+
+        /*
+         * Compute our F and G modulo p.
+         *
+         * Equations are:
+         *
+         *   f'(x^2) = N(f)(x^2) = f * adj(f)
+         *   g'(x^2) = N(g)(x^2) = g * adj(g)
+         *
+         *   f'*G' - g'*F' = q
+         *
+         *   F = F'(x^2) * adj(g)
+         *   G = G'(x^2) * adj(f)
+         *
+         * The NTT representation of f is f(w) for all w which
+         * are roots of phi. In the binary case, as well as in
+         * the ternary case for all depths except the deepest,
+         * these roots can be grouped in pairs (w,-w), and we
+         * then have:
+         *
+         *   f(w) = adj(f)(-w)
+         *   f(-w) = adj(f)(w)
+         *
+         * and w^2 is then a root for phi at the half-degree.
+         *
+         * At the deepest level in the ternary case, this still
+         * holds, in the following sense: the roots of x^2-x+1
+         * are (w,-w^2) (for w^3 = -1, and w != -1), and we
+         * have:
+         *
+         *   f(w) = adj(f)(-w^2)
+         *   f(-w^2) = adj(f)(w)
+         *
+         * In all cases, we can thus compute F and G in NTT
+         * representation by a few simple multiplications.
+         * Moreover, the two roots for each pair are consecutive
+         * in our bit-reversal encoding.
+         */
+        for (v = 0, x = Ft + u, y = Gt + u;
+                v < hn; v ++, x += (llen << 1), y += (llen << 1)) {
+            uint32_t ftA, ftB, gtA, gtB;
+            uint32_t mFp, mGp;
+
+            ftA = fx[(v << 1) + 0];
+            ftB = fx[(v << 1) + 1];
+            gtA = gx[(v << 1) + 0];
+            gtB = gx[(v << 1) + 1];
+            mFp = modp_montymul(Fp[v], R2, p, p0i);
+            mGp = modp_montymul(Gp[v], R2, p, p0i);
+            x[0] = modp_montymul(gtB, mFp, p, p0i);
+            x[llen] = modp_montymul(gtA, mFp, p, p0i);
+            y[0] = modp_montymul(ftB, mGp, p, p0i);
+            y[llen] = modp_montymul(ftA, mGp, p, p0i);
+        }
+        modp_iNTT2_ext(Ft + u, llen, igm, logn, p, p0i);
+        modp_iNTT2_ext(Gt + u, llen, igm, logn, p, p0i);
+
+        /*
+         * Also save ft and gt (only up to size slen).
+         */
+        if (u < slen) {
+            modp_iNTT2(fx, igm, logn, p, p0i);
+            modp_iNTT2(gx, igm, logn, p, p0i);
+            for (v = 0, x = ft + u, y = gt + u;
+                    v < n; v ++, x += slen, y += slen) {
+                *x = fx[v];
+                *y = gx[v];
+            }
+        }
+    }
+
+    /*
+     * Rebuild f, g, F and G with the CRT. Note that the elements of F
+     * and G are consecutive, and thus can be rebuilt in a single
+     * loop; similarly, the elements of f and g are consecutive.
+     */
+    zint_rebuild_CRT(Ft, llen, llen, n << 1, PRIMES, 1, t1);
+    zint_rebuild_CRT(ft, slen, slen, n << 1, PRIMES, 1, t1);
+
+    /*
+     * Here starts the Babai reduction, specialized for depth = 1.
+     *
+     * Candidates F and G (from Ft and Gt), and base f and g (ft and gt),
+     * are converted to floating point. There is no scaling, and a
+     * single pass is sufficient.
+     */
+
+    /*
+     * Convert F and G into floating point (rt1 and rt2).
+     */
+    rt1 = align_fpr(tmp, gt + slen * n);
+    rt2 = rt1 + n;
+    poly_big_to_fp(rt1, Ft, llen, llen, logn);
+    poly_big_to_fp(rt2, Gt, llen, llen, logn);
+
+    /*
+     * Integer representation of F and G is no longer needed, we
+     * can remove it.
+     */
+    memmove(tmp, ft, 2 * slen * n * sizeof * ft);
+    ft = tmp;
+    gt = ft + slen * n;
+    rt3 = align_fpr(tmp, gt + slen * n);
+    memmove(rt3, rt1, 2 * n * sizeof * rt1);
+    rt1 = rt3;
+    rt2 = rt1 + n;
+    rt3 = rt2 + n;
+    rt4 = rt3 + n;
+
+    /*
+     * Convert f and g into floating point (rt3 and rt4).
+     */
+    poly_big_to_fp(rt3, ft, slen, slen, logn);
+    poly_big_to_fp(rt4, gt, slen, slen, logn);
+
+    /*
+     * Remove unneeded ft and gt.
+     */
+    memmove(tmp, rt1, 4 * n * sizeof * rt1);
+    rt1 = (fpr *)tmp;
+    rt2 = rt1 + n;
+    rt3 = rt2 + n;
+    rt4 = rt3 + n;
+
+    /*
+     * We now have:
+     *   rt1 = F
+     *   rt2 = G
+     *   rt3 = f
+     *   rt4 = g
+     * in that order in RAM. We convert all of them to FFT.
+     */
+    PQCLEAN_FALCON512_CLEAN_FFT(rt1, logn);
+    PQCLEAN_FALCON512_CLEAN_FFT(rt2, logn);
+    PQCLEAN_FALCON512_CLEAN_FFT(rt3, logn);
+    PQCLEAN_FALCON512_CLEAN_FFT(rt4, logn);
+
+    /*
+     * Compute:
+     *   rt5 = F*adj(f) + G*adj(g)
+     *   rt6 = 1 / (f*adj(f) + g*adj(g))
+     * (Note that rt6 is half-length.)
+     */
+    rt5 = rt4 + n;
+    rt6 = rt5 + n;
+    PQCLEAN_FALCON512_CLEAN_poly_add_muladj_fft(rt5, rt1, rt2, rt3, rt4, logn);
+    PQCLEAN_FALCON512_CLEAN_poly_invnorm2_fft(rt6, rt3, rt4, logn);
+
+    /*
+     * Compute:
+     *   rt5 = (F*adj(f)+G*adj(g)) / (f*adj(f)+g*adj(g))
+     */
+    PQCLEAN_FALCON512_CLEAN_poly_mul_autoadj_fft(rt5, rt6, logn);
+
+    /*
+     * Compute k as the rounded version of rt5. Check that none of
+     * the values is larger than 2^63-1 (in absolute value)
+     * because that would make the fpr_rint() do something undefined;
+     * note that any out-of-bounds value here implies a failure and
+     * (f,g) will be discarded, so we can make a simple test.
+     */
+    PQCLEAN_FALCON512_CLEAN_iFFT(rt5, logn);
+    for (u = 0; u < n; u ++) {
+        fpr z;
+
+        z = rt5[u];
+        if (!fpr_lt(z, fpr_ptwo63m1) || !fpr_lt(fpr_mtwo63m1, z)) {
+            return 0;
+        }
+        rt5[u] = fpr_of(fpr_rint(z));
+    }
+    PQCLEAN_FALCON512_CLEAN_FFT(rt5, logn);
+
+    /*
+     * Subtract k*f from F, and k*g from G.
+     */
+    PQCLEAN_FALCON512_CLEAN_poly_mul_fft(rt3, rt5, logn);
+    PQCLEAN_FALCON512_CLEAN_poly_mul_fft(rt4, rt5, logn);
+    PQCLEAN_FALCON512_CLEAN_poly_sub(rt1, rt3, logn);
+    PQCLEAN_FALCON512_CLEAN_poly_sub(rt2, rt4, logn);
+    PQCLEAN_FALCON512_CLEAN_iFFT(rt1, logn);
+    PQCLEAN_FALCON512_CLEAN_iFFT(rt2, logn);
+
+    /*
+     * Convert back F and G to integers, and return.
+     */
+    Ft = tmp;
+    Gt = Ft + n;
+    rt3 = align_fpr(tmp, Gt + n);
+    memmove(rt3, rt1, 2 * n * sizeof * rt1);
+    rt1 = rt3;
+    rt2 = rt1 + n;
+    for (u = 0; u < n; u ++) {
+        Ft[u] = (uint32_t)fpr_rint(rt1[u]);
+        Gt[u] = (uint32_t)fpr_rint(rt2[u]);
+    }
+
+    return 1;
+}
+
+/*
+ * Solving the NTRU equation, top level. Upon entry, the F and G
+ * from the previous level should be in the tmp[] array.
+ *
+ * Returned value: 1 on success, 0 on error. (This specialization has
+ * no failure path and always returns 1; the caller still validates
+ * the resulting coefficients against its bound.)
+ */
+static int
+solve_NTRU_binary_depth0(unsigned logn,
+    const int8_t *f, const int8_t *g, uint32_t *tmp) {
+    size_t n, hn, u;
+    uint32_t p, p0i, R2;
+    uint32_t *Fp, *Gp, *t1, *t2, *t3, *t4, *t5;
+    uint32_t *gm, *igm, *ft, *gt;
+    fpr *rt2, *rt3;
+
+    n = (size_t)1 << logn;
+    hn = n >> 1;
+
+    /*
+     * Equations are:
+     *
+     *   f' = f0^2 - X^2*f1^2
+     *   g' = g0^2 - X^2*g1^2
+     *   F' and G' are a solution to f'G' - g'F' = q (from deeper levels)
+     *   F = F'*(g0 - X*g1)
+     *   G = G'*(f0 - X*f1)
+     *
+     * f0, f1, g0, g1, f', g', F' and G' are all "compressed" to
+     * degree N/2 (their odd-indexed coefficients are all zero).
+     *
+     * Everything should fit in 31-bit integers, hence we can just use
+     * the first small prime p = 2147473409.
+     */
+    p = PRIMES[0].p;
+    p0i = modp_ninv31(p);
+    R2 = modp_R2(p, p0i);
+
+    Fp = tmp;
+    Gp = Fp + hn;
+    ft = Gp + hn;
+    gt = ft + n;
+    gm = gt + n;
+    igm = gm + n;
+
+    modp_mkgm2(gm, igm, logn, PRIMES[0].g, p, p0i);
+
+    /*
+     * Convert F' and G' in NTT representation.
+     */
+    for (u = 0; u < hn; u ++) {
+        Fp[u] = modp_set(zint_one_to_plain(Fp + u), p);
+        Gp[u] = modp_set(zint_one_to_plain(Gp + u), p);
+    }
+    modp_NTT2(Fp, gm, logn - 1, p, p0i);
+    modp_NTT2(Gp, gm, logn - 1, p, p0i);
+
+    /*
+     * Load f and g and convert them to NTT representation.
+     */
+    for (u = 0; u < n; u ++) {
+        ft[u] = modp_set(f[u], p);
+        gt[u] = modp_set(g[u], p);
+    }
+    modp_NTT2(ft, gm, logn, p, p0i);
+    modp_NTT2(gt, gm, logn, p, p0i);
+
+    /*
+     * Build the unreduced F,G in ft and gt.
+     */
+    for (u = 0; u < n; u += 2) {
+        uint32_t ftA, ftB, gtA, gtB;
+        uint32_t mFp, mGp;
+
+        ftA = ft[u + 0];
+        ftB = ft[u + 1];
+        gtA = gt[u + 0];
+        gtB = gt[u + 1];
+        mFp = modp_montymul(Fp[u >> 1], R2, p, p0i);
+        mGp = modp_montymul(Gp[u >> 1], R2, p, p0i);
+        ft[u + 0] = modp_montymul(gtB, mFp, p, p0i);
+        ft[u + 1] = modp_montymul(gtA, mFp, p, p0i);
+        gt[u + 0] = modp_montymul(ftB, mGp, p, p0i);
+        gt[u + 1] = modp_montymul(ftA, mGp, p, p0i);
+    }
+    modp_iNTT2(ft, igm, logn, p, p0i);
+    modp_iNTT2(gt, igm, logn, p, p0i);
+
+    Gp = Fp + n;
+    t1 = Gp + n;
+    memmove(Fp, ft, 2 * n * sizeof * ft);
+
+    /*
+     * We now need to apply the Babai reduction. At that point,
+     * we have F and G in two n-word arrays.
+     *
+     * We can compute F*adj(f)+G*adj(g) and f*adj(f)+g*adj(g)
+     * modulo p, using the NTT. We still move memory around in
+     * order to save RAM.
+     */
+    t2 = t1 + n;
+    t3 = t2 + n;
+    t4 = t3 + n;
+    t5 = t4 + n;
+
+    /*
+     * Compute the NTT tables in t1 and t2. We do not keep t2
+     * (we'll recompute it later on).
+     */
+    modp_mkgm2(t1, t2, logn, PRIMES[0].g, p, p0i);
+
+    /*
+     * Convert F and G to NTT.
+     */
+    modp_NTT2(Fp, t1, logn, p, p0i);
+    modp_NTT2(Gp, t1, logn, p, p0i);
+
+    /*
+     * Load f and adj(f) in t4 and t5, and convert them to NTT
+     * representation.
+     */
+    t4[0] = t5[0] = modp_set(f[0], p);
+    for (u = 1; u < n; u ++) {
+        t4[u] = modp_set(f[u], p);
+        t5[n - u] = modp_set(-f[u], p);
+    }
+    modp_NTT2(t4, t1, logn, p, p0i);
+    modp_NTT2(t5, t1, logn, p, p0i);
+
+    /*
+     * Compute F*adj(f) in t2, and f*adj(f) in t3.
+     */
+    for (u = 0; u < n; u ++) {
+        uint32_t w;
+
+        w = modp_montymul(t5[u], R2, p, p0i);
+        t2[u] = modp_montymul(w, Fp[u], p, p0i);
+        t3[u] = modp_montymul(w, t4[u], p, p0i);
+    }
+
+    /*
+     * Load g and adj(g) in t4 and t5, and convert them to NTT
+     * representation.
+     */
+    t4[0] = t5[0] = modp_set(g[0], p);
+    for (u = 1; u < n; u ++) {
+        t4[u] = modp_set(g[u], p);
+        t5[n - u] = modp_set(-g[u], p);
+    }
+    modp_NTT2(t4, t1, logn, p, p0i);
+    modp_NTT2(t5, t1, logn, p, p0i);
+
+    /*
+     * Add G*adj(g) to t2, and g*adj(g) to t3.
+     */
+    for (u = 0; u < n; u ++) {
+        uint32_t w;
+
+        w = modp_montymul(t5[u], R2, p, p0i);
+        t2[u] = modp_add(t2[u],
+                modp_montymul(w, Gp[u], p, p0i), p);
+        t3[u] = modp_add(t3[u],
+                modp_montymul(w, t4[u], p, p0i), p);
+    }
+
+    /*
+     * Convert back t2 and t3 to normal representation (normalized
+     * around 0), and then move them to t1 and t2. We first need
+     * to recompute the inverse table for NTT.
+     */
+    modp_mkgm2(t1, t4, logn, PRIMES[0].g, p, p0i);
+    modp_iNTT2(t2, t4, logn, p, p0i);
+    modp_iNTT2(t3, t4, logn, p, p0i);
+    for (u = 0; u < n; u ++) {
+        t1[u] = (uint32_t)modp_norm(t2[u], p);
+        t2[u] = (uint32_t)modp_norm(t3[u], p);
+    }
+
+    /*
+     * At that point, array contents are:
+     *
+     *   F (NTT representation) (Fp)
+     *   G (NTT representation) (Gp)
+     *   F*adj(f)+G*adj(g) (t1)
+     *   f*adj(f)+g*adj(g) (t2)
+     *
+     * We want to divide t1 by t2. The result is not integral; it
+     * must be rounded. We thus need to use the FFT.
+     */
+
+    /*
+     * Get f*adj(f)+g*adj(g) in FFT representation. Since this
+     * polynomial is auto-adjoint, all its coordinates in FFT
+     * representation are actually real, so we can truncate off
+     * the imaginary parts.
+     */
+    rt3 = align_fpr(tmp, t3);
+    for (u = 0; u < n; u ++) {
+        rt3[u] = fpr_of(((int32_t *)t2)[u]);
+    }
+    PQCLEAN_FALCON512_CLEAN_FFT(rt3, logn);
+    rt2 = align_fpr(tmp, t2);
+    memmove(rt2, rt3, hn * sizeof * rt3);
+
+    /*
+     * Convert F*adj(f)+G*adj(g) in FFT representation.
+     */
+    rt3 = rt2 + hn;
+    for (u = 0; u < n; u ++) {
+        rt3[u] = fpr_of(((int32_t *)t1)[u]);
+    }
+    PQCLEAN_FALCON512_CLEAN_FFT(rt3, logn);
+
+    /*
+     * Compute (F*adj(f)+G*adj(g))/(f*adj(f)+g*adj(g)) and get
+     * its rounded normal representation in t1.
+     */
+    PQCLEAN_FALCON512_CLEAN_poly_div_autoadj_fft(rt3, rt2, logn);
+    PQCLEAN_FALCON512_CLEAN_iFFT(rt3, logn);
+    for (u = 0; u < n; u ++) {
+        t1[u] = modp_set((int32_t)fpr_rint(rt3[u]), p);
+    }
+
+    /*
+     * RAM contents are now:
+     *
+     *   F (NTT representation) (Fp)
+     *   G (NTT representation) (Gp)
+     *   k (t1)
+     *
+     * We want to compute F-k*f, and G-k*g.
+     */
+    t2 = t1 + n;
+    t3 = t2 + n;
+    t4 = t3 + n;
+    t5 = t4 + n;
+    modp_mkgm2(t2, t3, logn, PRIMES[0].g, p, p0i);
+    for (u = 0; u < n; u ++) {
+        t4[u] = modp_set(f[u], p);
+        t5[u] = modp_set(g[u], p);
+    }
+    modp_NTT2(t1, t2, logn, p, p0i);
+    modp_NTT2(t4, t2, logn, p, p0i);
+    modp_NTT2(t5, t2, logn, p, p0i);
+    for (u = 0; u < n; u ++) {
+        uint32_t kw;
+
+        kw = modp_montymul(t1[u], R2, p, p0i);
+        Fp[u] = modp_sub(Fp[u],
+                modp_montymul(kw, t4[u], p, p0i), p);
+        Gp[u] = modp_sub(Gp[u],
+                modp_montymul(kw, t5[u], p, p0i), p);
+    }
+    modp_iNTT2(Fp, t3, logn, p, p0i);
+    modp_iNTT2(Gp, t3, logn, p, p0i);
+    for (u = 0; u < n; u ++) {
+        Fp[u] = (uint32_t)modp_norm(Fp[u], p);
+        Gp[u] = (uint32_t)modp_norm(Gp[u], p);
+    }
+
+    return 1;
+}
+
+/*
+ * Solve the NTRU equation. Returned value is 1 on success, 0 on error.
+ * G can be NULL, in which case that value is computed but not returned.
+ * If any of the coefficients of F and G exceeds lim (in absolute value),
+ * then 0 is returned.
+ */
+static int
+solve_NTRU(unsigned logn, int8_t *F, int8_t *G,
+    const int8_t *f, const int8_t *g, int lim, uint32_t *tmp) {
+    size_t n, u;
+    uint32_t *ft, *gt, *Ft, *Gt, *gm;
+    uint32_t p, p0i, r;
+    const small_prime *primes;
+
+    n = MKN(logn);
+
+    if (!solve_NTRU_deepest(logn, f, g, tmp)) {
+        return 0;
+    }
+
+    /*
+     * For logn <= 2, we need to use solve_NTRU_intermediate()
+     * directly, because coefficients are a bit too large and
+     * do not fit the hypotheses in solve_NTRU_binary_depth0().
+     */
+    if (logn <= 2) {
+        unsigned depth;
+
+        depth = logn;
+        while (depth -- > 0) {
+            if (!solve_NTRU_intermediate(logn, f, g, depth, tmp)) {
+                return 0;
+            }
+        }
+    } else {
+        unsigned depth;
+
+        depth = logn;
+        while (depth -- > 2) {
+            if (!solve_NTRU_intermediate(logn, f, g, depth, tmp)) {
+                return 0;
+            }
+        }
+        if (!solve_NTRU_binary_depth1(logn, f, g, tmp)) {
+            return 0;
+        }
+        if (!solve_NTRU_binary_depth0(logn, f, g, tmp)) {
+            return 0;
+        }
+    }
+
+    /*
+     * If no buffer has been provided for G, use a temporary one.
+     */
+    if (G == NULL) {
+        G = (int8_t *)(tmp + 2 * n);
+    }
+
+    /*
+     * Final F and G are in fk->tmp, one word per coefficient
+     * (signed value over 31 bits).
+     */
+    if (!poly_big_to_small(F, tmp, lim, logn)
+            || !poly_big_to_small(G, tmp + n, lim, logn)) {
+        return 0;
+    }
+
+    /*
+     * Verify that the NTRU equation is fulfilled. Since all elements
+     * have short lengths, verifying modulo a small prime p works, and
+     * allows using the NTT.
+     *
+     * We put Gt[] first in tmp[], and process it first, so that it does
+     * not overlap with G[] in case we allocated it ourselves.
+     */
+    Gt = tmp;
+    ft = Gt + n;
+    gt = ft + n;
+    Ft = gt + n;
+    gm = Ft + n;
+
+    primes = PRIMES;
+    p = primes[0].p;
+    p0i = modp_ninv31(p);
+    modp_mkgm2(gm, tmp, logn, primes[0].g, p, p0i);
+    for (u = 0; u < n; u ++) {
+        Gt[u] = modp_set(G[u], p);
+    }
+    for (u = 0; u < n; u ++) {
+        ft[u] = modp_set(f[u], p);
+        gt[u] = modp_set(g[u], p);
+        Ft[u] = modp_set(F[u], p);
+    }
+    modp_NTT2(ft, gm, logn, p, p0i);
+    modp_NTT2(gt, gm, logn, p, p0i);
+    modp_NTT2(Ft, gm, logn, p, p0i);
+    modp_NTT2(Gt, gm, logn, p, p0i);
+
+    /*
+     * r is the constant q = 12289 passed through modp_montymul()
+     * with operand 1, so that it carries the same Montgomery
+     * scaling factor as the single-montymul products computed
+     * below; the comparison is therefore consistent.
+     */
+    r = modp_montymul(12289, 1, p, p0i);
+    for (u = 0; u < n; u ++) {
+        uint32_t z;
+
+        /* Check f*G - g*F == q at every NTT point. */
+        z = modp_sub(modp_montymul(ft[u], Gt[u], p, p0i),
+                modp_montymul(gt[u], Ft[u], p, p0i), p);
+        if (z != r) {
+            return 0;
+        }
+    }
+
+    return 1;
+}
+
+/*
+ * Generate a random polynomial with a Gaussian distribution. This function
+ * also makes sure that the resultant of the polynomial with phi is odd.
+ */
+static void
+poly_small_mkgauss(RNG_CONTEXT *rng, int8_t *f, unsigned logn) {
+    size_t n, u;
+    unsigned mod2;
+
+    n = MKN(logn);
+    mod2 = 0;   /* running parity of the coefficients generated so far */
+    for (u = 0; u < n; u ++) {
+        int s;
+
+restart:
+        s = mkgauss(rng, logn);
+
+        /*
+         * We need the coefficient to fit within -127..+127;
+         * realistically, this is always the case except for
+         * the very low degrees (N = 2 or 4), for which there
+         * is no real security anyway.
+         */
+        if (s < -127 || s > 127) {
+            goto restart;
+        }
+
+        /*
+         * We need the sum of all coefficients to be odd (i.e. 1
+         * modulo 2); otherwise, the resultant of the polynomial
+         * with X^N+1 will be even, and the binary GCD will fail.
+         * The last coefficient is resampled until the total
+         * parity comes out odd.
+         */
+        if (u == n - 1) {
+            if ((mod2 ^ (unsigned)(s & 1)) == 0) {
+                goto restart;
+            }
+        } else {
+            mod2 ^= (unsigned)(s & 1);
+        }
+        f[u] = (int8_t)s;
+    }
+}
+
+/* see falcon.h */
+void
+PQCLEAN_FALCON512_CLEAN_keygen(inner_shake256_context *rng,
+    int8_t *f, int8_t *g, int8_t *F, int8_t *G, uint16_t *h,
+    unsigned logn, uint8_t *tmp) {
+    /*
+     * Algorithm is the following:
+     *
+     *  - Generate f and g with the Gaussian distribution.
+     *
+     *  - If either Res(f,phi) or Res(g,phi) is even, try again.
+     *
+     *  - If ||(f,g)|| is too large, try again.
+     *
+     *  - If ||B~_{f,g}|| is too large, try again.
+     *
+     *  - If f is not invertible mod phi mod q, try again.
+     *
+     *  - Compute h = g/f mod phi mod q.
+     *
+     *  - Solve the NTRU equation fG - gF = q; if the solving fails,
+     *    try again. Usual failure condition is when Res(f,phi)
+     *    and Res(g,phi) are not prime to each other.
+     */
+    size_t n, u;
+    uint16_t *h2, *tmp2;
+    RNG_CONTEXT *rc;
+
+    n = MKN(logn);
+    rc = rng;
+
+    /*
+     * We need to generate f and g randomly, until we find values
+     * such that the norm of (g,-f), and of the orthogonalized
+     * vector, are satisfying. The orthogonalized vector is:
+     *   (q*adj(f)/(f*adj(f)+g*adj(g)), q*adj(g)/(f*adj(f)+g*adj(g)))
+     * (it is actually the (N+1)-th row of the Gram-Schmidt basis).
+     *
+     * In the binary case, coefficients of f and g are generated
+     * independently of each other, with a discrete Gaussian
+     * distribution of standard deviation 1.17*sqrt(q/(2*N)). Then,
+     * the two vectors have expected norm 1.17*sqrt(q), which is
+     * also our acceptance bound: we require both vectors to be no
+     * larger than that (this will be satisfied about 1/4th of the
+     * time, thus we expect sampling new (f,g) about 4 times for that
+     * step).
+     *
+     * We require that Res(f,phi) and Res(g,phi) are both odd (the
+     * NTRU equation solver requires it).
+     */
+    for (;;) {
+        fpr *rt1, *rt2, *rt3;
+        fpr bnorm;
+        uint32_t normf, normg, norm;
+        int lim;
+
+        /*
+         * The poly_small_mkgauss() function makes sure
+         * that the sum of coefficients is 1 modulo 2
+         * (i.e. the resultant of the polynomial with phi
+         * will be odd).
+         */
+        poly_small_mkgauss(rc, f, logn);
+        poly_small_mkgauss(rc, g, logn);
+
+        /*
+         * Verify that all coefficients are within the bounds
+         * defined in max_fg_bits. This is the case with
+         * overwhelming probability; this guarantees that the
+         * key will be encodable with FALCON_COMP_TRIM.
+         */
+        lim = 1 << (PQCLEAN_FALCON512_CLEAN_max_fg_bits[logn] - 1);
+        for (u = 0; u < n; u ++) {
+            /*
+             * We can use non-CT tests since on any failure
+             * we will discard f and g.
+             */
+            if (f[u] >= lim || f[u] <= -lim
+                    || g[u] >= lim || g[u] <= -lim) {
+                lim = -1;
+                break;
+            }
+        }
+        if (lim < 0) {
+            continue;
+        }
+
+        /*
+         * Bound is 1.17*sqrt(q). We compute the squared
+         * norms. With q = 12289, the squared bound is:
+         *    (1.17^2)* 12289 = 16822.4121
+         * Since f and g are integral, the squared norm
+         * of (g,-f) is an integer.
+         *
+         * If either squared norm has its top bit set (a
+         * potential overflow of the 31-bit range), the OR
+         * with the derived all-ones mask saturates 'norm'
+         * so that the bound test below fails and (f,g) is
+         * resampled.
+         */
+        normf = poly_small_sqnorm(f, logn);
+        normg = poly_small_sqnorm(g, logn);
+        norm = (normf + normg) | -((normf | normg) >> 31);
+        if (norm >= 16823) {
+            continue;
+        }
+
+        /*
+         * We compute the orthogonalized vector norm.
+         */
+        rt1 = (fpr *)tmp;
+        rt2 = rt1 + n;
+        rt3 = rt2 + n;
+        poly_small_to_fp(rt1, f, logn);
+        poly_small_to_fp(rt2, g, logn);
+        PQCLEAN_FALCON512_CLEAN_FFT(rt1, logn);
+        PQCLEAN_FALCON512_CLEAN_FFT(rt2, logn);
+        PQCLEAN_FALCON512_CLEAN_poly_invnorm2_fft(rt3, rt1, rt2, logn);
+        PQCLEAN_FALCON512_CLEAN_poly_adj_fft(rt1, logn);
+        PQCLEAN_FALCON512_CLEAN_poly_adj_fft(rt2, logn);
+        PQCLEAN_FALCON512_CLEAN_poly_mulconst(rt1, fpr_q, logn);
+        PQCLEAN_FALCON512_CLEAN_poly_mulconst(rt2, fpr_q, logn);
+        PQCLEAN_FALCON512_CLEAN_poly_mul_autoadj_fft(rt1, rt3, logn);
+        PQCLEAN_FALCON512_CLEAN_poly_mul_autoadj_fft(rt2, rt3, logn);
+        PQCLEAN_FALCON512_CLEAN_iFFT(rt1, logn);
+        PQCLEAN_FALCON512_CLEAN_iFFT(rt2, logn);
+        bnorm = fpr_zero;
+        for (u = 0; u < n; u ++) {
+            bnorm = fpr_add(bnorm, fpr_sqr(rt1[u]));
+            bnorm = fpr_add(bnorm, fpr_sqr(rt2[u]));
+        }
+        if (!fpr_lt(bnorm, fpr_bnorm_max)) {
+            continue;
+        }
+
+        /*
+         * Compute public key h = g/f mod X^N+1 mod q. If this
+         * fails, we must restart.
+         */
+        if (h == NULL) {
+            h2 = (uint16_t *)tmp;
+            tmp2 = h2 + n;
+        } else {
+            h2 = h;
+            tmp2 = (uint16_t *)tmp;
+        }
+        if (!PQCLEAN_FALCON512_CLEAN_compute_public(h2, f, g, logn, (uint8_t *)tmp2)) {
+            continue;
+        }
+
+        /*
+         * Solve the NTRU equation to get F and G.
+         */
+        lim = (1 << (PQCLEAN_FALCON512_CLEAN_max_FG_bits[logn] - 1)) - 1;
+        if (!solve_NTRU(logn, F, G, f, g, lim, (uint32_t *)tmp)) {
+            continue;
+        }
+
+        /*
+         * Key pair is generated.
+         */
+        break;
+    }
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/manifest.mn b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/manifest.mn
new file mode 100644
index 000000000..979f06cc6
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/manifest.mn
@@ -0,0 +1,32 @@
+# DO NOT EDIT: generated from manifest.mn.subdirs.template
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+CORE_DEPTH = ../../../../../..
+
+MODULE = oqs
+
+LIBRARY_NAME = oqs_src_sig_falcon_pqclean_falcon-512_clean
+SHARED_LIBRARY = $(NULL)
+
+CSRCS = \
+ codec.c \
+ common.c \
+ fft.c \
+ fpr.c \
+ inner.c \
+ keygen.c \
+ pqclean.c \
+ rng.c \
+ sign.c \
+ vrfy.c \
+ $(NULL)
+
+# only add module debugging in opt builds if DEBUG_PKCS11 is set
+ifdef DEBUG_PKCS11
+ DEFINES += -DDEBUG_MODULE
+endif
+
+# This part of the code, including all sub-dirs, can be optimized for size
+export ALLOW_OPT_CODE_SIZE = 1
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/pqclean.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/pqclean.c
new file mode 100644
index 000000000..3abf68149
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/pqclean.c
@@ -0,0 +1,384 @@
+#include "api.h"
+#include "inner.h"
+#include "randombytes.h"
+#include <stddef.h>
+#include <string.h>
+/*
+ * Wrapper for implementing the PQClean API.
+ */
+
+
+
+#define NONCELEN 40
+#define SEEDLEN 48
+
+/*
+ * Encoding formats (nnnn = log of degree, 9 for Falcon-512, 10 for Falcon-1024)
+ *
+ * private key:
+ * header byte: 0101nnnn
+ * private f (6 or 5 bits by element, depending on degree)
+ * private g (6 or 5 bits by element, depending on degree)
+ * private F (8 bits by element)
+ *
+ * public key:
+ * header byte: 0000nnnn
+ * public h (14 bits by element)
+ *
+ * signature:
+ * header byte: 0011nnnn
+ * nonce 40 bytes
+ * value (12 bits by element)
+ *
+ * message + signature:
+ * signature length (2 bytes, big-endian)
+ * nonce 40 bytes
+ * message
+ * header byte: 0010nnnn
+ * value (12 bits by element)
+ * (signature length is 1+len(value), not counting the nonce)
+ */
+
+/* see api.h */
+int
+PQCLEAN_FALCON512_CLEAN_crypto_sign_keypair(unsigned char *pk, unsigned char *sk) {
+ union {
+ uint8_t b[FALCON_KEYGEN_TEMP_9];
+ uint64_t dummy_u64;
+ fpr dummy_fpr;
+ } tmp;
+ int8_t f[512], g[512], F[512];
+ uint16_t h[512];
+ unsigned char seed[SEEDLEN];
+ inner_shake256_context rng;
+ size_t u, v;
+
+ /*
+ * Generate key pair.
+ */
+ randombytes(seed, sizeof seed);
+ inner_shake256_init(&rng);
+ inner_shake256_inject(&rng, seed, sizeof seed);
+ inner_shake256_flip(&rng);
+ PQCLEAN_FALCON512_CLEAN_keygen(&rng, f, g, F, NULL, h, 9, tmp.b);
+ inner_shake256_ctx_release(&rng);
+
+ /*
+ * Encode private key.
+ */
+ sk[0] = 0x50 + 9;
+ u = 1;
+ v = PQCLEAN_FALCON512_CLEAN_trim_i8_encode(
+ sk + u, PQCLEAN_FALCON512_CLEAN_CRYPTO_SECRETKEYBYTES - u,
+ f, 9, PQCLEAN_FALCON512_CLEAN_max_fg_bits[9]);
+ if (v == 0) {
+ return -1;
+ }
+ u += v;
+ v = PQCLEAN_FALCON512_CLEAN_trim_i8_encode(
+ sk + u, PQCLEAN_FALCON512_CLEAN_CRYPTO_SECRETKEYBYTES - u,
+ g, 9, PQCLEAN_FALCON512_CLEAN_max_fg_bits[9]);
+ if (v == 0) {
+ return -1;
+ }
+ u += v;
+ v = PQCLEAN_FALCON512_CLEAN_trim_i8_encode(
+ sk + u, PQCLEAN_FALCON512_CLEAN_CRYPTO_SECRETKEYBYTES - u,
+ F, 9, PQCLEAN_FALCON512_CLEAN_max_FG_bits[9]);
+ if (v == 0) {
+ return -1;
+ }
+ u += v;
+ if (u != PQCLEAN_FALCON512_CLEAN_CRYPTO_SECRETKEYBYTES) {
+ return -1;
+ }
+
+ /*
+ * Encode public key.
+ */
+ pk[0] = 0x00 + 9;
+ v = PQCLEAN_FALCON512_CLEAN_modq_encode(
+ pk + 1, PQCLEAN_FALCON512_CLEAN_CRYPTO_PUBLICKEYBYTES - 1,
+ h, 9);
+ if (v != PQCLEAN_FALCON512_CLEAN_CRYPTO_PUBLICKEYBYTES - 1) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Compute the signature. nonce[] receives the nonce and must have length
+ * NONCELEN bytes. sigbuf[] receives the signature value (without nonce
+ * or header byte), with *sigbuflen providing the maximum value length and
+ * receiving the actual value length.
+ *
+ * If a signature could be computed but not encoded because it would
+ * exceed the output buffer size, then a new signature is computed. If
+ * the provided buffer size is too low, this could loop indefinitely, so
+ * the caller must provide a size that can accommodate signatures with a
+ * large enough probability.
+ *
+ * Return value: 0 on success, -1 on error.
+ */
static int
do_sign(uint8_t *nonce, uint8_t *sigbuf, size_t *sigbuflen,
        const uint8_t *m, size_t mlen, const uint8_t *sk) {
    union {
        uint8_t b[72 * 512];
        uint64_t dummy_u64;
        fpr dummy_fpr;     /* extra members force 64-bit alignment of b[] */
    } tmp;
    int8_t f[512], g[512], F[512], G[512];
    union {
        int16_t sig[512];  /* signature vector (overlays the hash buffer) */
        uint16_t hm[512];  /* hashed-message point */
    } r;
    unsigned char seed[SEEDLEN];
    inner_shake256_context sc;
    size_t u, v;

    /*
     * Decode the private key: header byte 0101nnnn (nnnn = 9),
     * then f and g (trimmed widths) and F. G is not stored; it is
     * recomputed below with complete_private().
     */
    if (sk[0] != 0x50 + 9) {
        return -1;
    }
    u = 1;
    v = PQCLEAN_FALCON512_CLEAN_trim_i8_decode(
            f, 9, PQCLEAN_FALCON512_CLEAN_max_fg_bits[9],
            sk + u, PQCLEAN_FALCON512_CLEAN_CRYPTO_SECRETKEYBYTES - u);
    if (v == 0) {
        return -1;
    }
    u += v;
    v = PQCLEAN_FALCON512_CLEAN_trim_i8_decode(
            g, 9, PQCLEAN_FALCON512_CLEAN_max_fg_bits[9],
            sk + u, PQCLEAN_FALCON512_CLEAN_CRYPTO_SECRETKEYBYTES - u);
    if (v == 0) {
        return -1;
    }
    u += v;
    v = PQCLEAN_FALCON512_CLEAN_trim_i8_decode(
            F, 9, PQCLEAN_FALCON512_CLEAN_max_FG_bits[9],
            sk + u, PQCLEAN_FALCON512_CLEAN_CRYPTO_SECRETKEYBYTES - u);
    if (v == 0) {
        return -1;
    }
    u += v;
    /* The three components must consume the secret key exactly. */
    if (u != PQCLEAN_FALCON512_CLEAN_CRYPTO_SECRETKEYBYTES) {
        return -1;
    }
    if (!PQCLEAN_FALCON512_CLEAN_complete_private(G, f, g, F, 9, tmp.b)) {
        return -1;
    }

    /*
     * Create a random nonce (40 bytes).
     */
    randombytes(nonce, NONCELEN);

    /*
     * Hash nonce + message into a vector (point on the lattice).
     */
    inner_shake256_init(&sc);
    inner_shake256_inject(&sc, nonce, NONCELEN);
    inner_shake256_inject(&sc, m, mlen);
    inner_shake256_flip(&sc);
    PQCLEAN_FALCON512_CLEAN_hash_to_point_ct(&sc, r.hm, 9, tmp.b);
    inner_shake256_ctx_release(&sc);

    /*
     * Initialize a RNG (fresh SHAKE256 instance, seeded from the
     * system RNG) that drives the Gaussian sampler.
     */
    randombytes(seed, sizeof seed);
    inner_shake256_init(&sc);
    inner_shake256_inject(&sc, seed, sizeof seed);
    inner_shake256_flip(&sc);

    /*
     * Compute and return the signature. This loops until a signature
     * value is found that fits in the provided buffer: comp_encode()
     * returns 0 when the compressed value would not fit, in which
     * case a new signature is sampled (see the caller-size caveat in
     * the comment above this function).
     */
    for (;;) {
        PQCLEAN_FALCON512_CLEAN_sign_dyn(r.sig, &sc, f, g, F, G, r.hm, 9, tmp.b);
        v = PQCLEAN_FALCON512_CLEAN_comp_encode(sigbuf, *sigbuflen, r.sig, 9);
        if (v != 0) {
            inner_shake256_ctx_release(&sc);
            *sigbuflen = v;
            return 0;
        }
    }
}
+
+/*
 * Verify a signature. The nonce has size NONCELEN bytes. sigbuf[]
+ * (of size sigbuflen) contains the signature value, not including the
+ * header byte or nonce. Return value is 0 on success, -1 on error.
+ */
+static int
+do_verify(
+ const uint8_t *nonce, const uint8_t *sigbuf, size_t sigbuflen,
+ const uint8_t *m, size_t mlen, const uint8_t *pk) {
+ union {
+ uint8_t b[2 * 512];
+ uint64_t dummy_u64;
+ fpr dummy_fpr;
+ } tmp;
+ uint16_t h[512], hm[512];
+ int16_t sig[512];
+ inner_shake256_context sc;
+
+ /*
+ * Decode public key.
+ */
+ if (pk[0] != 0x00 + 9) {
+ return -1;
+ }
+ if (PQCLEAN_FALCON512_CLEAN_modq_decode(h, 9,
+ pk + 1, PQCLEAN_FALCON512_CLEAN_CRYPTO_PUBLICKEYBYTES - 1)
+ != PQCLEAN_FALCON512_CLEAN_CRYPTO_PUBLICKEYBYTES - 1) {
+ return -1;
+ }
+ PQCLEAN_FALCON512_CLEAN_to_ntt_monty(h, 9);
+
+ /*
+ * Decode signature.
+ */
+ if (sigbuflen == 0) {
+ return -1;
+ }
+ if (PQCLEAN_FALCON512_CLEAN_comp_decode(sig, 9, sigbuf, sigbuflen) != sigbuflen) {
+ return -1;
+ }
+
+ /*
+ * Hash nonce + message into a vector.
+ */
+ inner_shake256_init(&sc);
+ inner_shake256_inject(&sc, nonce, NONCELEN);
+ inner_shake256_inject(&sc, m, mlen);
+ inner_shake256_flip(&sc);
+ PQCLEAN_FALCON512_CLEAN_hash_to_point_ct(&sc, hm, 9, tmp.b);
+ inner_shake256_ctx_release(&sc);
+
+ /*
+ * Verify signature.
+ */
+ if (!PQCLEAN_FALCON512_CLEAN_verify_raw(hm, sig, h, 9, tmp.b)) {
+ return -1;
+ }
+ return 0;
+}
+
+/* see api.h */
+int
+PQCLEAN_FALCON512_CLEAN_crypto_sign_signature(
+ uint8_t *sig, size_t *siglen,
+ const uint8_t *m, size_t mlen, const uint8_t *sk) {
+ /*
+ * The PQCLEAN_FALCON512_CLEAN_CRYPTO_BYTES constant is used for
+ * the signed message object (as produced by PQCLEAN_FALCON512_CLEAN_crypto_sign())
+ * and includes a two-byte length value, so we take care here
+ * to only generate signatures that are two bytes shorter than
+ * the maximum. This is done to ensure that PQCLEAN_FALCON512_CLEAN_crypto_sign()
+ * and PQCLEAN_FALCON512_CLEAN_crypto_sign_signature() produce the exact same signature
+ * value, if used on the same message, with the same private key,
+ * and using the same output from randombytes() (this is for
+ * reproducibility of tests).
+ */
+ size_t vlen;
+
+ vlen = PQCLEAN_FALCON512_CLEAN_CRYPTO_BYTES - NONCELEN - 3;
+ if (do_sign(sig + 1, sig + 1 + NONCELEN, &vlen, m, mlen, sk) < 0) {
+ return -1;
+ }
+ sig[0] = 0x30 + 9;
+ *siglen = 1 + NONCELEN + vlen;
+ return 0;
+}
+
+/* see api.h */
+int
+PQCLEAN_FALCON512_CLEAN_crypto_sign_verify(
+ const uint8_t *sig, size_t siglen,
+ const uint8_t *m, size_t mlen, const uint8_t *pk) {
+ if (siglen < 1 + NONCELEN) {
+ return -1;
+ }
+ if (sig[0] != 0x30 + 9) {
+ return -1;
+ }
+ return do_verify(sig + 1,
+ sig + 1 + NONCELEN, siglen - 1 - NONCELEN, m, mlen, pk);
+}
+
+/* see api.h */
+int
+PQCLEAN_FALCON512_CLEAN_crypto_sign(
+ uint8_t *sm, size_t *smlen,
+ const uint8_t *m, size_t mlen, const uint8_t *sk) {
+ uint8_t *pm, *sigbuf;
+ size_t sigbuflen;
+
+ /*
+ * Move the message to its final location; this is a memmove() so
+ * it handles overlaps properly.
+ */
+ memmove(sm + 2 + NONCELEN, m, mlen);
+ pm = sm + 2 + NONCELEN;
+ sigbuf = pm + 1 + mlen;
+ sigbuflen = PQCLEAN_FALCON512_CLEAN_CRYPTO_BYTES - NONCELEN - 3;
+ if (do_sign(sm + 2, sigbuf, &sigbuflen, pm, mlen, sk) < 0) {
+ return -1;
+ }
+ pm[mlen] = 0x20 + 9;
+ sigbuflen ++;
+ sm[0] = (uint8_t)(sigbuflen >> 8);
+ sm[1] = (uint8_t)sigbuflen;
+ *smlen = mlen + 2 + NONCELEN + sigbuflen;
+ return 0;
+}
+
+/* see api.h */
int
PQCLEAN_FALCON512_CLEAN_crypto_sign_open(
    uint8_t *m, size_t *mlen,
    const uint8_t *sm, size_t smlen, const uint8_t *pk) {
    const uint8_t *sigbuf;
    size_t pmlen, sigbuflen;

    /* Minimum: 2-byte length, 40-byte nonce, 1-byte signature header. */
    if (smlen < 3 + NONCELEN) {
        return -1;
    }
    /*
     * Big-endian stored length = 1 (header byte) + len(value); it
     * must leave room for the length field and nonce, and be at
     * least 2 (header byte plus a non-empty value).
     */
    sigbuflen = ((size_t)sm[0] << 8) | (size_t)sm[1];
    if (sigbuflen < 2 || sigbuflen > (smlen - NONCELEN - 2)) {
        return -1;
    }
    /* Drop the header byte from the count; what remains is the value. */
    sigbuflen --;
    pmlen = smlen - NONCELEN - 3 - sigbuflen;
    /* The signature header byte 0010nnnn sits right after the message. */
    if (sm[2 + NONCELEN + pmlen] != 0x20 + 9) {
        return -1;
    }
    sigbuf = sm + 2 + NONCELEN + pmlen + 1;

    /*
     * The 2-byte length header and the one-byte signature header
     * have been verified. Nonce is at sm+2, followed by the message
     * itself. Message length is in pmlen. sigbuf/sigbuflen point to
     * the signature value (excluding the header byte).
     */
    if (do_verify(sm + 2, sigbuf, sigbuflen,
                  sm + 2 + NONCELEN, pmlen, pk) < 0) {
        return -1;
    }

    /*
     * Signature is correct, we just have to copy/move the message
     * to its final destination. The memmove() properly handles
     * overlaps.
     */
    memmove(m, sm + 2 + NONCELEN, pmlen);
    *mlen = pmlen;
    return 0;
}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/pqclean_falcon-512_clean.gyp b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/pqclean_falcon-512_clean.gyp
new file mode 100644
index 000000000..ec88d54ad
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/pqclean_falcon-512_clean.gyp
@@ -0,0 +1,48 @@
+# DO NOT EDIT: generated from subdir.gyp.template
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
{
  'includes': [
    '../../../../../../coreconf/config.gypi'
  ],
  'targets': [
    {
      'target_name': 'oqs_src_sig_falcon_pqclean_falcon-512_clean',
      'type': 'static_library',
      'sources': [
        'codec.c',
        'common.c',
        'fft.c',
        'fpr.c',
        'inner.c',
        'keygen.c',
        'pqclean.c',
        'rng.c',
        'sign.c',
        'vrfy.c',
      ],
      'dependencies': [
        '<(DEPTH)/exports.gyp:nss_exports'
      ]
    }
  ],
  'target_defaults': {
    'defines': [
    ],
    'include_dirs': [
      '<(DEPTH)/lib/liboqs/src/common/pqclean_shims',
      '<(DEPTH)/lib/liboqs/src/common/sha3/xkcp_low/KeccakP-1600/plain-64bits',
    ],
    # Fix: a condition list may not appear as a bare dict entry; GYP
    # requires it under a 'conditions' key.
    'conditions': [
      [ 'OS=="mac"', {
        'defines': [
          'OQS_HAVE_POSIX_MEMALIGN',
          'OQS_HAVE_ALIGNED_ALLOC',
          'OQS_HAVE_MEMALIGN'
        ]
      }]
    ]
  },
  'variables': {
    'module': 'oqs'
  }
}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/rng.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/rng.c
new file mode 100644
index 000000000..266db7572
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/rng.c
@@ -0,0 +1,201 @@
+#include "inner.h"
+#include <assert.h>
+/*
+ * PRNG and interface to the system RNG.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+
+/*
+ * Include relevant system header files. For Win32, this will also need
+ * linking with advapi32.dll, which we trigger with an appropriate #pragma.
+ */
+
+/* see inner.h */
int
PQCLEAN_FALCON512_CLEAN_get_seed(void *seed, size_t len) {
    /*
     * This build has no direct system-seed source, so the call can
     * succeed (return 1) only for a zero-length request, for which
     * there is nothing to produce; any other length fails (0).
     * Callers are expected to seed via randombytes() instead.
     */
    (void)seed;
    return (len == 0) ? 1 : 0;
}
+
+/* see inner.h */
void
PQCLEAN_FALCON512_CLEAN_prng_init(prng *p, inner_shake256_context *src) {
    /*
     * To ensure reproducibility for a given seed, we
     * must enforce little-endian interpretation of
     * the state words.
     */
    uint8_t tmp[56];
    uint64_t th, tl;
    int i;

    /* Draw 56 bytes of state material from the SHAKE256 source. */
    inner_shake256_extract(src, tmp, 56);
    for (i = 0; i < 14; i ++) {
        uint32_t w;

        /* Assemble each 32-bit state word in little-endian order. */
        w = (uint32_t)tmp[(i << 2) + 0]
            | ((uint32_t)tmp[(i << 2) + 1] << 8)
            | ((uint32_t)tmp[(i << 2) + 2] << 16)
            | ((uint32_t)tmp[(i << 2) + 3] << 24);
        /*
         * NOTE(review): these direct uint32_t/uint64_t stores assume
         * p->state.d is suitably aligned — presumably guaranteed by
         * the union layout of the prng type in inner.h; confirm there.
         */
        *(uint32_t *)(p->state.d + (i << 2)) = w;
    }
    /* Fold the last two words into one 64-bit counter at offset 48. */
    tl = *(uint32_t *)(p->state.d + 48);
    th = *(uint32_t *)(p->state.d + 52);
    *(uint64_t *)(p->state.d + 48) = tl + (th << 32);
    /* Pre-fill the output buffer so bytes can be served immediately. */
    PQCLEAN_FALCON512_CLEAN_prng_refill(p);
}
+
+/*
+ * PRNG based on ChaCha20.
+ *
+ * State consists in key (32 bytes) then IV (16 bytes) and block counter
+ * (8 bytes). Normally, we should not care about local endianness (this
+ * is for a PRNG), but for the NIST competition we need reproducible KAT
+ * vectors that work across architectures, so we enforce little-endian
+ * interpretation where applicable. Moreover, output words are "spread
+ * out" over the output buffer with the interleaving pattern that is
+ * naturally obtained from the AVX2 implementation that runs eight
+ * ChaCha20 instances in parallel.
+ *
+ * The block counter is XORed into the first 8 bytes of the IV.
+ */
void
PQCLEAN_FALCON512_CLEAN_prng_refill(prng *p) {

    /* ChaCha20 constant words ("expand 32-byte k"). */
    static const uint32_t CW[] = {
        0x61707865, 0x3320646e, 0x79622d32, 0x6b206574
    };

    uint64_t cc;
    size_t u;

    /*
     * State uses local endianness. Only the output bytes must be
     * converted to little endian (if used on a big-endian machine).
     */
    cc = *(uint64_t *)(p->state.d + 48);
    /* Run eight ChaCha20 instances, one per iteration of u. */
    for (u = 0; u < 8; u ++) {
        uint32_t state[16];
        size_t v;
        int i;

        /* Build the 16-word block: constants, then key/IV material. */
        memcpy(&state[0], CW, sizeof CW);
        memcpy(&state[4], p->state.d, 48);
        /* XOR the 64-bit block counter into the last two IV words. */
        state[14] ^= (uint32_t)cc;
        state[15] ^= (uint32_t)(cc >> 32);
        /* 20 rounds = 10 double rounds (column then diagonal). */
        for (i = 0; i < 10; i ++) {

#define QROUND(a, b, c, d)   do { \
        state[a] += state[b]; \
        state[d] ^= state[a]; \
        state[d] = (state[d] << 16) | (state[d] >> 16); \
        state[c] += state[d]; \
        state[b] ^= state[c]; \
        state[b] = (state[b] << 12) | (state[b] >> 20); \
        state[a] += state[b]; \
        state[d] ^= state[a]; \
        state[d] = (state[d] << 8) | (state[d] >> 24); \
        state[c] += state[d]; \
        state[b] ^= state[c]; \
        state[b] = (state[b] << 7) | (state[b] >> 25); \
    } while (0)

            QROUND( 0,  4,  8, 12);
            QROUND( 1,  5,  9, 13);
            QROUND( 2,  6, 10, 14);
            QROUND( 3,  7, 11, 15);
            QROUND( 0,  5, 10, 15);
            QROUND( 1,  6, 11, 12);
            QROUND( 2,  7,  8, 13);
            QROUND( 3,  4,  9, 14);

#undef QROUND

        }

        /* Feed-forward: add the input block back in, counter included. */
        for (v = 0; v < 4; v ++) {
            state[v] += CW[v];
        }
        for (v = 4; v < 14; v ++) {
            state[v] += ((uint32_t *)p->state.d)[v - 4];
        }
        state[14] += ((uint32_t *)p->state.d)[10]
                     ^ (uint32_t)cc;
        state[15] += ((uint32_t *)p->state.d)[11]
                     ^ (uint32_t)(cc >> 32);
        cc ++;

        /*
         * We mimic the interleaving that is used in the AVX2
         * implementation.
         */
        for (v = 0; v < 16; v ++) {
            p->buf.d[(u << 2) + (v << 5) + 0] =
                (uint8_t)state[v];
            p->buf.d[(u << 2) + (v << 5) + 1] =
                (uint8_t)(state[v] >> 8);
            p->buf.d[(u << 2) + (v << 5) + 2] =
                (uint8_t)(state[v] >> 16);
            p->buf.d[(u << 2) + (v << 5) + 3] =
                (uint8_t)(state[v] >> 24);
        }
    }
    /* Persist the advanced block counter for the next refill. */
    *(uint64_t *)(p->state.d + 48) = cc;


    /* Reset the read cursor: the whole buffer is fresh. */
    p->ptr = 0;
}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_prng_get_bytes(prng *p, void *dst, size_t len) {
+ uint8_t *buf;
+
+ buf = dst;
+ while (len > 0) {
+ size_t clen;
+
+ clen = (sizeof p->buf.d) - p->ptr;
+ if (clen > len) {
+ clen = len;
+ }
+ memcpy(buf, p->buf.d, clen);
+ buf += clen;
+ len -= clen;
+ p->ptr += clen;
+ if (p->ptr == sizeof p->buf.d) {
+ PQCLEAN_FALCON512_CLEAN_prng_refill(p);
+ }
+ }
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/sign.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/sign.c
new file mode 100644
index 000000000..469ae3b42
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/sign.c
@@ -0,0 +1,1254 @@
+#include "inner.h"
+
+/*
+ * Falcon signature generation.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+/* =================================================================== */
+
+/*
+ * Compute degree N from logarithm 'logn'.
+ */
+#define MKN(logn) ((size_t)1 << (logn))
+
+/* =================================================================== */
+/*
+ * Binary case:
+ * N = 2^logn
+ * phi = X^N+1
+ */
+
+/*
+ * Get the size of the LDL tree for an input with polynomials of size
+ * 2^logn. The size is expressed in the number of elements.
+ */
static inline unsigned
ffLDL_treesize(unsigned logn) {
    /*
     * Number of elements in the LDL tree for polynomials of size
     * 2^logn. A constant-polynomial "tree" (logn = 0) is a single
     * element; otherwise a node of 2^logn elements sits above two
     * subtrees of size s(logn - 1):
     *     s(0)    = 1
     *     s(logn) = 2^logn + 2*s(logn-1)
     * which solves to s(logn) = (logn + 1) * 2^logn.
     */
    unsigned layer = 1u << logn;

    return (logn + 1) * layer;
}
+
+/*
+ * Inner function for ffLDL_fft(). It expects the matrix to be both
+ * auto-adjoint and quasicyclic; also, it uses the source operands
+ * as modifiable temporaries.
+ *
+ * tmp[] must have room for at least one polynomial.
+ */
static void
ffLDL_fft_inner(fpr *tree,
                fpr *g0, fpr *g1, unsigned logn, fpr *tmp) {
    size_t n, hn;

    n = MKN(logn);
    if (n == 1) {
        /* Leaf: a 1x1 matrix is its own decomposition. */
        tree[0] = g0[0];
        return;
    }
    hn = n >> 1;

    /*
     * The LDL decomposition yields L (which is written in the tree)
     * and the diagonal of D. Since d00 = g0, we just write d11
     * into tmp.
     */
    PQCLEAN_FALCON512_CLEAN_poly_LDLmv_fft(tmp, tree, g0, g1, g0, logn);

    /*
     * Split d00 (currently in g0) and d11 (currently in tmp). We
     * reuse g0 and g1 as temporary storage spaces (as stated in the
     * header comment, the source operands are clobbered):
     *   d00 splits into g1, g1+hn
     *   d11 splits into g0, g0+hn
     */
    PQCLEAN_FALCON512_CLEAN_poly_split_fft(g1, g1 + hn, g0, logn);
    PQCLEAN_FALCON512_CLEAN_poly_split_fft(g0, g0 + hn, tmp, logn);

    /*
     * Each split result is the first row of a new auto-adjoint
     * quasicyclic matrix for the next recursive step. Child trees
     * are laid out right after this node's 2^logn elements.
     */
    ffLDL_fft_inner(tree + n,
                    g1, g1 + hn, logn - 1, tmp);
    ffLDL_fft_inner(tree + n + ffLDL_treesize(logn - 1),
                    g0, g0 + hn, logn - 1, tmp);
}
+
+/*
+ * Compute the ffLDL tree of an auto-adjoint matrix G. The matrix
+ * is provided as three polynomials (FFT representation).
+ *
+ * The "tree" array is filled with the computed tree, of size
+ * (logn+1)*(2^logn) elements (see ffLDL_treesize()).
+ *
+ * Input arrays MUST NOT overlap, except possibly the three unmodified
+ * arrays g00, g01 and g11. tmp[] should have room for at least three
+ * polynomials of 2^logn elements each.
+ */
static void
ffLDL_fft(fpr *tree, const fpr *g00,
          const fpr *g01, const fpr *g11,
          unsigned logn, fpr *tmp) {
    size_t n, hn;
    fpr *d00, *d11;

    n = MKN(logn);
    if (n == 1) {
        /* Degenerate case: single-element tree. */
        tree[0] = g00[0];
        return;
    }
    hn = n >> 1;
    /* Carve two working polynomials out of tmp; the rest (one more
       polynomial, per the header comment) is scratch for the calls. */
    d00 = tmp;
    d11 = tmp + n;
    tmp += n << 1;

    /* Copy g00 so the (const) inputs stay unmodified at this level. */
    memcpy(d00, g00, n * sizeof * g00);
    /* LDL: L goes into the tree node, d11 into its own buffer. */
    PQCLEAN_FALCON512_CLEAN_poly_LDLmv_fft(d11, tree, g00, g01, g11, logn);

    /* Split both diagonal elements into half-size polynomials. */
    PQCLEAN_FALCON512_CLEAN_poly_split_fft(tmp, tmp + hn, d00, logn);
    PQCLEAN_FALCON512_CLEAN_poly_split_fft(d00, d00 + hn, d11, logn);
    /* Move the split of d00 into d11's slot so tmp is free again. */
    memcpy(d11, tmp, n * sizeof * tmp);
    ffLDL_fft_inner(tree + n,
                    d11, d11 + hn, logn - 1, tmp);
    ffLDL_fft_inner(tree + n + ffLDL_treesize(logn - 1),
                    d00, d00 + hn, logn - 1, tmp);
}
+
+/*
+ * Normalize an ffLDL tree: each leaf of value x is replaced with
+ * sigma / sqrt(x).
+ */
+static void
+ffLDL_binary_normalize(fpr *tree, unsigned logn) {
+ /*
+ * TODO: make an iterative version.
+ */
+ size_t n;
+
+ n = MKN(logn);
+ if (n == 1) {
+ /*
+ * We actually store in the tree leaf the inverse of
+ * the value mandated by the specification: this
+ * saves a division both here and in the sampler.
+ */
+ tree[0] = fpr_mul(fpr_sqrt(tree[0]), fpr_inv_sigma);
+ } else {
+ ffLDL_binary_normalize(tree + n, logn - 1);
+ ffLDL_binary_normalize(tree + n + ffLDL_treesize(logn - 1),
+ logn - 1);
+ }
+}
+
+/* =================================================================== */
+
+/*
+ * Convert an integer polynomial (with small values) into the
+ * representation with complex numbers.
+ */
+static void
+smallints_to_fpr(fpr *r, const int8_t *t, unsigned logn) {
+ size_t n, u;
+
+ n = MKN(logn);
+ for (u = 0; u < n; u ++) {
+ r[u] = fpr_of(t[u]);
+ }
+}
+
+/*
+ * The expanded private key contains:
+ * - The B0 matrix (four elements)
+ * - The ffLDL tree
+ */
+
/*
 * Offsets (in fpr elements) of the parts of an expanded private key:
 * the four B0 polynomials b00, b01, b10, b11 (each 2^logn elements),
 * followed by the ffLDL tree.
 */
static inline size_t
skoff_b00(unsigned logn) {
    /* b00 starts the key; logn kept for signature symmetry. */
    (void)logn;
    return 0;
}

static inline size_t
skoff_b01(unsigned logn) {
    return MKN(logn);
}

static inline size_t
skoff_b10(unsigned logn) {
    return 2 * MKN(logn);
}

static inline size_t
skoff_b11(unsigned logn) {
    return 3 * MKN(logn);
}

static inline size_t
skoff_tree(unsigned logn) {
    return 4 * MKN(logn);
}
+
+/* see inner.h */
void
PQCLEAN_FALCON512_CLEAN_expand_privkey(fpr *expanded_key,
                                       const int8_t *f, const int8_t *g,
                                       const int8_t *F, const int8_t *G,
                                       unsigned logn, uint8_t *tmp) {
    size_t n;
    fpr *rf, *rg, *rF, *rG;
    fpr *b00, *b01, *b10, *b11;
    fpr *g00, *g01, *g11, *gxx;
    fpr *tree;

    n = MKN(logn);
    /* Expanded-key layout: the four B0 polynomials, then the tree. */
    b00 = expanded_key + skoff_b00(logn);
    b01 = expanded_key + skoff_b01(logn);
    b10 = expanded_key + skoff_b10(logn);
    b11 = expanded_key + skoff_b11(logn);
    tree = expanded_key + skoff_tree(logn);

    /*
     * We load the private key elements directly into the B0 matrix,
     * since B0 = [[g, -f], [G, -F]].
     */
    rf = b01;
    rg = b00;
    rF = b11;
    rG = b10;

    smallints_to_fpr(rf, f, logn);
    smallints_to_fpr(rg, g, logn);
    smallints_to_fpr(rF, F, logn);
    smallints_to_fpr(rG, G, logn);

    /*
     * Compute the FFT for the key elements, and negate f and F
     * (the minus signs in B0 above).
     */
    PQCLEAN_FALCON512_CLEAN_FFT(rf, logn);
    PQCLEAN_FALCON512_CLEAN_FFT(rg, logn);
    PQCLEAN_FALCON512_CLEAN_FFT(rF, logn);
    PQCLEAN_FALCON512_CLEAN_FFT(rG, logn);
    PQCLEAN_FALCON512_CLEAN_poly_neg(rf, logn);
    PQCLEAN_FALCON512_CLEAN_poly_neg(rF, logn);

    /*
     * The Gram matrix is G = B x B*. Formulas are:
     *   g00 = b00*adj(b00) + b01*adj(b01)
     *   g01 = b00*adj(b10) + b01*adj(b11)
     *   g10 = b10*adj(b00) + b11*adj(b01)
     *   g11 = b10*adj(b10) + b11*adj(b11)
     *
     * For historical reasons, this implementation uses
     * g00, g01 and g11 (upper triangle). gxx is a scratch
     * polynomial; tmp[] must hold four polynomials in all.
     */
    g00 = (fpr *)tmp;
    g01 = g00 + n;
    g11 = g01 + n;
    gxx = g11 + n;

    memcpy(g00, b00, n * sizeof * b00);
    PQCLEAN_FALCON512_CLEAN_poly_mulselfadj_fft(g00, logn);
    memcpy(gxx, b01, n * sizeof * b01);
    PQCLEAN_FALCON512_CLEAN_poly_mulselfadj_fft(gxx, logn);
    PQCLEAN_FALCON512_CLEAN_poly_add(g00, gxx, logn);

    memcpy(g01, b00, n * sizeof * b00);
    PQCLEAN_FALCON512_CLEAN_poly_muladj_fft(g01, b10, logn);
    memcpy(gxx, b01, n * sizeof * b01);
    PQCLEAN_FALCON512_CLEAN_poly_muladj_fft(gxx, b11, logn);
    PQCLEAN_FALCON512_CLEAN_poly_add(g01, gxx, logn);

    memcpy(g11, b10, n * sizeof * b10);
    PQCLEAN_FALCON512_CLEAN_poly_mulselfadj_fft(g11, logn);
    memcpy(gxx, b11, n * sizeof * b11);
    PQCLEAN_FALCON512_CLEAN_poly_mulselfadj_fft(gxx, logn);
    PQCLEAN_FALCON512_CLEAN_poly_add(g11, gxx, logn);

    /*
     * Compute the Falcon tree (gxx doubles as scratch space here).
     */
    ffLDL_fft(tree, g00, g01, g11, logn, gxx);

    /*
     * Normalize tree (leaves become sqrt(x)/sigma; see
     * ffLDL_binary_normalize).
     */
    ffLDL_binary_normalize(tree, logn);
}
+
+typedef int (*samplerZ)(void *ctx, fpr mu, fpr sigma);
+
+/*
+ * Perform Fast Fourier Sampling for target vector t. The Gram matrix
+ * is provided (G = [[g00, g01], [adj(g01), g11]]). The sampled vector
+ * is written over (t0,t1). The Gram matrix is modified as well. The
+ * tmp[] buffer must have room for four polynomials.
+ */
static void
ffSampling_fft_dyntree(samplerZ samp, void *samp_ctx,
                       fpr *t0, fpr *t1,
                       fpr *g00, fpr *g01, fpr *g11,
                       unsigned logn, fpr *tmp) {
    size_t n, hn;
    fpr *z0, *z1;

    /*
     * Deepest level: the LDL tree leaf value is just g00 (the
     * array has length only 1 at this point); we normalize it
     * with regards to sigma, then use it for sampling.
     */
    if (logn == 0) {
        fpr leaf;

        leaf = g00[0];
        /* Same normalization as ffLDL_binary_normalize() applies. */
        leaf = fpr_mul(fpr_sqrt(leaf), fpr_inv_sigma);
        t0[0] = fpr_of(samp(samp_ctx, t0[0], leaf));
        t1[0] = fpr_of(samp(samp_ctx, t1[0], leaf));
        return;
    }

    n = (size_t)1 << logn;
    hn = n >> 1;

    /*
     * Decompose G into LDL. We only need d00 (identical to g00),
     * d11, and l10; we do that in place.
     */
    PQCLEAN_FALCON512_CLEAN_poly_LDL_fft(g00, g01, g11, logn);

    /*
     * Split d00 and d11 and expand them into half-size quasi-cyclic
     * Gram matrices. We also save l10 in tmp[].
     */
    PQCLEAN_FALCON512_CLEAN_poly_split_fft(tmp, tmp + hn, g00, logn);
    memcpy(g00, tmp, n * sizeof * tmp);
    PQCLEAN_FALCON512_CLEAN_poly_split_fft(tmp, tmp + hn, g11, logn);
    memcpy(g11, tmp, n * sizeof * tmp);
    memcpy(tmp, g01, n * sizeof * g01);
    memcpy(g01, g00, hn * sizeof * g00);
    memcpy(g01 + hn, g11, hn * sizeof * g00);

    /*
     * The half-size Gram matrices for the recursive LDL tree
     * building are now:
     *   - left sub-tree:  g00, g00+hn, g01
     *   - right sub-tree: g11, g11+hn, g01+hn
     * l10 is in tmp[].
     */

    /*
     * We split t1 and use the first recursive call on the two
     * halves, using the right sub-tree. The result is merged
     * back into tmp + 2*n.
     */
    z1 = tmp + n;
    PQCLEAN_FALCON512_CLEAN_poly_split_fft(z1, z1 + hn, t1, logn);
    ffSampling_fft_dyntree(samp, samp_ctx, z1, z1 + hn,
                           g11, g11 + hn, g01 + hn, logn - 1, z1 + n);
    PQCLEAN_FALCON512_CLEAN_poly_merge_fft(tmp + (n << 1), z1, z1 + hn, logn);

    /*
     * Compute tb0 = t0 + (t1 - z1) * l10.
     * At that point, l10 is in tmp, t1 is unmodified, and z1 is
     * in tmp + (n << 1). The buffer in z1 is free.
     *
     * In the end, z1 is written over t1, and tb0 is in t0.
     */
    memcpy(z1, t1, n * sizeof * t1);
    PQCLEAN_FALCON512_CLEAN_poly_sub(z1, tmp + (n << 1), logn);
    memcpy(t1, tmp + (n << 1), n * sizeof * tmp);
    PQCLEAN_FALCON512_CLEAN_poly_mul_fft(tmp, z1, logn);
    PQCLEAN_FALCON512_CLEAN_poly_add(t0, tmp, logn);

    /*
     * Second recursive invocation, on the split tb0 (currently in t0)
     * and the left sub-tree.
     */
    z0 = tmp;
    PQCLEAN_FALCON512_CLEAN_poly_split_fft(z0, z0 + hn, t0, logn);
    ffSampling_fft_dyntree(samp, samp_ctx, z0, z0 + hn,
                           g00, g00 + hn, g01, logn - 1, z0 + n);
    PQCLEAN_FALCON512_CLEAN_poly_merge_fft(t0, z0, z0 + hn, logn);
}
+
+/*
+ * Perform Fast Fourier Sampling for target vector t and LDL tree T.
+ * tmp[] must have size for at least two polynomials of size 2^logn.
+ */
+static void
+ffSampling_fft(samplerZ samp, void *samp_ctx,
+ fpr *z0, fpr *z1,
+ const fpr *tree,
+ const fpr *t0, const fpr *t1, unsigned logn,
+ fpr *tmp) {
+ size_t n, hn;
+ const fpr *tree0, *tree1;
+
+ /*
+ * When logn == 2, we inline the last two recursion levels.
+ */
+ if (logn == 2) {
+ fpr x0, x1, y0, y1, w0, w1, w2, w3, sigma;
+ fpr a_re, a_im, b_re, b_im, c_re, c_im;
+
+ tree0 = tree + 4;
+ tree1 = tree + 8;
+
+ /*
+ * We split t1 into w*, then do the recursive invocation,
+ * with output in w*. We finally merge back into z1.
+ */
+ a_re = t1[0];
+ a_im = t1[2];
+ b_re = t1[1];
+ b_im = t1[3];
+ c_re = fpr_add(a_re, b_re);
+ c_im = fpr_add(a_im, b_im);
+ w0 = fpr_half(c_re);
+ w1 = fpr_half(c_im);
+ c_re = fpr_sub(a_re, b_re);
+ c_im = fpr_sub(a_im, b_im);
+ w2 = fpr_mul(fpr_add(c_re, c_im), fpr_invsqrt8);
+ w3 = fpr_mul(fpr_sub(c_im, c_re), fpr_invsqrt8);
+
+ x0 = w2;
+ x1 = w3;
+ sigma = tree1[3];
+ w2 = fpr_of(samp(samp_ctx, x0, sigma));
+ w3 = fpr_of(samp(samp_ctx, x1, sigma));
+ a_re = fpr_sub(x0, w2);
+ a_im = fpr_sub(x1, w3);
+ b_re = tree1[0];
+ b_im = tree1[1];
+ c_re = fpr_sub(fpr_mul(a_re, b_re), fpr_mul(a_im, b_im));
+ c_im = fpr_add(fpr_mul(a_re, b_im), fpr_mul(a_im, b_re));
+ x0 = fpr_add(c_re, w0);
+ x1 = fpr_add(c_im, w1);
+ sigma = tree1[2];
+ w0 = fpr_of(samp(samp_ctx, x0, sigma));
+ w1 = fpr_of(samp(samp_ctx, x1, sigma));
+
+ a_re = w0;
+ a_im = w1;
+ b_re = w2;
+ b_im = w3;
+ c_re = fpr_mul(fpr_sub(b_re, b_im), fpr_invsqrt2);
+ c_im = fpr_mul(fpr_add(b_re, b_im), fpr_invsqrt2);
+ z1[0] = w0 = fpr_add(a_re, c_re);
+ z1[2] = w2 = fpr_add(a_im, c_im);
+ z1[1] = w1 = fpr_sub(a_re, c_re);
+ z1[3] = w3 = fpr_sub(a_im, c_im);
+
+ /*
+ * Compute tb0 = t0 + (t1 - z1) * L. Value tb0 ends up in w*.
+ */
+ w0 = fpr_sub(t1[0], w0);
+ w1 = fpr_sub(t1[1], w1);
+ w2 = fpr_sub(t1[2], w2);
+ w3 = fpr_sub(t1[3], w3);
+
+ a_re = w0;
+ a_im = w2;
+ b_re = tree[0];
+ b_im = tree[2];
+ w0 = fpr_sub(fpr_mul(a_re, b_re), fpr_mul(a_im, b_im));
+ w2 = fpr_add(fpr_mul(a_re, b_im), fpr_mul(a_im, b_re));
+ a_re = w1;
+ a_im = w3;
+ b_re = tree[1];
+ b_im = tree[3];
+ w1 = fpr_sub(fpr_mul(a_re, b_re), fpr_mul(a_im, b_im));
+ w3 = fpr_add(fpr_mul(a_re, b_im), fpr_mul(a_im, b_re));
+
+ w0 = fpr_add(w0, t0[0]);
+ w1 = fpr_add(w1, t0[1]);
+ w2 = fpr_add(w2, t0[2]);
+ w3 = fpr_add(w3, t0[3]);
+
+ /*
+ * Second recursive invocation.
+ */
+ a_re = w0;
+ a_im = w2;
+ b_re = w1;
+ b_im = w3;
+ c_re = fpr_add(a_re, b_re);
+ c_im = fpr_add(a_im, b_im);
+ w0 = fpr_half(c_re);
+ w1 = fpr_half(c_im);
+ c_re = fpr_sub(a_re, b_re);
+ c_im = fpr_sub(a_im, b_im);
+ w2 = fpr_mul(fpr_add(c_re, c_im), fpr_invsqrt8);
+ w3 = fpr_mul(fpr_sub(c_im, c_re), fpr_invsqrt8);
+
+ x0 = w2;
+ x1 = w3;
+ sigma = tree0[3];
+ w2 = y0 = fpr_of(samp(samp_ctx, x0, sigma));
+ w3 = y1 = fpr_of(samp(samp_ctx, x1, sigma));
+ a_re = fpr_sub(x0, y0);
+ a_im = fpr_sub(x1, y1);
+ b_re = tree0[0];
+ b_im = tree0[1];
+ c_re = fpr_sub(fpr_mul(a_re, b_re), fpr_mul(a_im, b_im));
+ c_im = fpr_add(fpr_mul(a_re, b_im), fpr_mul(a_im, b_re));
+ x0 = fpr_add(c_re, w0);
+ x1 = fpr_add(c_im, w1);
+ sigma = tree0[2];
+ w0 = fpr_of(samp(samp_ctx, x0, sigma));
+ w1 = fpr_of(samp(samp_ctx, x1, sigma));
+
+ a_re = w0;
+ a_im = w1;
+ b_re = w2;
+ b_im = w3;
+ c_re = fpr_mul(fpr_sub(b_re, b_im), fpr_invsqrt2);
+ c_im = fpr_mul(fpr_add(b_re, b_im), fpr_invsqrt2);
+ z0[0] = fpr_add(a_re, c_re);
+ z0[2] = fpr_add(a_im, c_im);
+ z0[1] = fpr_sub(a_re, c_re);
+ z0[3] = fpr_sub(a_im, c_im);
+
+ return;
+ }
+
+ /*
+ * Case logn == 1 is reachable only when using Falcon-2 (the
+ * smallest size for which Falcon is mathematically defined, but
+ * of course way too insecure to be of any use).
+ */
+ if (logn == 1) {
+ fpr x0, x1, y0, y1, sigma;
+ fpr a_re, a_im, b_re, b_im, c_re, c_im;
+
+ x0 = t1[0];
+ x1 = t1[1];
+ sigma = tree[3];
+ z1[0] = y0 = fpr_of(samp(samp_ctx, x0, sigma));
+ z1[1] = y1 = fpr_of(samp(samp_ctx, x1, sigma));
+ a_re = fpr_sub(x0, y0);
+ a_im = fpr_sub(x1, y1);
+ b_re = tree[0];
+ b_im = tree[1];
+ c_re = fpr_sub(fpr_mul(a_re, b_re), fpr_mul(a_im, b_im));
+ c_im = fpr_add(fpr_mul(a_re, b_im), fpr_mul(a_im, b_re));
+ x0 = fpr_add(c_re, t0[0]);
+ x1 = fpr_add(c_im, t0[1]);
+ sigma = tree[2];
+ z0[0] = fpr_of(samp(samp_ctx, x0, sigma));
+ z0[1] = fpr_of(samp(samp_ctx, x1, sigma));
+
+ return;
+ }
+
+ /*
+ * Normal end of recursion is for logn == 0. Since the last
+ * steps of the recursions were inlined in the blocks above
+ * (when logn == 1 or 2), this case is not reachable, and is
+ * retained here only for documentation purposes.
+
+ if (logn == 0) {
+ fpr x0, x1, sigma;
+
+ x0 = t0[0];
+ x1 = t1[0];
+ sigma = tree[0];
+ z0[0] = fpr_of(samp(samp_ctx, x0, sigma));
+ z1[0] = fpr_of(samp(samp_ctx, x1, sigma));
+ return;
+ }
+
+ */
+
+ /*
+ * General recursive case (logn >= 3).
+ */
+
+ n = (size_t)1 << logn;
+ hn = n >> 1;
+ tree0 = tree + n;
+ tree1 = tree + n + ffLDL_treesize(logn - 1);
+
+ /*
+ * We split t1 into z1 (reused as temporary storage), then do
+ * the recursive invocation, with output in tmp. We finally
+ * merge back into z1.
+ */
+ PQCLEAN_FALCON512_CLEAN_poly_split_fft(z1, z1 + hn, t1, logn);
+ ffSampling_fft(samp, samp_ctx, tmp, tmp + hn,
+ tree1, z1, z1 + hn, logn - 1, tmp + n);
+ PQCLEAN_FALCON512_CLEAN_poly_merge_fft(z1, tmp, tmp + hn, logn);
+
+ /*
+ * Compute tb0 = t0 + (t1 - z1) * L. Value tb0 ends up in tmp[].
+ */
+ memcpy(tmp, t1, n * sizeof * t1);
+ PQCLEAN_FALCON512_CLEAN_poly_sub(tmp, z1, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(tmp, tree, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_add(tmp, t0, logn);
+
+ /*
+ * Second recursive invocation.
+ */
+ PQCLEAN_FALCON512_CLEAN_poly_split_fft(z0, z0 + hn, tmp, logn);
+ ffSampling_fft(samp, samp_ctx, tmp, tmp + hn,
+ tree0, z0, z0 + hn, logn - 1, tmp + n);
+ PQCLEAN_FALCON512_CLEAN_poly_merge_fft(z0, tmp, tmp + hn, logn);
+}
+
+/*
+ * Compute a signature: the signature contains two vectors, s1 and s2.
+ * The s1 vector is not returned. The squared norm of (s1,s2) is
+ * computed, and if it is short enough, then s2 is returned into the
+ * s2[] buffer, and 1 is returned; otherwise, s2[] is untouched and 0 is
+ * returned; the caller should then try again. This function uses an
+ * expanded key.
+ *
+ * tmp[] must have room for at least six polynomials.
+ */
+static int
+do_sign_tree(samplerZ samp, void *samp_ctx, int16_t *s2,
+ const fpr *expanded_key,
+ const uint16_t *hm,
+ unsigned logn, fpr *tmp) {
+ size_t n, u;
+ fpr *t0, *t1, *tx, *ty;
+ const fpr *b00, *b01, *b10, *b11, *tree;
+ fpr ni;
+ uint32_t sqn, ng;
+ int16_t *s1tmp, *s2tmp;
+
+ n = MKN(logn);
+ t0 = tmp;
+ t1 = t0 + n;
+ /*
+ * The expanded key stores the FFT of the basis matrix B and the
+ * ffLDL tree at fixed offsets (skoff_* compute those offsets).
+ */
+ b00 = expanded_key + skoff_b00(logn);
+ b01 = expanded_key + skoff_b01(logn);
+ b10 = expanded_key + skoff_b10(logn);
+ b11 = expanded_key + skoff_b11(logn);
+ tree = expanded_key + skoff_tree(logn);
+
+ /*
+ * Set the target vector to [hm, 0] (hm is the hashed message).
+ */
+ for (u = 0; u < n; u ++) {
+ t0[u] = fpr_of(hm[u]);
+ /* This is implicit.
+ t1[u] = fpr_zero;
+ */
+ }
+
+ /*
+ * Apply the lattice basis to obtain the real target
+ * vector (after normalization with regard to the modulus).
+ */
+ PQCLEAN_FALCON512_CLEAN_FFT(t0, logn);
+ ni = fpr_inverse_of_q;
+ memcpy(t1, t0, n * sizeof * t0);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(t1, b01, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_mulconst(t1, fpr_neg(ni), logn);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(t0, b11, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_mulconst(t0, ni, logn);
+
+ tx = t1 + n;
+ ty = tx + n;
+
+ /*
+ * Apply sampling. Output is written back in [tx, ty].
+ */
+ ffSampling_fft(samp, samp_ctx, tx, ty, tree, t0, t1, logn, ty + n);
+
+ /*
+ * Get the lattice point corresponding to that tiny vector.
+ */
+ memcpy(t0, tx, n * sizeof * tx);
+ memcpy(t1, ty, n * sizeof * ty);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(tx, b00, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(ty, b10, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_add(tx, ty, logn);
+ memcpy(ty, t0, n * sizeof * t0);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(ty, b01, logn);
+
+ memcpy(t0, tx, n * sizeof * tx);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(t1, b11, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_add(t1, ty, logn);
+
+ PQCLEAN_FALCON512_CLEAN_iFFT(t0, logn);
+ PQCLEAN_FALCON512_CLEAN_iFFT(t1, logn);
+
+ /*
+ * Compute the signature. s1 coefficients (hm - round(t0)) are
+ * written over the tx area, reinterpreted as int16_t storage.
+ */
+ s1tmp = (int16_t *)tx;
+ sqn = 0;
+ ng = 0;
+ for (u = 0; u < n; u ++) {
+ int32_t z;
+
+ z = (int32_t)hm[u] - (int32_t)fpr_rint(t0[u]);
+ sqn += (uint32_t)(z * z);
+ /*
+ * ng accumulates the OR of all partial sums; its top bit
+ * records whether sqn ever reached 2^31.
+ */
+ ng |= sqn;
+ s1tmp[u] = (int16_t)z;
+ }
+ /*
+ * If the accumulation may have overflowed, saturate sqn to
+ * 2^32-1 so that the shortness test below necessarily fails.
+ */
+ sqn |= -(ng >> 31);
+
+ /*
+ * With "normal" degrees (e.g. 512 or 1024), it is very
+ * improbable that the computed vector is not short enough;
+ * however, it may happen in practice for the very reduced
+ * versions (e.g. degree 16 or below). In that case, the caller
+ * will loop, and we must not write anything into s2[] because
+ * s2[] may overlap with the hashed message hm[] and we need
+ * hm[] for the next iteration.
+ */
+ s2tmp = (int16_t *)tmp;
+ for (u = 0; u < n; u ++) {
+ s2tmp[u] = (int16_t) - fpr_rint(t1[u]);
+ }
+ if (PQCLEAN_FALCON512_CLEAN_is_short_half(sqn, s2tmp, logn)) {
+ memcpy(s2, s2tmp, n * sizeof * s2);
+ memcpy(tmp, s1tmp, n * sizeof * s1tmp);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Compute a signature: the signature contains two vectors, s1 and s2.
+ * The s1 vector is not returned. The squared norm of (s1,s2) is
+ * computed, and if it is short enough, then s2 is returned into the
+ * s2[] buffer, and 1 is returned; otherwise, s2[] is untouched and 0 is
+ * returned; the caller should then try again.
+ *
+ * tmp[] must have room for at least nine polynomials.
+ */
+static int
+do_sign_dyn(samplerZ samp, void *samp_ctx, int16_t *s2,
+ const int8_t *f, const int8_t *g,
+ const int8_t *F, const int8_t *G,
+ const uint16_t *hm, unsigned logn, fpr *tmp) {
+ size_t n, u;
+ fpr *t0, *t1, *tx, *ty;
+ fpr *b00, *b01, *b10, *b11, *g00, *g01, *g11;
+ fpr ni;
+ uint32_t sqn, ng;
+ int16_t *s1tmp, *s2tmp;
+
+ n = MKN(logn);
+
+ /*
+ * Lattice basis is B = [[g, -f], [G, -F]]. We convert it to FFT.
+ */
+ b00 = tmp;
+ b01 = b00 + n;
+ b10 = b01 + n;
+ b11 = b10 + n;
+ smallints_to_fpr(b01, f, logn);
+ smallints_to_fpr(b00, g, logn);
+ smallints_to_fpr(b11, F, logn);
+ smallints_to_fpr(b10, G, logn);
+ PQCLEAN_FALCON512_CLEAN_FFT(b01, logn);
+ PQCLEAN_FALCON512_CLEAN_FFT(b00, logn);
+ PQCLEAN_FALCON512_CLEAN_FFT(b11, logn);
+ PQCLEAN_FALCON512_CLEAN_FFT(b10, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_neg(b01, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_neg(b11, logn);
+
+ /*
+ * Compute the Gram matrix G = B x B*. Formulas are:
+ * g00 = b00*adj(b00) + b01*adj(b01)
+ * g01 = b00*adj(b10) + b01*adj(b11)
+ * g10 = b10*adj(b00) + b11*adj(b01)
+ * g11 = b10*adj(b10) + b11*adj(b11)
+ *
+ * For historical reasons, this implementation uses
+ * g00, g01 and g11 (upper triangle). g10 is not kept
+ * since it is equal to adj(g01).
+ *
+ * We _replace_ the matrix B with the Gram matrix, but we
+ * must keep b01 and b11 for computing the target vector.
+ */
+ t0 = b11 + n;
+ t1 = t0 + n;
+
+ memcpy(t0, b01, n * sizeof * b01);
+ PQCLEAN_FALCON512_CLEAN_poly_mulselfadj_fft(t0, logn); // t0 <- b01*adj(b01)
+
+ memcpy(t1, b00, n * sizeof * b00);
+ PQCLEAN_FALCON512_CLEAN_poly_muladj_fft(t1, b10, logn); // t1 <- b00*adj(b10)
+ PQCLEAN_FALCON512_CLEAN_poly_mulselfadj_fft(b00, logn); // b00 <- b00*adj(b00)
+ PQCLEAN_FALCON512_CLEAN_poly_add(b00, t0, logn); // b00 <- g00
+ memcpy(t0, b01, n * sizeof * b01);
+ PQCLEAN_FALCON512_CLEAN_poly_muladj_fft(b01, b11, logn); // b01 <- b01*adj(b11)
+ PQCLEAN_FALCON512_CLEAN_poly_add(b01, t1, logn); // b01 <- g01
+
+ PQCLEAN_FALCON512_CLEAN_poly_mulselfadj_fft(b10, logn); // b10 <- b10*adj(b10)
+ memcpy(t1, b11, n * sizeof * b11);
+ PQCLEAN_FALCON512_CLEAN_poly_mulselfadj_fft(t1, logn); // t1 <- b11*adj(b11)
+ PQCLEAN_FALCON512_CLEAN_poly_add(b10, t1, logn); // b10 <- g11
+
+ /*
+ * We rename variables to make things clearer. The three elements
+ * of the Gram matrix uses the first 3*n slots of tmp[], followed
+ * by b11 and b01 (in that order).
+ */
+ g00 = b00;
+ g01 = b01;
+ g11 = b10;
+ b01 = t0;
+ t0 = b01 + n;
+ t1 = t0 + n;
+
+ /*
+ * Memory layout at that point:
+ * g00 g01 g11 b11 b01 t0 t1
+ */
+
+ /*
+ * Set the target vector to [hm, 0] (hm is the hashed message).
+ */
+ for (u = 0; u < n; u ++) {
+ t0[u] = fpr_of(hm[u]);
+ /* This is implicit.
+ t1[u] = fpr_zero;
+ */
+ }
+
+ /*
+ * Apply the lattice basis to obtain the real target
+ * vector (after normalization with regard to the modulus).
+ */
+ PQCLEAN_FALCON512_CLEAN_FFT(t0, logn);
+ ni = fpr_inverse_of_q;
+ memcpy(t1, t0, n * sizeof * t0);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(t1, b01, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_mulconst(t1, fpr_neg(ni), logn);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(t0, b11, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_mulconst(t0, ni, logn);
+
+ /*
+ * b01 and b11 can be discarded, so we move back (t0,t1).
+ * Memory layout is now:
+ * g00 g01 g11 t0 t1
+ */
+ memcpy(b11, t0, n * 2 * sizeof * t0);
+ t0 = g11 + n;
+ t1 = t0 + n;
+
+ /*
+ * Apply sampling; result is written over (t0,t1).
+ */
+ ffSampling_fft_dyntree(samp, samp_ctx,
+ t0, t1, g00, g01, g11, logn, t1 + n);
+
+ /*
+ * We arrange the layout back to:
+ * b00 b01 b10 b11 t0 t1
+ *
+ * We did not conserve the matrix basis, so we must recompute
+ * it now.
+ */
+ b00 = tmp;
+ b01 = b00 + n;
+ b10 = b01 + n;
+ b11 = b10 + n;
+ memmove(b11 + n, t0, n * 2 * sizeof * t0);
+ t0 = b11 + n;
+ t1 = t0 + n;
+ smallints_to_fpr(b01, f, logn);
+ smallints_to_fpr(b00, g, logn);
+ smallints_to_fpr(b11, F, logn);
+ smallints_to_fpr(b10, G, logn);
+ PQCLEAN_FALCON512_CLEAN_FFT(b01, logn);
+ PQCLEAN_FALCON512_CLEAN_FFT(b00, logn);
+ PQCLEAN_FALCON512_CLEAN_FFT(b11, logn);
+ PQCLEAN_FALCON512_CLEAN_FFT(b10, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_neg(b01, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_neg(b11, logn);
+ tx = t1 + n;
+ ty = tx + n;
+
+ /*
+ * Get the lattice point corresponding to that tiny vector.
+ */
+ memcpy(tx, t0, n * sizeof * t0);
+ memcpy(ty, t1, n * sizeof * t1);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(tx, b00, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(ty, b10, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_add(tx, ty, logn);
+ memcpy(ty, t0, n * sizeof * t0);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(ty, b01, logn);
+
+ memcpy(t0, tx, n * sizeof * tx);
+ PQCLEAN_FALCON512_CLEAN_poly_mul_fft(t1, b11, logn);
+ PQCLEAN_FALCON512_CLEAN_poly_add(t1, ty, logn);
+ PQCLEAN_FALCON512_CLEAN_iFFT(t0, logn);
+ PQCLEAN_FALCON512_CLEAN_iFFT(t1, logn);
+
+ /*
+ * Compute s1 = hm - round(t0) over the tx area (reused as
+ * int16_t storage); see do_sign_tree() for the same pattern.
+ */
+ s1tmp = (int16_t *)tx;
+ sqn = 0;
+ ng = 0;
+ for (u = 0; u < n; u ++) {
+ int32_t z;
+
+ z = (int32_t)hm[u] - (int32_t)fpr_rint(t0[u]);
+ sqn += (uint32_t)(z * z);
+ /* ng's top bit records whether sqn ever reached 2^31. */
+ ng |= sqn;
+ s1tmp[u] = (int16_t)z;
+ }
+ /* On possible overflow, saturate sqn so the norm check fails. */
+ sqn |= -(ng >> 31);
+
+ /*
+ * With "normal" degrees (e.g. 512 or 1024), it is very
+ * improbable that the computed vector is not short enough;
+ * however, it may happen in practice for the very reduced
+ * versions (e.g. degree 16 or below). In that case, the caller
+ * will loop, and we must not write anything into s2[] because
+ * s2[] may overlap with the hashed message hm[] and we need
+ * hm[] for the next iteration.
+ */
+ s2tmp = (int16_t *)tmp;
+ for (u = 0; u < n; u ++) {
+ s2tmp[u] = (int16_t) - fpr_rint(t1[u]);
+ }
+ if (PQCLEAN_FALCON512_CLEAN_is_short_half(sqn, s2tmp, logn)) {
+ memcpy(s2, s2tmp, n * sizeof * s2);
+ memcpy(tmp, s1tmp, n * sizeof * s1tmp);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Sample an integer value along a half-gaussian distribution centered
+ * on zero and standard deviation 1.8205, with a precision of 72 bits.
+ */
+int
+PQCLEAN_FALCON512_CLEAN_gaussian0_sampler(prng *p) {
+
+ /*
+ * Reverse cumulative distribution table: 18 entries, each split
+ * into three 24-bit limbs, stored high limb first.
+ */
+ static const uint32_t dist[] = {
+ 10745844u, 3068844u, 3741698u,
+ 5559083u, 1580863u, 8248194u,
+ 2260429u, 13669192u, 2736639u,
+ 708981u, 4421575u, 10046180u,
+ 169348u, 7122675u, 4136815u,
+ 30538u, 13063405u, 7650655u,
+ 4132u, 14505003u, 7826148u,
+ 417u, 16768101u, 11363290u,
+ 31u, 8444042u, 8086568u,
+ 1u, 12844466u, 265321u,
+ 0u, 1232676u, 13644283u,
+ 0u, 38047u, 9111839u,
+ 0u, 870u, 6138264u,
+ 0u, 14u, 12545723u,
+ 0u, 0u, 3104126u,
+ 0u, 0u, 28824u,
+ 0u, 0u, 198u,
+ 0u, 0u, 1u
+ };
+
+ uint32_t v0, v1, v2, hi;
+ uint64_t lo;
+ size_t u;
+ int z;
+
+ /*
+ * Get a random 72-bit value, into three 24-bit limbs v0..v2.
+ */
+ lo = prng_get_u64(p);
+ hi = prng_get_u8(p);
+ v0 = (uint32_t)lo & 0xFFFFFF;
+ v1 = (uint32_t)(lo >> 24) & 0xFFFFFF;
+ v2 = (uint32_t)(lo >> 48) | (hi << 16);
+
+ /*
+ * Sampled value is z, such that v0..v2 is lower than the first
+ * z elements of the table.
+ *
+ * The full table is always scanned and each 72-bit comparison
+ * is computed branchlessly by borrow propagation, so this loop
+ * has no data-dependent branches.
+ */
+ z = 0;
+ for (u = 0; u < (sizeof dist) / sizeof(dist[0]); u += 3) {
+ uint32_t w0, w1, w2, cc;
+
+ w0 = dist[u + 2];
+ w1 = dist[u + 1];
+ w2 = dist[u + 0];
+ cc = (v0 - w0) >> 31;
+ cc = (v1 - w1 - cc) >> 31;
+ cc = (v2 - w2 - cc) >> 31;
+ z += (int)cc;
+ }
+ return z;
+
+}
+
+/*
+ * Sample a bit with probability exp(-x) for some x >= 0.
+ */
+static int
+BerExp(prng *p, fpr x, fpr ccs) {
+ int s, i;
+ fpr r;
+ uint32_t sw, w;
+ uint64_t z;
+
+ /*
+ * Reduce x modulo log(2): x = s*log(2) + r, with s an integer,
+ * and 0 <= r < log(2). Since x >= 0, we can use fpr_trunc().
+ */
+ s = (int)fpr_trunc(fpr_mul(x, fpr_inv_log2));
+ r = fpr_sub(x, fpr_mul(fpr_of(s), fpr_log2));
+
+ /*
+ * It may happen (quite rarely) that s >= 64; if sigma = 1.2
+ * (the minimum value for sigma), r = 0 and b = 1, then we get
+ * s >= 64 if the half-Gaussian produced a z >= 13, which happens
+ * with probability about 0.000000000230383991, which is
+ * approximately equal to 2^(-32). In any case, if s >= 64,
+ * then BerExp will be non-zero with probability less than
+ * 2^(-64), so we can simply saturate s at 63.
+ */
+ sw = (uint32_t)s;
+ /* Branchless saturation: sw = min(sw, 63). */
+ sw ^= (sw ^ 63) & -((63 - sw) >> 31);
+ s = (int)sw;
+
+ /*
+ * Compute exp(-r); we know that 0 <= r < log(2) at this point, so
+ * we can use fpr_expm_p63(), which yields a result scaled to 2^63.
+ * We scale it up to 2^64, then right-shift it by s bits because
+ * we really want exp(-x) = 2^(-s)*exp(-r).
+ *
+ * The "-1" operation makes sure that the value fits on 64 bits
+ * (i.e. if r = 0, we may get 2^64, and we prefer 2^64-1 in that
+ * case). The bias is negligible since fpr_expm_p63() only computes
+ * with 51 bits of precision or so.
+ */
+ z = ((fpr_expm_p63(r, ccs) << 1) - 1) >> s;
+
+ /*
+ * Sample a bit with probability exp(-x). Since x = s*log(2) + r,
+ * exp(-x) = 2^-s * exp(-r), we compare lazily exp(-x) with the
+ * PRNG output to limit its consumption, the sign of the difference
+ * yields the expected result.
+ */
+ i = 64;
+ do {
+ i -= 8;
+ w = prng_get_u8(p) - ((uint32_t)(z >> i) & 0xFF);
+ } while (!w && i > 0);
+ /* Top bit of w is set exactly when the PRNG byte was lower. */
+ return (int)(w >> 31);
+}
+
+/*
+ * The sampler produces a random integer that follows a discrete Gaussian
+ * distribution, centered on mu, and with standard deviation sigma. The
+ * provided parameter isigma is equal to 1/sigma.
+ *
+ * The value of sigma MUST lie between 1 and 2 (i.e. isigma lies between
+ * 0.5 and 1); in Falcon, sigma should always be between 1.2 and 1.9.
+ */
+int
+PQCLEAN_FALCON512_CLEAN_sampler(void *ctx, fpr mu, fpr isigma) {
+ sampler_context *spc;
+ int s, z0, z, b;
+ fpr r, dss, ccs, x;
+
+ spc = ctx;
+
+ /*
+ * Center is mu. We compute mu = s + r where s is an integer
+ * and 0 <= r < 1.
+ */
+ s = (int)fpr_floor(mu);
+ r = fpr_sub(mu, fpr_of(s));
+
+ /*
+ * dss = 1/(2*sigma^2) = 0.5*(isigma^2).
+ */
+ dss = fpr_half(fpr_sqr(isigma));
+
+ /*
+ * ccs = sigma_min / sigma = sigma_min * isigma.
+ */
+ ccs = fpr_mul(isigma, spc->sigma_min);
+
+ /*
+ * We now need to sample on center r.
+ */
+ for (;;) {
+ /*
+ * Sample z for a Gaussian distribution. Then get a
+ * random bit b to turn the sampling into a bimodal
+ * distribution: if b = 1, we use z+1, otherwise we
+ * use -z. We thus have two situations:
+ *
+ * - b = 1: z >= 1 and sampled against a Gaussian
+ * centered on 1.
+ * - b = 0: z <= 0 and sampled against a Gaussian
+ * centered on 0.
+ */
+ z0 = PQCLEAN_FALCON512_CLEAN_gaussian0_sampler(&spc->p);
+ b = (int)prng_get_u8(&spc->p) & 1;
+ /* b = 1 gives z = z0 + 1; b = 0 gives z = -z0. */
+ z = b + ((b << 1) - 1) * z0;
+
+ /*
+ * Rejection sampling. We want a Gaussian centered on r;
+ * but we sampled against a Gaussian centered on b (0 or
+ * 1). But we know that z is always in the range where
+ * our sampling distribution is greater than the Gaussian
+ * distribution, so rejection works.
+ *
+ * We got z with distribution:
+ * G(z) = exp(-((z-b)^2)/(2*sigma0^2))
+ * We target distribution:
+ * S(z) = exp(-((z-r)^2)/(2*sigma^2))
+ * Rejection sampling works by keeping the value z with
+ * probability S(z)/G(z), and starting again otherwise.
+ * This requires S(z) <= G(z), which is the case here.
+ * Thus, we simply need to keep our z with probability:
+ * P = exp(-x)
+ * where:
+ * x = ((z-r)^2)/(2*sigma^2) - ((z-b)^2)/(2*sigma0^2)
+ *
+ * Here, we scale up the Bernoulli distribution, which
+ * makes rejection more probable, but makes rejection
+ * rate sufficiently decorrelated from the Gaussian
+ * center and standard deviation that the whole sampler
+ * can be said to be constant-time.
+ */
+ x = fpr_mul(fpr_sqr(fpr_sub(fpr_of(z), r)), dss);
+ x = fpr_sub(x, fpr_mul(fpr_of(z0 * z0), fpr_inv_2sqrsigma0));
+ if (BerExp(&spc->p, x, ccs)) {
+ /*
+ * Rejection sampling was centered on r, but the
+ * actual center is mu = s + r.
+ */
+ return s + z;
+ }
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_sign_tree(int16_t *sig, inner_shake256_context *rng,
+ const fpr *expanded_key,
+ const uint16_t *hm, unsigned logn, uint8_t *tmp) {
+ fpr *ftmp;
+
+ ftmp = (fpr *)tmp;
+ for (;;) {
+ /*
+ * Signature produces short vectors s1 and s2. The
+ * signature is acceptable only if the aggregate vector
+ * s1,s2 is short; we must use the same bound as the
+ * verifier.
+ *
+ * If the signature is acceptable, then we return only s2
+ * (the verifier recomputes s1 from s2, the hashed message,
+ * and the public key).
+ */
+ sampler_context spc;
+ samplerZ samp;
+ void *samp_ctx;
+
+ /*
+ * Normal sampling. We use a fast PRNG seeded from our
+ * SHAKE context ('rng').
+ *
+ * NOTE(review): logn == 10 selects the Falcon-1024
+ * sigma_min; a Falcon-512 build presumably always calls
+ * this with logn = 9 — confirm against callers.
+ */
+ if (logn == 10) {
+ spc.sigma_min = fpr_sigma_min_10;
+ } else {
+ spc.sigma_min = fpr_sigma_min_9;
+ }
+ PQCLEAN_FALCON512_CLEAN_prng_init(&spc.p, rng);
+ samp = PQCLEAN_FALCON512_CLEAN_sampler;
+ samp_ctx = &spc;
+
+ /*
+ * Do the actual signature. Loop until the sampled
+ * vector passes the shortness test.
+ */
+ if (do_sign_tree(samp, samp_ctx, sig,
+ expanded_key, hm, logn, ftmp)) {
+ break;
+ }
+ }
+}
+
+/* see inner.h */
+void
+PQCLEAN_FALCON512_CLEAN_sign_dyn(int16_t *sig, inner_shake256_context *rng,
+ const int8_t *f, const int8_t *g,
+ const int8_t *F, const int8_t *G,
+ const uint16_t *hm, unsigned logn, uint8_t *tmp) {
+ fpr *ftmp;
+
+ ftmp = (fpr *)tmp;
+ for (;;) {
+ /*
+ * Signature produces short vectors s1 and s2. The
+ * signature is acceptable only if the aggregate vector
+ * s1,s2 is short; we must use the same bound as the
+ * verifier.
+ *
+ * If the signature is acceptable, then we return only s2
+ * (the verifier recomputes s1 from s2, the hashed message,
+ * and the public key).
+ */
+ sampler_context spc;
+ samplerZ samp;
+ void *samp_ctx;
+
+ /*
+ * Normal sampling. We use a fast PRNG seeded from our
+ * SHAKE context ('rng').
+ *
+ * NOTE(review): logn == 10 selects the Falcon-1024
+ * sigma_min; a Falcon-512 build presumably always calls
+ * this with logn = 9 — confirm against callers.
+ */
+ if (logn == 10) {
+ spc.sigma_min = fpr_sigma_min_10;
+ } else {
+ spc.sigma_min = fpr_sigma_min_9;
+ }
+ PQCLEAN_FALCON512_CLEAN_prng_init(&spc.p, rng);
+ samp = PQCLEAN_FALCON512_CLEAN_sampler;
+ samp_ctx = &spc;
+
+ /*
+ * Do the actual signature. Loop until the sampled
+ * vector passes the shortness test.
+ */
+ if (do_sign_dyn(samp, samp_ctx, sig,
+ f, g, F, G, hm, logn, ftmp)) {
+ break;
+ }
+ }
+}
diff --git a/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/vrfy.c b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/vrfy.c
new file mode 100644
index 000000000..cf89f69f6
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/pqclean_falcon-512_clean/vrfy.c
@@ -0,0 +1,853 @@
+#include "inner.h"
+
+/*
+ * Falcon signature verification.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2017-2019 Falcon Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@nccgroup.com>
+ */
+
+
+/* ===================================================================== */
+/*
+ * Constants for NTT.
+ *
+ * n = 2^logn (2 <= n <= 1024)
+ * phi = X^n + 1
+ * q = 12289
+ * q0i = -1/q mod 2^16
+ * R = 2^16 mod q
+ * R2 = 2^32 mod q
+ */
+
+#define Q 12289
+#define Q0I 12287
+#define R 4091
+#define R2 10952
+
+/*
+ * Table for NTT, binary case:
+ * GMb[x] = R*(g^rev(x)) mod q
+ * where g = 7 (it is a 2048-th primitive root of 1 modulo q)
+ * and rev() is the bit-reversal function over 10 bits.
+ */
+/* 1024 entries (8 per row); see the GMb description above. */
+static const uint16_t GMb[] = {
+ 4091, 7888, 11060, 11208, 6960, 4342, 6275, 9759,
+ 1591, 6399, 9477, 5266, 586, 5825, 7538, 9710,
+ 1134, 6407, 1711, 965, 7099, 7674, 3743, 6442,
+ 10414, 8100, 1885, 1688, 1364, 10329, 10164, 9180,
+ 12210, 6240, 997, 117, 4783, 4407, 1549, 7072,
+ 2829, 6458, 4431, 8877, 7144, 2564, 5664, 4042,
+ 12189, 432, 10751, 1237, 7610, 1534, 3983, 7863,
+ 2181, 6308, 8720, 6570, 4843, 1690, 14, 3872,
+ 5569, 9368, 12163, 2019, 7543, 2315, 4673, 7340,
+ 1553, 1156, 8401, 11389, 1020, 2967, 10772, 7045,
+ 3316, 11236, 5285, 11578, 10637, 10086, 9493, 6180,
+ 9277, 6130, 3323, 883, 10469, 489, 1502, 2851,
+ 11061, 9729, 2742, 12241, 4970, 10481, 10078, 1195,
+ 730, 1762, 3854, 2030, 5892, 10922, 9020, 5274,
+ 9179, 3604, 3782, 10206, 3180, 3467, 4668, 2446,
+ 7613, 9386, 834, 7703, 6836, 3403, 5351, 12276,
+ 3580, 1739, 10820, 9787, 10209, 4070, 12250, 8525,
+ 10401, 2749, 7338, 10574, 6040, 943, 9330, 1477,
+ 6865, 9668, 3585, 6633, 12145, 4063, 3684, 7680,
+ 8188, 6902, 3533, 9807, 6090, 727, 10099, 7003,
+ 6945, 1949, 9731, 10559, 6057, 378, 7871, 8763,
+ 8901, 9229, 8846, 4551, 9589, 11664, 7630, 8821,
+ 5680, 4956, 6251, 8388, 10156, 8723, 2341, 3159,
+ 1467, 5460, 8553, 7783, 2649, 2320, 9036, 6188,
+ 737, 3698, 4699, 5753, 9046, 3687, 16, 914,
+ 5186, 10531, 4552, 1964, 3509, 8436, 7516, 5381,
+ 10733, 3281, 7037, 1060, 2895, 7156, 8887, 5357,
+ 6409, 8197, 2962, 6375, 5064, 6634, 5625, 278,
+ 932, 10229, 8927, 7642, 351, 9298, 237, 5858,
+ 7692, 3146, 12126, 7586, 2053, 11285, 3802, 5204,
+ 4602, 1748, 11300, 340, 3711, 4614, 300, 10993,
+ 5070, 10049, 11616, 12247, 7421, 10707, 5746, 5654,
+ 3835, 5553, 1224, 8476, 9237, 3845, 250, 11209,
+ 4225, 6326, 9680, 12254, 4136, 2778, 692, 8808,
+ 6410, 6718, 10105, 10418, 3759, 7356, 11361, 8433,
+ 6437, 3652, 6342, 8978, 5391, 2272, 6476, 7416,
+ 8418, 10824, 11986, 5733, 876, 7030, 2167, 2436,
+ 3442, 9217, 8206, 4858, 5964, 2746, 7178, 1434,
+ 7389, 8879, 10661, 11457, 4220, 1432, 10832, 4328,
+ 8557, 1867, 9454, 2416, 3816, 9076, 686, 5393,
+ 2523, 4339, 6115, 619, 937, 2834, 7775, 3279,
+ 2363, 7488, 6112, 5056, 824, 10204, 11690, 1113,
+ 2727, 9848, 896, 2028, 5075, 2654, 10464, 7884,
+ 12169, 5434, 3070, 6400, 9132, 11672, 12153, 4520,
+ 1273, 9739, 11468, 9937, 10039, 9720, 2262, 9399,
+ 11192, 315, 4511, 1158, 6061, 6751, 11865, 357,
+ 7367, 4550, 983, 8534, 8352, 10126, 7530, 9253,
+ 4367, 5221, 3999, 8777, 3161, 6990, 4130, 11652,
+ 3374, 11477, 1753, 292, 8681, 2806, 10378, 12188,
+ 5800, 11811, 3181, 1988, 1024, 9340, 2477, 10928,
+ 4582, 6750, 3619, 5503, 5233, 2463, 8470, 7650,
+ 7964, 6395, 1071, 1272, 3474, 11045, 3291, 11344,
+ 8502, 9478, 9837, 1253, 1857, 6233, 4720, 11561,
+ 6034, 9817, 3339, 1797, 2879, 6242, 5200, 2114,
+ 7962, 9353, 11363, 5475, 6084, 9601, 4108, 7323,
+ 10438, 9471, 1271, 408, 6911, 3079, 360, 8276,
+ 11535, 9156, 9049, 11539, 850, 8617, 784, 7919,
+ 8334, 12170, 1846, 10213, 12184, 7827, 11903, 5600,
+ 9779, 1012, 721, 2784, 6676, 6552, 5348, 4424,
+ 6816, 8405, 9959, 5150, 2356, 5552, 5267, 1333,
+ 8801, 9661, 7308, 5788, 4910, 909, 11613, 4395,
+ 8238, 6686, 4302, 3044, 2285, 12249, 1963, 9216,
+ 4296, 11918, 695, 4371, 9793, 4884, 2411, 10230,
+ 2650, 841, 3890, 10231, 7248, 8505, 11196, 6688,
+ 4059, 6060, 3686, 4722, 11853, 5816, 7058, 6868,
+ 11137, 7926, 4894, 12284, 4102, 3908, 3610, 6525,
+ 7938, 7982, 11977, 6755, 537, 4562, 1623, 8227,
+ 11453, 7544, 906, 11816, 9548, 10858, 9703, 2815,
+ 11736, 6813, 6979, 819, 8903, 6271, 10843, 348,
+ 7514, 8339, 6439, 694, 852, 5659, 2781, 3716,
+ 11589, 3024, 1523, 8659, 4114, 10738, 3303, 5885,
+ 2978, 7289, 11884, 9123, 9323, 11830, 98, 2526,
+ 2116, 4131, 11407, 1844, 3645, 3916, 8133, 2224,
+ 10871, 8092, 9651, 5989, 7140, 8480, 1670, 159,
+ 10923, 4918, 128, 7312, 725, 9157, 5006, 6393,
+ 3494, 6043, 10972, 6181, 11838, 3423, 10514, 7668,
+ 3693, 6658, 6905, 11953, 10212, 11922, 9101, 8365,
+ 5110, 45, 2400, 1921, 4377, 2720, 1695, 51,
+ 2808, 650, 1896, 9997, 9971, 11980, 8098, 4833,
+ 4135, 4257, 5838, 4765, 10985, 11532, 590, 12198,
+ 482, 12173, 2006, 7064, 10018, 3912, 12016, 10519,
+ 11362, 6954, 2210, 284, 5413, 6601, 3865, 10339,
+ 11188, 6231, 517, 9564, 11281, 3863, 1210, 4604,
+ 8160, 11447, 153, 7204, 5763, 5089, 9248, 12154,
+ 11748, 1354, 6672, 179, 5532, 2646, 5941, 12185,
+ 862, 3158, 477, 7279, 5678, 7914, 4254, 302,
+ 2893, 10114, 6890, 9560, 9647, 11905, 4098, 9824,
+ 10269, 1353, 10715, 5325, 6254, 3951, 1807, 6449,
+ 5159, 1308, 8315, 3404, 1877, 1231, 112, 6398,
+ 11724, 12272, 7286, 1459, 12274, 9896, 3456, 800,
+ 1397, 10678, 103, 7420, 7976, 936, 764, 632,
+ 7996, 8223, 8445, 7758, 10870, 9571, 2508, 1946,
+ 6524, 10158, 1044, 4338, 2457, 3641, 1659, 4139,
+ 4688, 9733, 11148, 3946, 2082, 5261, 2036, 11850,
+ 7636, 12236, 5366, 2380, 1399, 7720, 2100, 3217,
+ 10912, 8898, 7578, 11995, 2791, 1215, 3355, 2711,
+ 2267, 2004, 8568, 10176, 3214, 2337, 1750, 4729,
+ 4997, 7415, 6315, 12044, 4374, 7157, 4844, 211,
+ 8003, 10159, 9290, 11481, 1735, 2336, 5793, 9875,
+ 8192, 986, 7527, 1401, 870, 3615, 8465, 2756,
+ 9770, 2034, 10168, 3264, 6132, 54, 2880, 4763,
+ 11805, 3074, 8286, 9428, 4881, 6933, 1090, 10038,
+ 2567, 708, 893, 6465, 4962, 10024, 2090, 5718,
+ 10743, 780, 4733, 4623, 2134, 2087, 4802, 884,
+ 5372, 5795, 5938, 4333, 6559, 7549, 5269, 10664,
+ 4252, 3260, 5917, 10814, 5768, 9983, 8096, 7791,
+ 6800, 7491, 6272, 1907, 10947, 6289, 11803, 6032,
+ 11449, 1171, 9201, 7933, 2479, 7970, 11337, 7062,
+ 8911, 6728, 6542, 8114, 8828, 6595, 3545, 4348,
+ 4610, 2205, 6999, 8106, 5560, 10390, 9321, 2499,
+ 2413, 7272, 6881, 10582, 9308, 9437, 3554, 3326,
+ 5991, 11969, 3415, 12283, 9838, 12063, 4332, 7830,
+ 11329, 6605, 12271, 2044, 11611, 7353, 11201, 11582,
+ 3733, 8943, 9978, 1627, 7168, 3935, 5050, 2762,
+ 7496, 10383, 755, 1654, 12053, 4952, 10134, 4394,
+ 6592, 7898, 7497, 8904, 12029, 3581, 10748, 5674,
+ 10358, 4901, 7414, 8771, 710, 6764, 8462, 7193,
+ 5371, 7274, 11084, 290, 7864, 6827, 11822, 2509,
+ 6578, 4026, 5807, 1458, 5721, 5762, 4178, 2105,
+ 11621, 4852, 8897, 2856, 11510, 9264, 2520, 8776,
+ 7011, 2647, 1898, 7039, 5950, 11163, 5488, 6277,
+ 9182, 11456, 633, 10046, 11554, 5633, 9587, 2333,
+ 7008, 7084, 5047, 7199, 9865, 8997, 569, 6390,
+ 10845, 9679, 8268, 11472, 4203, 1997, 2, 9331,
+ 162, 6182, 2000, 3649, 9792, 6363, 7557, 6187,
+ 8510, 9935, 5536, 9019, 3706, 12009, 1452, 3067,
+ 5494, 9692, 4865, 6019, 7106, 9610, 4588, 10165,
+ 6261, 5887, 2652, 10172, 1580, 10379, 4638, 9949
+};
+
+/*
+ * Table for inverse NTT, binary case:
+ * iGMb[x] = R*((1/g)^rev(x)) mod q
+ * Since g = 7, 1/g = 8778 mod 12289.
+ */
+static const uint16_t iGMb[] = {
+ 4091, 4401, 1081, 1229, 2530, 6014, 7947, 5329,
+ 2579, 4751, 6464, 11703, 7023, 2812, 5890, 10698,
+ 3109, 2125, 1960, 10925, 10601, 10404, 4189, 1875,
+ 5847, 8546, 4615, 5190, 11324, 10578, 5882, 11155,
+ 8417, 12275, 10599, 7446, 5719, 3569, 5981, 10108,
+ 4426, 8306, 10755, 4679, 11052, 1538, 11857, 100,
+ 8247, 6625, 9725, 5145, 3412, 7858, 5831, 9460,
+ 5217, 10740, 7882, 7506, 12172, 11292, 6049, 79,
+ 13, 6938, 8886, 5453, 4586, 11455, 2903, 4676,
+ 9843, 7621, 8822, 9109, 2083, 8507, 8685, 3110,
+ 7015, 3269, 1367, 6397, 10259, 8435, 10527, 11559,
+ 11094, 2211, 1808, 7319, 48, 9547, 2560, 1228,
+ 9438, 10787, 11800, 1820, 11406, 8966, 6159, 3012,
+ 6109, 2796, 2203, 1652, 711, 7004, 1053, 8973,
+ 5244, 1517, 9322, 11269, 900, 3888, 11133, 10736,
+ 4949, 7616, 9974, 4746, 10270, 126, 2921, 6720,
+ 6635, 6543, 1582, 4868, 42, 673, 2240, 7219,
+ 1296, 11989, 7675, 8578, 11949, 989, 10541, 7687,
+ 7085, 8487, 1004, 10236, 4703, 163, 9143, 4597,
+ 6431, 12052, 2991, 11938, 4647, 3362, 2060, 11357,
+ 12011, 6664, 5655, 7225, 5914, 9327, 4092, 5880,
+ 6932, 3402, 5133, 9394, 11229, 5252, 9008, 1556,
+ 6908, 4773, 3853, 8780, 10325, 7737, 1758, 7103,
+ 11375, 12273, 8602, 3243, 6536, 7590, 8591, 11552,
+ 6101, 3253, 9969, 9640, 4506, 3736, 6829, 10822,
+ 9130, 9948, 3566, 2133, 3901, 6038, 7333, 6609,
+ 3468, 4659, 625, 2700, 7738, 3443, 3060, 3388,
+ 3526, 4418, 11911, 6232, 1730, 2558, 10340, 5344,
+ 5286, 2190, 11562, 6199, 2482, 8756, 5387, 4101,
+ 4609, 8605, 8226, 144, 5656, 8704, 2621, 5424,
+ 10812, 2959, 11346, 6249, 1715, 4951, 9540, 1888,
+ 3764, 39, 8219, 2080, 2502, 1469, 10550, 8709,
+ 5601, 1093, 3784, 5041, 2058, 8399, 11448, 9639,
+ 2059, 9878, 7405, 2496, 7918, 11594, 371, 7993,
+ 3073, 10326, 40, 10004, 9245, 7987, 5603, 4051,
+ 7894, 676, 11380, 7379, 6501, 4981, 2628, 3488,
+ 10956, 7022, 6737, 9933, 7139, 2330, 3884, 5473,
+ 7865, 6941, 5737, 5613, 9505, 11568, 11277, 2510,
+ 6689, 386, 4462, 105, 2076, 10443, 119, 3955,
+ 4370, 11505, 3672, 11439, 750, 3240, 3133, 754,
+ 4013, 11929, 9210, 5378, 11881, 11018, 2818, 1851,
+ 4966, 8181, 2688, 6205, 6814, 926, 2936, 4327,
+ 10175, 7089, 6047, 9410, 10492, 8950, 2472, 6255,
+ 728, 7569, 6056, 10432, 11036, 2452, 2811, 3787,
+ 945, 8998, 1244, 8815, 11017, 11218, 5894, 4325,
+ 4639, 3819, 9826, 7056, 6786, 8670, 5539, 7707,
+ 1361, 9812, 2949, 11265, 10301, 9108, 478, 6489,
+ 101, 1911, 9483, 3608, 11997, 10536, 812, 8915,
+ 637, 8159, 5299, 9128, 3512, 8290, 7068, 7922,
+ 3036, 4759, 2163, 3937, 3755, 11306, 7739, 4922,
+ 11932, 424, 5538, 6228, 11131, 7778, 11974, 1097,
+ 2890, 10027, 2569, 2250, 2352, 821, 2550, 11016,
+ 7769, 136, 617, 3157, 5889, 9219, 6855, 120,
+ 4405, 1825, 9635, 7214, 10261, 11393, 2441, 9562,
+ 11176, 599, 2085, 11465, 7233, 6177, 4801, 9926,
+ 9010, 4514, 9455, 11352, 11670, 6174, 7950, 9766,
+ 6896, 11603, 3213, 8473, 9873, 2835, 10422, 3732,
+ 7961, 1457, 10857, 8069, 832, 1628, 3410, 4900,
+ 10855, 5111, 9543, 6325, 7431, 4083, 3072, 8847,
+ 9853, 10122, 5259, 11413, 6556, 303, 1465, 3871,
+ 4873, 5813, 10017, 6898, 3311, 5947, 8637, 5852,
+ 3856, 928, 4933, 8530, 1871, 2184, 5571, 5879,
+ 3481, 11597, 9511, 8153, 35, 2609, 5963, 8064,
+ 1080, 12039, 8444, 3052, 3813, 11065, 6736, 8454,
+ 2340, 7651, 1910, 10709, 2117, 9637, 6402, 6028,
+ 2124, 7701, 2679, 5183, 6270, 7424, 2597, 6795,
+ 9222, 10837, 280, 8583, 3270, 6753, 2354, 3779,
+ 6102, 4732, 5926, 2497, 8640, 10289, 6107, 12127,
+ 2958, 12287, 10292, 8086, 817, 4021, 2610, 1444,
+ 5899, 11720, 3292, 2424, 5090, 7242, 5205, 5281,
+ 9956, 2702, 6656, 735, 2243, 11656, 833, 3107,
+ 6012, 6801, 1126, 6339, 5250, 10391, 9642, 5278,
+ 3513, 9769, 3025, 779, 9433, 3392, 7437, 668,
+ 10184, 8111, 6527, 6568, 10831, 6482, 8263, 5711,
+ 9780, 467, 5462, 4425, 11999, 1205, 5015, 6918,
+ 5096, 3827, 5525, 11579, 3518, 4875, 7388, 1931,
+ 6615, 1541, 8708, 260, 3385, 4792, 4391, 5697,
+ 7895, 2155, 7337, 236, 10635, 11534, 1906, 4793,
+ 9527, 7239, 8354, 5121, 10662, 2311, 3346, 8556,
+ 707, 1088, 4936, 678, 10245, 18, 5684, 960,
+ 4459, 7957, 226, 2451, 6, 8874, 320, 6298,
+ 8963, 8735, 2852, 2981, 1707, 5408, 5017, 9876,
+ 9790, 2968, 1899, 6729, 4183, 5290, 10084, 7679,
+ 7941, 8744, 5694, 3461, 4175, 5747, 5561, 3378,
+ 5227, 952, 4319, 9810, 4356, 3088, 11118, 840,
+ 6257, 486, 6000, 1342, 10382, 6017, 4798, 5489,
+ 4498, 4193, 2306, 6521, 1475, 6372, 9029, 8037,
+ 1625, 7020, 4740, 5730, 7956, 6351, 6494, 6917,
+ 11405, 7487, 10202, 10155, 7666, 7556, 11509, 1546,
+ 6571, 10199, 2265, 7327, 5824, 11396, 11581, 9722,
+ 2251, 11199, 5356, 7408, 2861, 4003, 9215, 484,
+ 7526, 9409, 12235, 6157, 9025, 2121, 10255, 2519,
+ 9533, 3824, 8674, 11419, 10888, 4762, 11303, 4097,
+ 2414, 6496, 9953, 10554, 808, 2999, 2130, 4286,
+ 12078, 7445, 5132, 7915, 245, 5974, 4874, 7292,
+ 7560, 10539, 9952, 9075, 2113, 3721, 10285, 10022,
+ 9578, 8934, 11074, 9498, 294, 4711, 3391, 1377,
+ 9072, 10189, 4569, 10890, 9909, 6923, 53, 4653,
+ 439, 10253, 7028, 10207, 8343, 1141, 2556, 7601,
+ 8150, 10630, 8648, 9832, 7951, 11245, 2131, 5765,
+ 10343, 9781, 2718, 1419, 4531, 3844, 4066, 4293,
+ 11657, 11525, 11353, 4313, 4869, 12186, 1611, 10892,
+ 11489, 8833, 2393, 15, 10830, 5003, 17, 565,
+ 5891, 12177, 11058, 10412, 8885, 3974, 10981, 7130,
+ 5840, 10482, 8338, 6035, 6964, 1574, 10936, 2020,
+ 2465, 8191, 384, 2642, 2729, 5399, 2175, 9396,
+ 11987, 8035, 4375, 6611, 5010, 11812, 9131, 11427,
+ 104, 6348, 9643, 6757, 12110, 5617, 10935, 541,
+ 135, 3041, 7200, 6526, 5085, 12136, 842, 4129,
+ 7685, 11079, 8426, 1008, 2725, 11772, 6058, 1101,
+ 1950, 8424, 5688, 6876, 12005, 10079, 5335, 927,
+ 1770, 273, 8377, 2271, 5225, 10283, 116, 11807,
+ 91, 11699, 757, 1304, 7524, 6451, 8032, 8154,
+ 7456, 4191, 309, 2318, 2292, 10393, 11639, 9481,
+ 12238, 10594, 9569, 7912, 10368, 9889, 12244, 7179,
+ 3924, 3188, 367, 2077, 336, 5384, 5631, 8596,
+ 4621, 1775, 8866, 451, 6108, 1317, 6246, 8795,
+ 5896, 7283, 3132, 11564, 4977, 12161, 7371, 1366,
+ 12130, 10619, 3809, 5149, 6300, 2638, 4197, 1418,
+ 10065, 4156, 8373, 8644, 10445, 882, 8158, 10173,
+ 9763, 12191, 459, 2966, 3166, 405, 5000, 9311,
+ 6404, 8986, 1551, 8175, 3630, 10766, 9265, 700,
+ 8573, 9508, 6630, 11437, 11595, 5850, 3950, 4775,
+ 11941, 1446, 6018, 3386, 11470, 5310, 5476, 553,
+ 9474, 2586, 1431, 2741, 473, 11383, 4745, 836,
+ 4062, 10666, 7727, 11752, 5534, 312, 4307, 4351,
+ 5764, 8679, 8381, 8187, 5, 7395, 4363, 1152,
+ 5421, 5231, 6473, 436, 7567, 8603, 6229, 8230
+};
+
+/*
+ * Reduce a small signed integer modulo q. The source integer MUST
+ * be between -q/2 and +q/2. Branchless: the reduction uses a mask,
+ * never a data-dependent branch (constant-time).
+ */
+static inline uint32_t
+mq_conv_small(int x) {
+ /*
+ * If x < 0, the cast to uint32_t will set the high bit to 1.
+ * '-(y >> 31)' is then an all-ones mask, so q is added exactly
+ * when the input was negative.
+ */
+ uint32_t y;
+
+ y = (uint32_t)x;
+ y += Q & -(y >> 31);
+ return y;
+}
+
+/*
+ * Addition modulo q. Operands must be in the 0..q-1 range.
+ * Constant-time (mask-based conditional addition, no branch).
+ */
+static inline uint32_t
+mq_add(uint32_t x, uint32_t y) {
+ /*
+ * We compute x + y - q. If the result is negative, then the
+ * high bit will be set, and 'd >> 31' will be equal to 1;
+ * thus '-(d >> 31)' will be an all-one pattern. Otherwise,
+ * it will be an all-zero pattern. In other words, this
+ * implements a conditional addition of q.
+ */
+ uint32_t d;
+
+ d = x + y - Q;
+ d += Q & -(d >> 31);
+ return d;
+}
+
+/*
+ * Subtraction modulo q. Operands must be in the 0..q-1 range.
+ * Constant-time (mask-based conditional addition, no branch).
+ */
+static inline uint32_t
+mq_sub(uint32_t x, uint32_t y) {
+ /*
+ * As in mq_add(), we use a conditional addition to ensure the
+ * result is in the 0..q-1 range: x - y underflows (high bit set)
+ * exactly when x < y, in which case q is added back.
+ */
+ uint32_t d;
+
+ d = x - y;
+ d += Q & -(d >> 31);
+ return d;
+}
+
+/*
+ * Division by 2 modulo q. Operand must be in the 0..q-1 range.
+ */
+static inline uint32_t
+mq_rshift1(uint32_t x) {
+ /*
+ * q is odd, so if x is odd then x + q is even and (x + q) / 2 is
+ * the correct half modulo q. '-(x & 1)' is an all-ones mask when
+ * x is odd, making the conditional addition branchless.
+ */
+ x += Q & -(x & 1);
+ return (x >> 1);
+}
+
+/*
+ * Montgomery multiplication modulo q. If we set R = 2^16 mod q, then
+ * this function computes: x * y / R mod q
+ * Operands must be in the 0..q-1 range. Constant-time.
+ */
+static inline uint32_t
+mq_montymul(uint32_t x, uint32_t y) {
+ uint32_t z, w;
+
+ /*
+ * We compute x*y + k*q with a value of k chosen so that the 16
+ * low bits of the result are 0. We can then shift the value.
+ * After the shift, result may still be larger than q, but it
+ * will be lower than 2*q, so a conditional subtraction works.
+ *
+ * For the low 16 bits of z + w to cancel, Q0I must satisfy
+ * Q0I * q == -1 mod 2^16 (the usual Montgomery constant,
+ * defined with the other mod-q constants in this file).
+ */
+
+ z = x * y;
+ w = ((z * Q0I) & 0xFFFF) * Q;
+
+ /*
+ * When adding z and w, the result will have its low 16 bits
+ * equal to 0. Since x, y and z are lower than q, the sum will
+ * be no more than (2^15 - 1) * q + (q - 1)^2, which will
+ * fit on 29 bits.
+ */
+ z = (z + w) >> 16;
+
+ /*
+ * After the shift, analysis shows that the value will be less
+ * than 2q. We do a subtraction then conditional subtraction to
+ * ensure the result is in the expected range.
+ */
+ z -= Q;
+ z += Q & -(z >> 31);
+ return z;
+}
+
+/*
+ * Montgomery squaring (computes (x^2)/R).
+ * Thin wrapper over mq_montymul(); operand must be in 0..q-1.
+ */
+static inline uint32_t
+mq_montysqr(uint32_t x) {
+ return mq_montymul(x, x);
+}
+
+/*
+ * Divide x by y modulo q = 12289.
+ * Inversion is done via Fermat's little theorem (y^(q-2) mod q), so
+ * the whole computation is constant-time. Note: if y == 0 then the
+ * result is 0 (0 raised to any positive power is 0); callers that
+ * need to reject a zero divisor must test for it separately.
+ */
+static inline uint32_t
+mq_div_12289(uint32_t x, uint32_t y) {
+ /*
+ * We invert y by computing y^(q-2) mod q.
+ *
+ * We use the following addition chain for exponent e = 12287:
+ *
+ * e0 = 1
+ * e1 = 2 * e0 = 2
+ * e2 = e1 + e0 = 3
+ * e3 = e2 + e1 = 5
+ * e4 = 2 * e3 = 10
+ * e5 = 2 * e4 = 20
+ * e6 = 2 * e5 = 40
+ * e7 = 2 * e6 = 80
+ * e8 = 2 * e7 = 160
+ * e9 = e8 + e2 = 163
+ * e10 = e9 + e8 = 323
+ * e11 = 2 * e10 = 646
+ * e12 = 2 * e11 = 1292
+ * e13 = e12 + e9 = 1455
+ * e14 = 2 * e13 = 2910
+ * e15 = 2 * e14 = 5820
+ * e16 = e15 + e10 = 6143
+ * e17 = 2 * e16 = 12286
+ * e18 = e17 + e0 = 12287
+ *
+ * Additions on exponents are converted to Montgomery
+ * multiplications. We define all intermediate results as so
+ * many local variables, and let the C compiler work out which
+ * must be kept around.
+ */
+ uint32_t y0, y1, y2, y3, y4, y5, y6, y7, y8, y9;
+ uint32_t y10, y11, y12, y13, y14, y15, y16, y17, y18;
+
+ /* y0 = y * R mod q: lift y into Montgomery representation. */
+ y0 = mq_montymul(y, R2);
+ y1 = mq_montysqr(y0);
+ y2 = mq_montymul(y1, y0);
+ y3 = mq_montymul(y2, y1);
+ y4 = mq_montysqr(y3);
+ y5 = mq_montysqr(y4);
+ y6 = mq_montysqr(y5);
+ y7 = mq_montysqr(y6);
+ y8 = mq_montysqr(y7);
+ y9 = mq_montymul(y8, y2);
+ y10 = mq_montymul(y9, y8);
+ y11 = mq_montysqr(y10);
+ y12 = mq_montysqr(y11);
+ y13 = mq_montymul(y12, y9);
+ y14 = mq_montysqr(y13);
+ y15 = mq_montysqr(y14);
+ y16 = mq_montymul(y15, y10);
+ y17 = mq_montysqr(y16);
+ y18 = mq_montymul(y17, y0);
+
+ /*
+ * Final multiplication with x, which is not in Montgomery
+ * representation, computes the correct division result.
+ */
+ return mq_montymul(y18, x);
+}
+
+/*
+ * Compute NTT on a ring element.
+ * In-place, iterative butterfly transform over the n = 2^logn
+ * coefficients of a[] (values mod q). Twiddle factors are read from
+ * the precomputed GMb[] table; NOTE(review): since they are applied
+ * through mq_montymul() (which divides by R), the table entries are
+ * presumably stored in Montgomery representation — confirm against
+ * the table definition earlier in this file.
+ */
+static void
+mq_NTT(uint16_t *a, unsigned logn) {
+ size_t n, t, m;
+
+ n = (size_t)1 << logn;
+ t = n;
+ /* m = number of sub-transforms at this stage; ht = half stride. */
+ for (m = 1; m < n; m <<= 1) {
+ size_t ht, i, j1;
+
+ ht = t >> 1;
+ for (i = 0, j1 = 0; i < m; i ++, j1 += t) {
+ size_t j, j2;
+ uint32_t s;
+
+ s = GMb[m + i];
+ j2 = j1 + ht;
+ /* Butterfly: (u, v) -> (u + s*v, u - s*v) mod q. */
+ for (j = j1; j < j2; j ++) {
+ uint32_t u, v;
+
+ u = a[j];
+ v = mq_montymul(a[j + ht], s);
+ a[j] = (uint16_t)mq_add(u, v);
+ a[j + ht] = (uint16_t)mq_sub(u, v);
+ }
+ }
+ t = ht;
+ }
+}
+
+/*
+ * Compute the inverse NTT on a ring element, binary case.
+ * In-place inverse of mq_NTT(): inverse butterflies using the iGMb[]
+ * table, followed by a multiplication of every coefficient by 1/n
+ * (carried in Montgomery representation so mq_montymul() yields the
+ * plain result).
+ */
+static void
+mq_iNTT(uint16_t *a, unsigned logn) {
+ size_t n, t, m;
+ uint32_t ni;
+
+ n = (size_t)1 << logn;
+ t = 1;
+ m = n;
+ while (m > 1) {
+ size_t hm, dt, i, j1;
+
+ hm = m >> 1;
+ dt = t << 1;
+ for (i = 0, j1 = 0; i < hm; i ++, j1 += dt) {
+ size_t j, j2;
+ uint32_t s;
+
+ j2 = j1 + t;
+ s = iGMb[hm + i];
+ /* Inverse butterfly: (u, v) -> (u + v, s*(u - v)) mod q. */
+ for (j = j1; j < j2; j ++) {
+ uint32_t u, v, w;
+
+ u = a[j];
+ v = a[j + t];
+ a[j] = (uint16_t)mq_add(u, v);
+ w = mq_sub(u, v);
+ a[j + t] = (uint16_t)
+ mq_montymul(w, s);
+ }
+ }
+ t = dt;
+ m = hm;
+ }
+
+ /*
+ * To complete the inverse NTT, we must now divide all values by
+ * n (the vector size). We thus need the inverse of n, i.e. we
+ * need to divide 1 by 2 logn times. But we also want it in
+ * Montgomery representation, i.e. we also want to multiply it
+ * by R = 2^16. In the common case, this should be a simple right
+ * shift. The loop below is generic and works also in corner cases;
+ * its computation time is negligible.
+ */
+ ni = R;
+ for (m = n; m > 1; m >>= 1) {
+ ni = mq_rshift1(ni);
+ }
+ for (m = 0; m < n; m ++) {
+ a[m] = (uint16_t)mq_montymul(a[m], ni);
+ }
+}
+
+/*
+ * Convert a polynomial (mod q) to Montgomery representation.
+ * In-place over the n = 2^logn coefficients of f[]: each value is
+ * multiplied by R (R2 = R^2 mod q, and mq_montymul divides by R).
+ */
+static void
+mq_poly_tomonty(uint16_t *f, unsigned logn) {
+ size_t u, n;
+
+ n = (size_t)1 << logn;
+ for (u = 0; u < n; u ++) {
+ f[u] = (uint16_t)mq_montymul(f[u], R2);
+ }
+}
+
+/*
+ * Multiply two polynomials together (NTT representation, and using
+ * a Montgomery multiplication). Result f*g is written over f.
+ * Coefficient-wise product over n = 2^logn values; one operand must
+ * carry the Montgomery factor R for the result to be plain.
+ */
+static void
+mq_poly_montymul_ntt(uint16_t *f, const uint16_t *g, unsigned logn) {
+ size_t u, n;
+
+ n = (size_t)1 << logn;
+ for (u = 0; u < n; u ++) {
+ f[u] = (uint16_t)mq_montymul(f[u], g[u]);
+ }
+}
+
+/*
+ * Subtract polynomial g from polynomial f.
+ * In-place over f[]: f[u] = (f[u] - g[u]) mod q for all n = 2^logn
+ * coefficients.
+ */
+static void
+mq_poly_sub(uint16_t *f, const uint16_t *g, unsigned logn) {
+ size_t u, n;
+
+ n = (size_t)1 << logn;
+ for (u = 0; u < n; u ++) {
+ f[u] = (uint16_t)mq_sub(f[u], g[u]);
+ }
+}
+
+/* ===================================================================== */
+
+/* see inner.h */
+/*
+ * Convert the public key h (n = 2^logn values mod q) to NTT +
+ * Montgomery representation, in place. This is the form expected by
+ * PQCLEAN_FALCON512_CLEAN_verify_raw().
+ */
+void
+PQCLEAN_FALCON512_CLEAN_to_ntt_monty(uint16_t *h, unsigned logn) {
+ mq_NTT(h, logn);
+ mq_poly_tomonty(h, logn);
+}
+
+/* see inner.h */
+/*
+ * Raw signature verification: rebuild -s1 = s2*h - c0 (mod phi, mod q)
+ * and accept if and only if the (-s1, s2) vector is short enough.
+ *   c0    hashed message, n = 2^logn values mod q
+ *   s2    signature half, small signed values
+ *   h     public key, already in NTT + Montgomery representation
+ *         (see PQCLEAN_FALCON512_CLEAN_to_ntt_monty())
+ *   tmp   scratch buffer; must hold at least n 16-bit values
+ * Returns the result of is_short(): nonzero on acceptance.
+ */
+int
+PQCLEAN_FALCON512_CLEAN_verify_raw(const uint16_t *c0, const int16_t *s2,
+ const uint16_t *h, unsigned logn, uint8_t *tmp) {
+ size_t u, n;
+ uint16_t *tt;
+
+ n = (size_t)1 << logn;
+ tt = (uint16_t *)tmp;
+
+ /*
+ * Reduce s2 elements modulo q ([0..q-1] range).
+ */
+ for (u = 0; u < n; u ++) {
+ uint32_t w;
+
+ w = (uint32_t)s2[u];
+ w += Q & -(w >> 31);
+ tt[u] = (uint16_t)w;
+ }
+
+ /*
+ * Compute -s1 = s2*h - c0 mod phi mod q (in tt[]).
+ */
+ mq_NTT(tt, logn);
+ mq_poly_montymul_ntt(tt, h, logn);
+ mq_iNTT(tt, logn);
+ mq_poly_sub(tt, c0, logn);
+
+ /*
+ * Normalize -s1 elements into the [-q/2..q/2] range.
+ */
+ for (u = 0; u < n; u ++) {
+ int32_t w;
+
+ w = (int32_t)tt[u];
+ w -= (int32_t)(Q & -(((Q >> 1) - (uint32_t)w) >> 31));
+ ((int16_t *)tt)[u] = (int16_t)w;
+ }
+
+ /*
+ * Signature is valid if and only if the aggregate (-s1,s2) vector
+ * is short enough.
+ */
+ return PQCLEAN_FALCON512_CLEAN_is_short((int16_t *)tt, s2, logn);
+}
+
+/* see inner.h */
+/*
+ * Compute the public key h = g/f mod phi mod q from the private
+ * polynomials f and g (small signed coefficients).
+ *   tmp  scratch buffer; must hold at least n = 2^logn 16-bit values
+ * Returns 1 on success, 0 if f is not invertible mod phi mod q
+ * (some NTT coefficient of f is zero).
+ */
+int
+PQCLEAN_FALCON512_CLEAN_compute_public(uint16_t *h,
+ const int8_t *f, const int8_t *g, unsigned logn, uint8_t *tmp) {
+ size_t u, n;
+ uint16_t *tt;
+
+ n = (size_t)1 << logn;
+ tt = (uint16_t *)tmp;
+ for (u = 0; u < n; u ++) {
+ tt[u] = (uint16_t)mq_conv_small(f[u]);
+ h[u] = (uint16_t)mq_conv_small(g[u]);
+ }
+ mq_NTT(h, logn);
+ mq_NTT(tt, logn);
+ /* Coefficient-wise division in the NTT domain; bail out on a
+    zero divisor (f not invertible). */
+ for (u = 0; u < n; u ++) {
+ if (tt[u] == 0) {
+ return 0;
+ }
+ h[u] = (uint16_t)mq_div_12289(h[u], tt[u]);
+ }
+ mq_iNTT(h, logn);
+ return 1;
+}
+
+/* see inner.h */
+/*
+ * Rebuild the missing private polynomial G from f, g and F, using
+ * G = g*F/f mod phi mod q.
+ *   tmp  scratch buffer; must hold at least 2*n 16-bit values
+ *        (n = 2^logn)
+ * Returns 1 on success; 0 if f is not invertible mod phi mod q, or
+ * if some recovered coefficient falls outside [-127..+127].
+ */
+int
+PQCLEAN_FALCON512_CLEAN_complete_private(int8_t *G,
+ const int8_t *f, const int8_t *g, const int8_t *F,
+ unsigned logn, uint8_t *tmp) {
+ size_t u, n;
+ uint16_t *t1, *t2;
+
+ n = (size_t)1 << logn;
+ t1 = (uint16_t *)tmp;
+ t2 = t1 + n;
+ for (u = 0; u < n; u ++) {
+ t1[u] = (uint16_t)mq_conv_small(g[u]);
+ t2[u] = (uint16_t)mq_conv_small(F[u]);
+ }
+ mq_NTT(t1, logn);
+ mq_NTT(t2, logn);
+ /* t1 <- g*F in the NTT domain (tomonty cancels the 1/R of the
+    Montgomery multiplication). */
+ mq_poly_tomonty(t1, logn);
+ mq_poly_montymul_ntt(t1, t2, logn);
+ for (u = 0; u < n; u ++) {
+ t2[u] = (uint16_t)mq_conv_small(f[u]);
+ }
+ mq_NTT(t2, logn);
+ for (u = 0; u < n; u ++) {
+ if (t2[u] == 0) {
+ return 0;
+ }
+ t1[u] = (uint16_t)mq_div_12289(t1[u], t2[u]);
+ }
+ mq_iNTT(t1, logn);
+ for (u = 0; u < n; u ++) {
+ uint32_t w;
+ int32_t gi;
+
+ w = t1[u];
+ /* Map w from [0..q-1] to the signed range: subtract q when
+    w >= q/2 (the mask is all-ones in that case). */
+ w -= (Q & ~ -((w - (Q >> 1)) >> 31));
+ /* NOTE(review): reinterprets the uint32_t bit pattern as
+    int32_t via a pointer cast; relies on two's-complement
+    representation (true on all supported targets). */
+ gi = *(int32_t *)&w;
+ if (gi < -127 || gi > +127) {
+ return 0;
+ }
+ G[u] = (int8_t)gi;
+ }
+ return 1;
+}
+
+/* see inner.h */
+/*
+ * Check whether s2 is invertible mod phi mod q, i.e. whether all of
+ * its NTT coefficients are non-zero. Constant-time: the result is
+ * accumulated with masks rather than early exits.
+ *   tmp  scratch buffer; must hold at least n = 2^logn 16-bit values
+ * Returns 1 if invertible, 0 otherwise.
+ */
+int
+PQCLEAN_FALCON512_CLEAN_is_invertible(
+ const int16_t *s2, unsigned logn, uint8_t *tmp) {
+ size_t u, n;
+ uint16_t *tt;
+ uint32_t r;
+
+ n = (size_t)1 << logn;
+ tt = (uint16_t *)tmp;
+ for (u = 0; u < n; u ++) {
+ uint32_t w;
+
+ w = (uint32_t)s2[u];
+ w += Q & -(w >> 31);
+ tt[u] = (uint16_t)w;
+ }
+ mq_NTT(tt, logn);
+ r = 0;
+ /* If tt[u] == 0, then (uint32_t)(tt[u] - 1) is all-ones and its
+    high bit sticks in r; otherwise the value is < 2^16 and the
+    high bit stays clear. */
+ for (u = 0; u < n; u ++) {
+ r |= (uint32_t)(tt[u] - 1);
+ }
+ return (int)(1u - (r >> 31));
+}
+
+/* see inner.h */
+/*
+ * Key-recovery verification: rebuild the public key as
+ * h = (c0 - s1) / s2 mod phi mod q from the hashed message c0 and
+ * the signature halves (s1, s2). Constant-time: the invertibility of
+ * s2 is tracked with a flag rather than an early exit.
+ *   tmp  scratch buffer; must hold at least n = 2^logn 16-bit values
+ * Returns 1 if (s1, s2) is short enough AND s2 was invertible;
+ * the caller must still match the rebuilt h against the expected key.
+ */
+int
+PQCLEAN_FALCON512_CLEAN_verify_recover(uint16_t *h,
+ const uint16_t *c0, const int16_t *s1, const int16_t *s2,
+ unsigned logn, uint8_t *tmp) {
+ size_t u, n;
+ uint16_t *tt;
+ uint32_t r;
+
+ n = (size_t)1 << logn;
+
+ /*
+ * Reduce elements of s1 and s2 modulo q; then write s2 into tt[]
+ * and c0 - s1 into h[].
+ */
+ tt = (uint16_t *)tmp;
+ for (u = 0; u < n; u ++) {
+ uint32_t w;
+
+ w = (uint32_t)s2[u];
+ w += Q & -(w >> 31);
+ tt[u] = (uint16_t)w;
+
+ w = (uint32_t)s1[u];
+ w += Q & -(w >> 31);
+ w = mq_sub(c0[u], w);
+ h[u] = (uint16_t)w;
+ }
+
+ /*
+ * Compute h = (c0 - s1) / s2. If one of the coefficients of s2
+ * is zero (in NTT representation) then the operation fails. We
+ * keep that information into a flag so that we do not deviate
+ * from strict constant-time processing; if all coefficients of
+ * s2 are non-zero, then the high bit of r will be zero.
+ */
+ mq_NTT(tt, logn);
+ mq_NTT(h, logn);
+ r = 0;
+ for (u = 0; u < n; u ++) {
+ r |= (uint32_t)(tt[u] - 1);
+ h[u] = (uint16_t)mq_div_12289(h[u], tt[u]);
+ }
+ mq_iNTT(h, logn);
+
+ /*
+ * Signature is acceptable if and only if it is short enough,
+ * and s2 was invertible mod phi mod q. The caller must still
+ * check that the rebuilt public key matches the expected
+ * value (e.g. through a hash).
+ * The final result is carried in the high bit of r.
+ */
+ r = ~r & (uint32_t) - PQCLEAN_FALCON512_CLEAN_is_short(s1, s2, logn);
+ return (int)(r >> 31);
+}
+
+/* see inner.h */
+/*
+ * Count the number of zero coefficients of sig in NTT representation.
+ *   tmp  scratch buffer; must hold at least n = 2^logn 16-bit values
+ * Returns the count (0..n). Constant-time accumulation.
+ */
+int
+PQCLEAN_FALCON512_CLEAN_count_nttzero(const int16_t *sig, unsigned logn, uint8_t *tmp) {
+ uint16_t *s2;
+ size_t u, n;
+ uint32_t r;
+
+ n = (size_t)1 << logn;
+ s2 = (uint16_t *)tmp;
+ for (u = 0; u < n; u ++) {
+ uint32_t w;
+
+ w = (uint32_t)sig[u];
+ w += Q & -(w >> 31);
+ s2[u] = (uint16_t)w;
+ }
+ mq_NTT(s2, logn);
+ r = 0;
+ /* w underflows to all-ones exactly when s2[u] == 0, so the high
+    bit contributes 1 to the count only for zero coefficients. */
+ for (u = 0; u < n; u ++) {
+ uint32_t w;
+
+ w = (uint32_t)s2[u] - 1u;
+ r += (w >> 31);
+ }
+ return (int)r;
+}
diff --git a/lib/liboqs/src/sig/falcon/sig_falcon.h b/lib/liboqs/src/sig/falcon/sig_falcon.h
new file mode 100644
index 000000000..2cd661617
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/sig_falcon.h
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: MIT
+
+#ifndef OQS_SIG_FALCON_H
+#define OQS_SIG_FALCON_H
+
+#include <oqs/oqs.h>
+
+#ifdef OQS_ENABLE_SIG_falcon_512
+/* Falcon-512: public-key, secret-key and signature buffer lengths, in bytes. */
+#define OQS_SIG_falcon_512_length_public_key 897
+#define OQS_SIG_falcon_512_length_secret_key 1281
+#define OQS_SIG_falcon_512_length_signature 690
+
+/* Allocate and initialize an OQS_SIG dispatch object for Falcon-512;
+   returns NULL on allocation failure. */
+OQS_SIG *OQS_SIG_falcon_512_new(void);
+OQS_API OQS_STATUS OQS_SIG_falcon_512_keypair(uint8_t *public_key, uint8_t *secret_key);
+OQS_API OQS_STATUS OQS_SIG_falcon_512_sign(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *secret_key);
+OQS_API OQS_STATUS OQS_SIG_falcon_512_verify(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *public_key);
+#endif
+
+#ifdef OQS_ENABLE_SIG_falcon_1024
+/* Falcon-1024: public-key, secret-key and signature buffer lengths, in bytes. */
+#define OQS_SIG_falcon_1024_length_public_key 1793
+#define OQS_SIG_falcon_1024_length_secret_key 2305
+#define OQS_SIG_falcon_1024_length_signature 1330
+
+/* Allocate and initialize an OQS_SIG dispatch object for Falcon-1024;
+   returns NULL on allocation failure. */
+OQS_SIG *OQS_SIG_falcon_1024_new(void);
+OQS_API OQS_STATUS OQS_SIG_falcon_1024_keypair(uint8_t *public_key, uint8_t *secret_key);
+OQS_API OQS_STATUS OQS_SIG_falcon_1024_sign(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *secret_key);
+OQS_API OQS_STATUS OQS_SIG_falcon_1024_verify(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *public_key);
+#endif
+
+#endif
diff --git a/lib/liboqs/src/sig/falcon/sig_falcon_1024.c b/lib/liboqs/src/sig/falcon/sig_falcon_1024.c
new file mode 100644
index 000000000..cdb3cd0d7
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/sig_falcon_1024.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: MIT
+
+#include <stdlib.h>
+
+#include <oqs/sig_falcon.h>
+
+#if defined(OQS_ENABLE_SIG_falcon_1024)
+
+/*
+ * Allocate and populate the OQS_SIG dispatch object for Falcon-1024.
+ * Returns NULL on allocation failure; the caller owns the returned
+ * object (NOTE(review): presumably released via OQS_SIG_free() —
+ * confirm against the liboqs OQS_SIG API).
+ */
+OQS_SIG *OQS_SIG_falcon_1024_new(void) {
+
+ OQS_SIG *sig = malloc(sizeof(OQS_SIG));
+ if (sig == NULL) {
+ return NULL;
+ }
+ sig->method_name = OQS_SIG_alg_falcon_1024;
+ sig->alg_version = "supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/cea1fa5a/falcon";
+
+ sig->claimed_nist_level = 5;
+ sig->euf_cma = true;
+
+ sig->length_public_key = OQS_SIG_falcon_1024_length_public_key;
+ sig->length_secret_key = OQS_SIG_falcon_1024_length_secret_key;
+ sig->length_signature = OQS_SIG_falcon_1024_length_signature;
+
+ sig->keypair = OQS_SIG_falcon_1024_keypair;
+ sig->sign = OQS_SIG_falcon_1024_sign;
+ sig->verify = OQS_SIG_falcon_1024_verify;
+
+ return sig;
+}
+
+extern int PQCLEAN_FALCON1024_CLEAN_crypto_sign_keypair(uint8_t *pk, uint8_t *sk);
+extern int PQCLEAN_FALCON1024_CLEAN_crypto_sign_signature(uint8_t *sig, size_t *siglen, const uint8_t *m, size_t mlen, const uint8_t *sk);
+extern int PQCLEAN_FALCON1024_CLEAN_crypto_sign_verify(const uint8_t *sig, size_t siglen, const uint8_t *m, size_t mlen, const uint8_t *pk);
+
+#if defined(OQS_ENABLE_SIG_falcon_1024_avx2)
+extern int PQCLEAN_FALCON1024_AVX2_crypto_sign_keypair(uint8_t *pk, uint8_t *sk);
+extern int PQCLEAN_FALCON1024_AVX2_crypto_sign_signature(uint8_t *sig, size_t *siglen, const uint8_t *m, size_t mlen, const uint8_t *sk);
+extern int PQCLEAN_FALCON1024_AVX2_crypto_sign_verify(const uint8_t *sig, size_t siglen, const uint8_t *m, size_t mlen, const uint8_t *pk);
+#endif
+
+/*
+ * Generate a Falcon-1024 keypair into the caller-provided buffers.
+ * Dispatch: AVX2 implementation when compiled in; with OQS_DIST_BUILD
+ * the CPU is probed at runtime, otherwise the choice is compile-time.
+ * The PQClean int return (0 = success) is cast to OQS_STATUS.
+ */
+OQS_API OQS_STATUS OQS_SIG_falcon_1024_keypair(uint8_t *public_key, uint8_t *secret_key) {
+#if defined(OQS_ENABLE_SIG_falcon_1024_avx2)
+#if defined(OQS_DIST_BUILD)
+ if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) {
+#endif /* OQS_DIST_BUILD */
+ return (OQS_STATUS) PQCLEAN_FALCON1024_AVX2_crypto_sign_keypair(public_key, secret_key);
+#if defined(OQS_DIST_BUILD)
+ } else {
+ return (OQS_STATUS) PQCLEAN_FALCON1024_CLEAN_crypto_sign_keypair(public_key, secret_key);
+ }
+#endif /* OQS_DIST_BUILD */
+#else
+ return (OQS_STATUS) PQCLEAN_FALCON1024_CLEAN_crypto_sign_keypair(public_key, secret_key);
+#endif
+}
+
+/*
+ * Sign a message with Falcon-1024; *signature_len receives the actual
+ * signature size. Same AVX2/CLEAN dispatch scheme as the keypair
+ * wrapper above: runtime CPU probe under OQS_DIST_BUILD, compile-time
+ * selection otherwise.
+ */
+OQS_API OQS_STATUS OQS_SIG_falcon_1024_sign(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *secret_key) {
+#if defined(OQS_ENABLE_SIG_falcon_1024_avx2)
+#if defined(OQS_DIST_BUILD)
+ if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) {
+#endif /* OQS_DIST_BUILD */
+ return (OQS_STATUS) PQCLEAN_FALCON1024_AVX2_crypto_sign_signature(signature, signature_len, message, message_len, secret_key);
+#if defined(OQS_DIST_BUILD)
+ } else {
+ return (OQS_STATUS) PQCLEAN_FALCON1024_CLEAN_crypto_sign_signature(signature, signature_len, message, message_len, secret_key);
+ }
+#endif /* OQS_DIST_BUILD */
+#else
+ return (OQS_STATUS) PQCLEAN_FALCON1024_CLEAN_crypto_sign_signature(signature, signature_len, message, message_len, secret_key);
+#endif
+}
+
+/*
+ * Verify a Falcon-1024 signature over a message. Same AVX2/CLEAN
+ * dispatch scheme as the wrappers above; the PQClean result (0 =
+ * valid) is cast to OQS_STATUS.
+ */
+OQS_API OQS_STATUS OQS_SIG_falcon_1024_verify(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *public_key) {
+#if defined(OQS_ENABLE_SIG_falcon_1024_avx2)
+#if defined(OQS_DIST_BUILD)
+ if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) {
+#endif /* OQS_DIST_BUILD */
+ return (OQS_STATUS) PQCLEAN_FALCON1024_AVX2_crypto_sign_verify(signature, signature_len, message, message_len, public_key);
+#if defined(OQS_DIST_BUILD)
+ } else {
+ return (OQS_STATUS) PQCLEAN_FALCON1024_CLEAN_crypto_sign_verify(signature, signature_len, message, message_len, public_key);
+ }
+#endif /* OQS_DIST_BUILD */
+#else
+ return (OQS_STATUS) PQCLEAN_FALCON1024_CLEAN_crypto_sign_verify(signature, signature_len, message, message_len, public_key);
+#endif
+}
+
+#endif
diff --git a/lib/liboqs/src/sig/falcon/sig_falcon_512.c b/lib/liboqs/src/sig/falcon/sig_falcon_512.c
new file mode 100644
index 000000000..1b09cff54
--- /dev/null
+++ b/lib/liboqs/src/sig/falcon/sig_falcon_512.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: MIT
+
+#include <stdlib.h>
+
+#include <oqs/sig_falcon.h>
+
+#if defined(OQS_ENABLE_SIG_falcon_512)
+
+/*
+ * Allocate and populate the OQS_SIG dispatch object for Falcon-512.
+ * Returns NULL on allocation failure; the caller owns the returned
+ * object (NOTE(review): presumably released via OQS_SIG_free() —
+ * confirm against the liboqs OQS_SIG API).
+ */
+OQS_SIG *OQS_SIG_falcon_512_new(void) {
+
+ OQS_SIG *sig = malloc(sizeof(OQS_SIG));
+ if (sig == NULL) {
+ return NULL;
+ }
+ sig->method_name = OQS_SIG_alg_falcon_512;
+ sig->alg_version = "supercop-20201018 via https://github.com/jschanck/package-pqclean/tree/cea1fa5a/falcon";
+
+ sig->claimed_nist_level = 1;
+ sig->euf_cma = true;
+
+ sig->length_public_key = OQS_SIG_falcon_512_length_public_key;
+ sig->length_secret_key = OQS_SIG_falcon_512_length_secret_key;
+ sig->length_signature = OQS_SIG_falcon_512_length_signature;
+
+ sig->keypair = OQS_SIG_falcon_512_keypair;
+ sig->sign = OQS_SIG_falcon_512_sign;
+ sig->verify = OQS_SIG_falcon_512_verify;
+
+ return sig;
+}
+
+extern int PQCLEAN_FALCON512_CLEAN_crypto_sign_keypair(uint8_t *pk, uint8_t *sk);
+extern int PQCLEAN_FALCON512_CLEAN_crypto_sign_signature(uint8_t *sig, size_t *siglen, const uint8_t *m, size_t mlen, const uint8_t *sk);
+extern int PQCLEAN_FALCON512_CLEAN_crypto_sign_verify(const uint8_t *sig, size_t siglen, const uint8_t *m, size_t mlen, const uint8_t *pk);
+
+#if defined(OQS_ENABLE_SIG_falcon_512_avx2)
+extern int PQCLEAN_FALCON512_AVX2_crypto_sign_keypair(uint8_t *pk, uint8_t *sk);
+extern int PQCLEAN_FALCON512_AVX2_crypto_sign_signature(uint8_t *sig, size_t *siglen, const uint8_t *m, size_t mlen, const uint8_t *sk);
+extern int PQCLEAN_FALCON512_AVX2_crypto_sign_verify(const uint8_t *sig, size_t siglen, const uint8_t *m, size_t mlen, const uint8_t *pk);
+#endif
+
+/*
+ * Generate a Falcon-512 keypair into the caller-provided buffers.
+ * Dispatch: AVX2 implementation when compiled in; with OQS_DIST_BUILD
+ * the CPU is probed at runtime, otherwise the choice is compile-time.
+ * The PQClean int return (0 = success) is cast to OQS_STATUS.
+ */
+OQS_API OQS_STATUS OQS_SIG_falcon_512_keypair(uint8_t *public_key, uint8_t *secret_key) {
+#if defined(OQS_ENABLE_SIG_falcon_512_avx2)
+#if defined(OQS_DIST_BUILD)
+ if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) {
+#endif /* OQS_DIST_BUILD */
+ return (OQS_STATUS) PQCLEAN_FALCON512_AVX2_crypto_sign_keypair(public_key, secret_key);
+#if defined(OQS_DIST_BUILD)
+ } else {
+ return (OQS_STATUS) PQCLEAN_FALCON512_CLEAN_crypto_sign_keypair(public_key, secret_key);
+ }
+#endif /* OQS_DIST_BUILD */
+#else
+ return (OQS_STATUS) PQCLEAN_FALCON512_CLEAN_crypto_sign_keypair(public_key, secret_key);
+#endif
+}
+
+/*
+ * Sign a message with Falcon-512; *signature_len receives the actual
+ * signature size. Same AVX2/CLEAN dispatch scheme as the keypair
+ * wrapper above: runtime CPU probe under OQS_DIST_BUILD, compile-time
+ * selection otherwise.
+ */
+OQS_API OQS_STATUS OQS_SIG_falcon_512_sign(uint8_t *signature, size_t *signature_len, const uint8_t *message, size_t message_len, const uint8_t *secret_key) {
+#if defined(OQS_ENABLE_SIG_falcon_512_avx2)
+#if defined(OQS_DIST_BUILD)
+ if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) {
+#endif /* OQS_DIST_BUILD */
+ return (OQS_STATUS) PQCLEAN_FALCON512_AVX2_crypto_sign_signature(signature, signature_len, message, message_len, secret_key);
+#if defined(OQS_DIST_BUILD)
+ } else {
+ return (OQS_STATUS) PQCLEAN_FALCON512_CLEAN_crypto_sign_signature(signature, signature_len, message, message_len, secret_key);
+ }
+#endif /* OQS_DIST_BUILD */
+#else
+ return (OQS_STATUS) PQCLEAN_FALCON512_CLEAN_crypto_sign_signature(signature, signature_len, message, message_len, secret_key);
+#endif
+}
+
+/*
+ * Verify a Falcon-512 signature over a message. Same AVX2/CLEAN
+ * dispatch scheme as the wrappers above; the PQClean result (0 =
+ * valid) is cast to OQS_STATUS.
+ */
+OQS_API OQS_STATUS OQS_SIG_falcon_512_verify(const uint8_t *message, size_t message_len, const uint8_t *signature, size_t signature_len, const uint8_t *public_key) {
+#if defined(OQS_ENABLE_SIG_falcon_512_avx2)
+#if defined(OQS_DIST_BUILD)
+ if (OQS_CPU_has_extension(OQS_CPU_EXT_AVX2)) {
+#endif /* OQS_DIST_BUILD */
+ return (OQS_STATUS) PQCLEAN_FALCON512_AVX2_crypto_sign_verify(signature, signature_len, message, message_len, public_key);
+#if defined(OQS_DIST_BUILD)
+ } else {
+ return (OQS_STATUS) PQCLEAN_FALCON512_CLEAN_crypto_sign_verify(signature, signature_len, message, message_len, public_key);
+ }
+#endif /* OQS_DIST_BUILD */
+#else
+ return (OQS_STATUS) PQCLEAN_FALCON512_CLEAN_crypto_sign_verify(signature, signature_len, message, message_len, public_key);
+#endif
+}
+
+#endif