summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBenjamin Beurdouche <benjamin.beurdouche@inria.fr>2017-11-21 18:54:40 +0100
committerBenjamin Beurdouche <benjamin.beurdouche@inria.fr>2017-11-21 18:54:40 +0100
commiteb92a5ddf1160c9a0ad21ec66d2e2191f7a37bac (patch)
treee282135eeceda5781c67d1943eb1622b5b4df252
parent7c603897952e662618783479b03a2475232826e4 (diff)
downloadnss-hg-eb92a5ddf1160c9a0ad21ec66d2e2191f7a37bac.tar.gz
Bug 1419173 - New version of HACL* base and Curve25519, r=franziskus
Summary: This patch moves NSS to track HACL* master instead of the nss-production branch. It also contains a new version of the Curve25519 HACL* code that resolved many of the style issues of the old version. Reviewers: franziskus Differential Revision: https://phabricator.services.mozilla.com/D268
-rw-r--r--automation/taskcluster/docker-hacl/Dockerfile26
-rwxr-xr-xautomation/taskcluster/scripts/run_hacl.sh2
-rw-r--r--lib/freebl/Makefile8
-rw-r--r--lib/freebl/ecl/curve25519_64.c4
-rw-r--r--lib/freebl/freebl.gyp11
-rw-r--r--lib/freebl/freebl_base.gypi3
-rw-r--r--lib/freebl/verified/FStar.c (renamed from lib/freebl/verified/fstar_uint128.h)148
-rw-r--r--lib/freebl/verified/FStar.h69
-rw-r--r--lib/freebl/verified/Hacl_Curve25519.c845
-rw-r--r--lib/freebl/verified/Hacl_Curve25519.h (renamed from lib/freebl/verified/hacl_curve25519_64.h)39
-rw-r--r--lib/freebl/verified/hacl_curve25519_64.c1044
-rw-r--r--lib/freebl/verified/kremlib.h574
-rw-r--r--lib/freebl/verified/kremlib_base.h191
13 files changed, 1629 insertions, 1335 deletions
diff --git a/automation/taskcluster/docker-hacl/Dockerfile b/automation/taskcluster/docker-hacl/Dockerfile
index e26e72dbd..b4c716720 100644
--- a/automation/taskcluster/docker-hacl/Dockerfile
+++ b/automation/taskcluster/docker-hacl/Dockerfile
@@ -4,18 +4,16 @@ MAINTAINER Franziskus Kiefer <franziskuskiefer@gmail.com>
# Based on the HACL* image from Benjamin Beurdouche and
# the original F* formula with Daniel Fabian
-# Pinned versions of HaCl* (F* and KreMLin are pinned as submodules)
+# Pinned versions of HACL* (F* and KreMLin are pinned as submodules)
ENV haclrepo https://github.com/mitls/hacl-star.git
# Define versions of dependencies
ENV opamv 4.04.2
-ENV z3v 4.5.1.1f29cebd4df6-x64-ubuntu-14.04
-ENV haclversion 0030539598cde15d1a0e5f93b32e121f7b7b5a1c
-ENV haclbranch production-nss
+ENV haclversion 050778364a97d596d097afdb430b29ad58ee6a42
# Install required packages and set versions
RUN apt-get -qq update
-RUN apt-get install --yes sudo libssl-dev libsqlite3-dev g++-5 gcc-5 m4 make opam pkg-config python libgmp3-dev cmake curl libtool-bin autoconf
+RUN apt-get install --yes sudo libssl-dev libsqlite3-dev g++-5 gcc-5 m4 make opam pkg-config python libgmp3-dev cmake curl libtool-bin autoconf wget
RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 200
RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-5 200
@@ -29,13 +27,6 @@ ADD bin /home/worker/bin
RUN chmod +x /home/worker/bin/*
USER worker
-# Add "known-good" version of Z3
-RUN curl -LO https://github.com/FStarLang/binaries/raw/master/z3-tested/z3-${z3v}.zip
-RUN unzip z3-${z3v}.zip
-RUN rm z3-${z3v}.zip
-RUN mv z3-${z3v} z3
-ENV PATH "/home/worker/z3/bin:$PATH"
-
# Prepare build (OCaml packages)
ENV OPAMYES true
RUN opam init
@@ -43,14 +34,18 @@ RUN echo ". /home/worker/.opam/opam-init/init.sh > /dev/null 2> /dev/null || tru
RUN opam switch -v ${opamv}
RUN opam install ocamlfind batteries sqlite3 fileutils yojson ppx_deriving_yojson zarith pprint menhir ulex process fix wasm stdint
-# Get the HaCl* code
+# Get the HACL* code
RUN git clone ${haclrepo} hacl-star
RUN git -C hacl-star checkout ${haclversion}
# Prepare submodules, and build, verify, test, and extract c code
-# This caches the extracted c code (pins the HaCl* version). All we need to do
+# This caches the extracted c code (pins the HACL* version). All we need to do
# on CI now is comparing the code in this docker image with the one in NSS.
-RUN opam config exec -- make -C hacl-star nss -j$(nproc)
+RUN opam config exec -- make -C hacl-star prepare -j$(nproc)
+ENV PATH "/home/worker/hacl-star/dependencies/z3/bin:$PATH"
+RUN make -C hacl-star verify-nss -j$(nproc)
+RUN make -C hacl-star -f Makefile.build snapshots/nss -j$(nproc)
+RUN KOPTS="-funroll-loops 5" make -C hacl-star/code/curve25519 test -j$(nproc)
# Get clang-format-3.9
RUN curl -LO http://releases.llvm.org/3.9.1/clang+llvm-3.9.1-x86_64-linux-gnu-ubuntu-16.04.tar.xz
@@ -65,6 +60,7 @@ RUN rm *.tar.xz*
# Cleanup
RUN rm -rf ~/.ccache ~/.cache
+RUN rm -rf /home/worker/hacl-star/dependencies
RUN sudo apt-get autoremove -y
RUN sudo apt-get clean
RUN sudo apt-get autoclean
diff --git a/automation/taskcluster/scripts/run_hacl.sh b/automation/taskcluster/scripts/run_hacl.sh
index 1e2291a54..a57090424 100755
--- a/automation/taskcluster/scripts/run_hacl.sh
+++ b/automation/taskcluster/scripts/run_hacl.sh
@@ -13,7 +13,7 @@ set -e -x -v
# successfully executed.
# Format the extracted C code.
-cd ~/hacl-star/snapshots/nss-production
+cd ~/hacl-star/snapshots/nss
cp ~/nss/.clang-format .
find . -type f -name '*.[ch]' -exec clang-format -i {} \+
diff --git a/lib/freebl/Makefile b/lib/freebl/Makefile
index bc1ea86a5..41332f202 100644
--- a/lib/freebl/Makefile
+++ b/lib/freebl/Makefile
@@ -510,6 +510,10 @@ ifdef USE_64
endif
endif
+ifndef HAVE_INT128_SUPPORT
+ DEFINES += -DKRML_NOUINT128
+endif
+
ifndef NSS_DISABLE_CHACHAPOLY
ifeq ($(CPU_ARCH),x86_64)
ifdef HAVE_INT128_SUPPORT
@@ -529,11 +533,11 @@ ifndef NSS_DISABLE_CHACHAPOLY
endif # x86_64
endif # NSS_DISABLE_CHACHAPOLY
-ifeq (,$(filter-out i386 x386 x86 x86_64,$(CPU_ARCH)))
+ifeq (,$(filter-out i386 x386 x86 x86_64 aarch64,$(CPU_ARCH)))
# All intel architectures get the 64 bit version
# With custom uint128 if necessary (faster than generic 32 bit version).
ECL_SRCS += curve25519_64.c
- VERIFIED_SRCS += hacl_curve25519_64.c
+ VERIFIED_SRCS += Hacl_Curve25519.c FStar.c
else
# All non intel architectures get the generic 32 bit implementation (slow!)
ECL_SRCS += curve25519_32.c
diff --git a/lib/freebl/ecl/curve25519_64.c b/lib/freebl/ecl/curve25519_64.c
index 21c5d2120..a2e4296bb 100644
--- a/lib/freebl/ecl/curve25519_64.c
+++ b/lib/freebl/ecl/curve25519_64.c
@@ -3,12 +3,12 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ecl-priv.h"
-#include "../verified/hacl_curve25519_64.h"
+#include "../verified/Hacl_Curve25519.h"
SECStatus
ec_Curve25519_mul(uint8_t *mypublic, const uint8_t *secret, const uint8_t *basepoint)
{
// Note: this cast is safe because HaCl* state has a post-condition that only "mypublic" changed.
- Curve25519_crypto_scalarmult(mypublic, (uint8_t *)secret, (uint8_t *)basepoint);
+ Hacl_Curve25519_crypto_scalarmult(mypublic, (uint8_t *)secret, (uint8_t *)basepoint);
return 0;
}
diff --git a/lib/freebl/freebl.gyp b/lib/freebl/freebl.gyp
index 5f59eef29..c92b26f06 100644
--- a/lib/freebl/freebl.gyp
+++ b/lib/freebl/freebl.gyp
@@ -181,6 +181,9 @@
}],
],
'target_defaults': {
+ 'variables': {
+ 'have_int128_support': '0',
+ },
'include_dirs': [
'mpi',
'ecl',
@@ -255,9 +258,17 @@
# The Makefile does version-tests on GCC, but we're not doing that here.
'HAVE_INT128_SUPPORT',
],
+ 'variables': {
+ 'have_int128_support': '1',
+ },
}],
],
}],
+ [ 'have_int128_support!=1', {
+ 'defines': [
+ 'KRML_NOUINT128',
+ ],
+ }],
[ 'OS=="linux"', {
'defines': [
'FREEBL_LOWHASH',
diff --git a/lib/freebl/freebl_base.gypi b/lib/freebl/freebl_base.gypi
index 6e3a13fee..faef1e02f 100644
--- a/lib/freebl/freebl_base.gypi
+++ b/lib/freebl/freebl_base.gypi
@@ -134,7 +134,8 @@
'sources': [
# All intel and 64-bit ARM architectures get the 64 bit version.
'ecl/curve25519_64.c',
- 'verified/hacl_curve25519_64.c',
+ 'verified/Hacl_Curve25519.c',
+ 'verified/FStar.c',
],
}, {
'sources': [
diff --git a/lib/freebl/verified/fstar_uint128.h b/lib/freebl/verified/FStar.c
index cd6ce2dde..4e5f6d50d 100644
--- a/lib/freebl/verified/fstar_uint128.h
+++ b/lib/freebl/verified/FStar.c
@@ -1,51 +1,35 @@
-// Copyright 2016-2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
/* This file was auto-generated by KreMLin! */
-#ifndef __FStar_UInt128_H
-#define __FStar_UInt128_H
+#include "FStar.h"
-typedef struct
-{
- uint64_t low;
- uint64_t high;
-} FStar_UInt128_uint128;
-
-typedef FStar_UInt128_uint128 FStar_UInt128_t;
-
-typedef struct
-{
- uint64_t fst;
- uint64_t snd;
- uint64_t thd;
- uint64_t f3;
-} K___uint64_t_uint64_t_uint64_t_uint64_t;
-
-static inline uint64_t
+static uint64_t
FStar_UInt128_constant_time_carry(uint64_t a, uint64_t b)
{
- return (a ^ ((a ^ b) | ((a - b) ^ b))) >> (uint32_t)63;
+ return (a ^ ((a ^ b) | ((a - b) ^ b))) >> (uint32_t)63U;
}
-static inline uint64_t
+static uint64_t
FStar_UInt128_carry(uint64_t a, uint64_t b)
{
return FStar_UInt128_constant_time_carry(a, b);
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return (
@@ -54,7 +38,7 @@ FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
.high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low) });
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return (
@@ -63,7 +47,7 @@ FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
.high = a.high + b.high + FStar_UInt128_carry(a.low + b.low, b.low) });
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return (
@@ -72,7 +56,7 @@ FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
.high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low) });
}
-static inline FStar_UInt128_uint128
+static FStar_UInt128_uint128
FStar_UInt128_sub_mod_impl(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return (
@@ -81,54 +65,54 @@ FStar_UInt128_sub_mod_impl(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
.high = a.high - b.high - FStar_UInt128_carry(a.low, a.low - b.low) });
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return FStar_UInt128_sub_mod_impl(a, b);
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return ((FStar_UInt128_uint128){.low = a.low & b.low, .high = a.high & b.high });
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return ((FStar_UInt128_uint128){.low = a.low ^ b.low, .high = a.high ^ b.high });
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return ((FStar_UInt128_uint128){.low = a.low | b.low, .high = a.high | b.high });
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_lognot(FStar_UInt128_uint128 a)
{
return ((FStar_UInt128_uint128){.low = ~a.low, .high = ~a.high });
}
-static uint32_t FStar_UInt128_u32_64 = (uint32_t)64;
+static uint32_t FStar_UInt128_u32_64 = (uint32_t)64U;
-static inline uint64_t
+static uint64_t
FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s)
{
return (hi << s) + (lo >> (FStar_UInt128_u32_64 - s));
}
-static inline uint64_t
+static uint64_t
FStar_UInt128_add_u64_shift_left_respec(uint64_t hi, uint64_t lo, uint32_t s)
{
return FStar_UInt128_add_u64_shift_left(hi, lo, s);
}
-static inline FStar_UInt128_uint128
+static FStar_UInt128_uint128
FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
{
- if (s == (uint32_t)0)
+ if (s == (uint32_t)0U)
return a;
else
return (
@@ -137,13 +121,13 @@ FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
.high = FStar_UInt128_add_u64_shift_left_respec(a.high, a.low, s) });
}
-static inline FStar_UInt128_uint128
+static FStar_UInt128_uint128
FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s)
{
- return ((FStar_UInt128_uint128){.low = (uint64_t)0, .high = a.low << (s - FStar_UInt128_u32_64) });
+ return ((FStar_UInt128_uint128){.low = (uint64_t)0U, .high = a.low << (s - FStar_UInt128_u32_64) });
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s)
{
if (s < FStar_UInt128_u32_64)
@@ -152,22 +136,22 @@ FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s)
return FStar_UInt128_shift_left_large(a, s);
}
-static inline uint64_t
+static uint64_t
FStar_UInt128_add_u64_shift_right(uint64_t hi, uint64_t lo, uint32_t s)
{
return (lo >> s) + (hi << (FStar_UInt128_u32_64 - s));
}
-static inline uint64_t
+static uint64_t
FStar_UInt128_add_u64_shift_right_respec(uint64_t hi, uint64_t lo, uint32_t s)
{
return FStar_UInt128_add_u64_shift_right(hi, lo, s);
}
-static inline FStar_UInt128_uint128
+static FStar_UInt128_uint128
FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
{
- if (s == (uint32_t)0)
+ if (s == (uint32_t)0U)
return a;
else
return (
@@ -176,13 +160,13 @@ FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
.high = a.high >> s });
}
-static inline FStar_UInt128_uint128
+static FStar_UInt128_uint128
FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s)
{
- return ((FStar_UInt128_uint128){.low = a.high >> (s - FStar_UInt128_u32_64), .high = (uint64_t)0 });
+ return ((FStar_UInt128_uint128){.low = a.high >> (s - FStar_UInt128_u32_64), .high = (uint64_t)0U });
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s)
{
if (s < FStar_UInt128_u32_64)
@@ -191,7 +175,7 @@ FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s)
return FStar_UInt128_shift_right_large(a, s);
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return (
@@ -200,44 +184,38 @@ FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
.high = FStar_UInt64_eq_mask(a.low, b.low) & FStar_UInt64_eq_mask(a.high, b.high) });
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return (
(FStar_UInt128_uint128){
- .low = (FStar_UInt64_gte_mask(a.high,
- b.high) &
- ~FStar_UInt64_eq_mask(a.high, b.high)) |
- (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low)),
- .high = (FStar_UInt64_gte_mask(a.high,
- b.high) &
- ~FStar_UInt64_eq_mask(a.high, b.high)) |
- (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low)) });
+ .low = (FStar_UInt64_gte_mask(a.high, b.high) & ~FStar_UInt64_eq_mask(a.high, b.high)) | (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low)),
+ .high = (FStar_UInt64_gte_mask(a.high, b.high) & ~FStar_UInt64_eq_mask(a.high, b.high)) | (FStar_UInt64_eq_mask(a.high, b.high) & FStar_UInt64_gte_mask(a.low, b.low)) });
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_uint64_to_uint128(uint64_t a)
{
- return ((FStar_UInt128_uint128){.low = a, .high = (uint64_t)0 });
+ return ((FStar_UInt128_uint128){.low = a, .high = (uint64_t)0U });
}
-static inline uint64_t
+uint64_t
FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a)
{
return a.low;
}
-static uint64_t FStar_UInt128_u64_l32_mask = (uint64_t)0xffffffff;
+static uint64_t FStar_UInt128_u64_l32_mask = (uint64_t)0xffffffffU;
-static inline uint64_t
+static uint64_t
FStar_UInt128_u64_mod_32(uint64_t a)
{
return a & FStar_UInt128_u64_l32_mask;
}
-static uint32_t FStar_UInt128_u32_32 = (uint32_t)32;
+static uint32_t FStar_UInt128_u32_32 = (uint32_t)32U;
-static inline K___uint64_t_uint64_t_uint64_t_uint64_t
+static K___uint64_t_uint64_t_uint64_t_uint64_t
FStar_UInt128_mul_wide_impl_t_(uint64_t x, uint64_t y)
{
return (
@@ -248,13 +226,13 @@ FStar_UInt128_mul_wide_impl_t_(uint64_t x, uint64_t y)
.f3 = (x >> FStar_UInt128_u32_32) * FStar_UInt128_u64_mod_32(y) + (FStar_UInt128_u64_mod_32(x) * FStar_UInt128_u64_mod_32(y) >> FStar_UInt128_u32_32) });
}
-static inline uint64_t
+static uint64_t
FStar_UInt128_u32_combine_(uint64_t hi, uint64_t lo)
{
return lo + (hi << FStar_UInt128_u32_32);
}
-static inline FStar_UInt128_uint128
+static FStar_UInt128_uint128
FStar_UInt128_mul_wide_impl(uint64_t x, uint64_t y)
{
K___uint64_t_uint64_t_uint64_t_uint64_t scrut = FStar_UInt128_mul_wide_impl_t_(x, y);
@@ -270,22 +248,8 @@ FStar_UInt128_mul_wide_impl(uint64_t x, uint64_t y)
((u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_)) >> FStar_UInt128_u32_32) });
}
-static inline FStar_UInt128_uint128
+FStar_UInt128_uint128
FStar_UInt128_mul_wide(uint64_t x, uint64_t y)
{
return FStar_UInt128_mul_wide_impl(x, y);
}
-
-static inline FStar_UInt128_uint128
-FStar_Int_Cast_Full_uint64_to_uint128(uint64_t a)
-{
- return FStar_UInt128_uint64_to_uint128(a);
-}
-
-static inline uint64_t
-FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_uint128 a)
-{
- return FStar_UInt128_uint128_to_uint64(a);
-}
-
-#endif
diff --git a/lib/freebl/verified/FStar.h b/lib/freebl/verified/FStar.h
new file mode 100644
index 000000000..7b105b8f2
--- /dev/null
+++ b/lib/freebl/verified/FStar.h
@@ -0,0 +1,69 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file was auto-generated by KreMLin! */
+#ifndef __FStar_H
+#define __FStar_H
+
+#include "kremlib_base.h"
+
+typedef struct
+{
+ uint64_t low;
+ uint64_t high;
+} FStar_UInt128_uint128;
+
+typedef FStar_UInt128_uint128 FStar_UInt128_t;
+
+extern void FStar_UInt128_constant_time_carry_ok(uint64_t x0, uint64_t x1);
+
+FStar_UInt128_uint128 FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a);
+
+FStar_UInt128_uint128 FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s);
+
+FStar_UInt128_uint128 FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s);
+
+FStar_UInt128_uint128 FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+
+FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a);
+
+uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);
+
+typedef struct
+{
+ uint64_t fst;
+ uint64_t snd;
+ uint64_t thd;
+ uint64_t f3;
+} K___uint64_t_uint64_t_uint64_t_uint64_t;
+
+FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
+#endif
diff --git a/lib/freebl/verified/Hacl_Curve25519.c b/lib/freebl/verified/Hacl_Curve25519.c
new file mode 100644
index 000000000..f2dcddc57
--- /dev/null
+++ b/lib/freebl/verified/Hacl_Curve25519.c
@@ -0,0 +1,845 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Hacl_Curve25519.h"
+
+static void
+Hacl_Bignum_Modulo_carry_top(uint64_t *b)
+{
+ uint64_t b4 = b[4U];
+ uint64_t b0 = b[0U];
+ uint64_t b4_ = b4 & (uint64_t)0x7ffffffffffffU;
+ uint64_t b0_ = b0 + (uint64_t)19U * (b4 >> (uint32_t)51U);
+ b[4U] = b4_;
+ b[0U] = b0_;
+}
+
+inline static void
+Hacl_Bignum_Fproduct_copy_from_wide_(uint64_t *output, FStar_UInt128_t *input)
+{
+ {
+ FStar_UInt128_t xi = input[0U];
+ output[0U] = FStar_UInt128_uint128_to_uint64(xi);
+ }
+ {
+ FStar_UInt128_t xi = input[1U];
+ output[1U] = FStar_UInt128_uint128_to_uint64(xi);
+ }
+ {
+ FStar_UInt128_t xi = input[2U];
+ output[2U] = FStar_UInt128_uint128_to_uint64(xi);
+ }
+ {
+ FStar_UInt128_t xi = input[3U];
+ output[3U] = FStar_UInt128_uint128_to_uint64(xi);
+ }
+ {
+ FStar_UInt128_t xi = input[4U];
+ output[4U] = FStar_UInt128_uint128_to_uint64(xi);
+ }
+}
+
+inline static void
+Hacl_Bignum_Fproduct_sum_scalar_multiplication_(
+ FStar_UInt128_t *output,
+ uint64_t *input,
+ uint64_t s)
+{
+ {
+ FStar_UInt128_t xi = output[0U];
+ uint64_t yi = input[0U];
+ output[0U] = FStar_UInt128_add_mod(xi, FStar_UInt128_mul_wide(yi, s));
+ }
+ {
+ FStar_UInt128_t xi = output[1U];
+ uint64_t yi = input[1U];
+ output[1U] = FStar_UInt128_add_mod(xi, FStar_UInt128_mul_wide(yi, s));
+ }
+ {
+ FStar_UInt128_t xi = output[2U];
+ uint64_t yi = input[2U];
+ output[2U] = FStar_UInt128_add_mod(xi, FStar_UInt128_mul_wide(yi, s));
+ }
+ {
+ FStar_UInt128_t xi = output[3U];
+ uint64_t yi = input[3U];
+ output[3U] = FStar_UInt128_add_mod(xi, FStar_UInt128_mul_wide(yi, s));
+ }
+ {
+ FStar_UInt128_t xi = output[4U];
+ uint64_t yi = input[4U];
+ output[4U] = FStar_UInt128_add_mod(xi, FStar_UInt128_mul_wide(yi, s));
+ }
+}
+
+inline static void
+Hacl_Bignum_Fproduct_carry_wide_(FStar_UInt128_t *tmp)
+{
+ {
+ uint32_t ctr = (uint32_t)0U;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1U];
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(tctr) & (uint64_t)0x7ffffffffffffU;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)51U);
+ tmp[ctr] = FStar_UInt128_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t)1U] = FStar_UInt128_add(tctrp1, c);
+ }
+ {
+ uint32_t ctr = (uint32_t)1U;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1U];
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(tctr) & (uint64_t)0x7ffffffffffffU;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)51U);
+ tmp[ctr] = FStar_UInt128_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t)1U] = FStar_UInt128_add(tctrp1, c);
+ }
+ {
+ uint32_t ctr = (uint32_t)2U;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1U];
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(tctr) & (uint64_t)0x7ffffffffffffU;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)51U);
+ tmp[ctr] = FStar_UInt128_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t)1U] = FStar_UInt128_add(tctrp1, c);
+ }
+ {
+ uint32_t ctr = (uint32_t)3U;
+ FStar_UInt128_t tctr = tmp[ctr];
+ FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1U];
+ uint64_t r0 = FStar_UInt128_uint128_to_uint64(tctr) & (uint64_t)0x7ffffffffffffU;
+ FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)51U);
+ tmp[ctr] = FStar_UInt128_uint64_to_uint128(r0);
+ tmp[ctr + (uint32_t)1U] = FStar_UInt128_add(tctrp1, c);
+ }
+}
+
+inline static void
+Hacl_Bignum_Fmul_shift_reduce(uint64_t *output)
+{
+ uint64_t tmp = output[4U];
+ {
+ uint32_t ctr = (uint32_t)5U - (uint32_t)0U - (uint32_t)1U;
+ uint64_t z = output[ctr - (uint32_t)1U];
+ output[ctr] = z;
+ }
+ {
+ uint32_t ctr = (uint32_t)5U - (uint32_t)1U - (uint32_t)1U;
+ uint64_t z = output[ctr - (uint32_t)1U];
+ output[ctr] = z;
+ }
+ {
+ uint32_t ctr = (uint32_t)5U - (uint32_t)2U - (uint32_t)1U;
+ uint64_t z = output[ctr - (uint32_t)1U];
+ output[ctr] = z;
+ }
+ {
+ uint32_t ctr = (uint32_t)5U - (uint32_t)3U - (uint32_t)1U;
+ uint64_t z = output[ctr - (uint32_t)1U];
+ output[ctr] = z;
+ }
+ output[0U] = tmp;
+ uint64_t b0 = output[0U];
+ output[0U] = (uint64_t)19U * b0;
+}
+
+static void
+Hacl_Bignum_Fmul_mul_shift_reduce_(FStar_UInt128_t *output, uint64_t *input, uint64_t *input21)
+{
+ {
+ uint64_t input2i = input21[0U];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ {
+ uint64_t input2i = input21[1U];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ {
+ uint64_t input2i = input21[2U];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ {
+ uint64_t input2i = input21[3U];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+ Hacl_Bignum_Fmul_shift_reduce(input);
+ }
+ uint32_t i = (uint32_t)4U;
+ uint64_t input2i = input21[i];
+ Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
+}
+
+inline static void
+Hacl_Bignum_Fmul_fmul(uint64_t *output, uint64_t *input, uint64_t *input21)
+{
+ uint64_t tmp[5U] = { 0U };
+ memcpy(tmp, input, (uint32_t)5U * sizeof input[0U]);
+ KRML_CHECK_SIZE(FStar_UInt128_uint64_to_uint128((uint64_t)0U), (uint32_t)5U);
+ FStar_UInt128_t t[5U];
+ for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
+ t[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+ Hacl_Bignum_Fmul_mul_shift_reduce_(t, tmp, input21);
+ Hacl_Bignum_Fproduct_carry_wide_(t);
+ FStar_UInt128_t b4 = t[4U];
+ FStar_UInt128_t b0 = t[0U];
+ FStar_UInt128_t
+ b4_ = FStar_UInt128_logand(b4, FStar_UInt128_uint64_to_uint128((uint64_t)0x7ffffffffffffU));
+ FStar_UInt128_t
+ b0_ =
+ FStar_UInt128_add(b0,
+ FStar_UInt128_mul_wide((uint64_t)19U,
+ FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t)51U))));
+ t[4U] = b4_;
+ t[0U] = b0_;
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, t);
+ uint64_t i0 = output[0U];
+ uint64_t i1 = output[1U];
+ uint64_t i0_ = i0 & (uint64_t)0x7ffffffffffffU;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t)51U);
+ output[0U] = i0_;
+ output[1U] = i1_;
+}
+
+inline static void
+Hacl_Bignum_Fsquare_fsquare__(FStar_UInt128_t *tmp, uint64_t *output)
+{
+ uint64_t r0 = output[0U];
+ uint64_t r1 = output[1U];
+ uint64_t r2 = output[2U];
+ uint64_t r3 = output[3U];
+ uint64_t r4 = output[4U];
+ uint64_t d0 = r0 * (uint64_t)2U;
+ uint64_t d1 = r1 * (uint64_t)2U;
+ uint64_t d2 = r2 * (uint64_t)2U * (uint64_t)19U;
+ uint64_t d419 = r4 * (uint64_t)19U;
+ uint64_t d4 = d419 * (uint64_t)2U;
+ FStar_UInt128_t
+ s0 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(r0, r0),
+ FStar_UInt128_mul_wide(d4, r1)),
+ FStar_UInt128_mul_wide(d2, r3));
+ FStar_UInt128_t
+ s1 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r1),
+ FStar_UInt128_mul_wide(d4, r2)),
+ FStar_UInt128_mul_wide(r3 * (uint64_t)19U, r3));
+ FStar_UInt128_t
+ s2 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r2),
+ FStar_UInt128_mul_wide(r1, r1)),
+ FStar_UInt128_mul_wide(d4, r3));
+ FStar_UInt128_t
+ s3 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r3),
+ FStar_UInt128_mul_wide(d1, r2)),
+ FStar_UInt128_mul_wide(r4, d419));
+ FStar_UInt128_t
+ s4 =
+ FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r4),
+ FStar_UInt128_mul_wide(d1, r3)),
+ FStar_UInt128_mul_wide(r2, r2));
+ tmp[0U] = s0;
+ tmp[1U] = s1;
+ tmp[2U] = s2;
+ tmp[3U] = s3;
+ tmp[4U] = s4;
+}
+
+inline static void
+Hacl_Bignum_Fsquare_fsquare_(FStar_UInt128_t *tmp, uint64_t *output)
+{
+ Hacl_Bignum_Fsquare_fsquare__(tmp, output);
+ Hacl_Bignum_Fproduct_carry_wide_(tmp);
+ FStar_UInt128_t b4 = tmp[4U];
+ FStar_UInt128_t b0 = tmp[0U];
+ FStar_UInt128_t
+ b4_ = FStar_UInt128_logand(b4, FStar_UInt128_uint64_to_uint128((uint64_t)0x7ffffffffffffU));
+ FStar_UInt128_t
+ b0_ =
+ FStar_UInt128_add(b0,
+ FStar_UInt128_mul_wide((uint64_t)19U,
+ FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t)51U))));
+ tmp[4U] = b4_;
+ tmp[0U] = b0_;
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
+ uint64_t i0 = output[0U];
+ uint64_t i1 = output[1U];
+ uint64_t i0_ = i0 & (uint64_t)0x7ffffffffffffU;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t)51U);
+ output[0U] = i0_;
+ output[1U] = i1_;
+}
+
+static void
+Hacl_Bignum_Fsquare_fsquare_times_(uint64_t *input, FStar_UInt128_t *tmp, uint32_t count1)
+{
+ Hacl_Bignum_Fsquare_fsquare_(tmp, input);
+ for (uint32_t i = (uint32_t)1U; i < count1; i = i + (uint32_t)1U)
+ Hacl_Bignum_Fsquare_fsquare_(tmp, input);
+}
+
+inline static void
+Hacl_Bignum_Fsquare_fsquare_times(uint64_t *output, uint64_t *input, uint32_t count1)
+{
+ KRML_CHECK_SIZE(FStar_UInt128_uint64_to_uint128((uint64_t)0U), (uint32_t)5U);
+ FStar_UInt128_t t[5U];
+ for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
+ t[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+ memcpy(output, input, (uint32_t)5U * sizeof input[0U]);
+ Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
+}
+
+inline static void
+Hacl_Bignum_Fsquare_fsquare_times_inplace(uint64_t *output, uint32_t count1)
+{
+ KRML_CHECK_SIZE(FStar_UInt128_uint64_to_uint128((uint64_t)0U), (uint32_t)5U);
+ FStar_UInt128_t t[5U];
+ for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
+ t[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+ Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
+}
+
+inline static void
+Hacl_Bignum_Crecip_crecip(uint64_t *out, uint64_t *z)
+{
+ uint64_t buf[20U] = { 0U };
+ uint64_t *a = buf;
+ uint64_t *t00 = buf + (uint32_t)5U;
+ uint64_t *b0 = buf + (uint32_t)10U;
+ Hacl_Bignum_Fsquare_fsquare_times(a, z, (uint32_t)1U);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, a, (uint32_t)2U);
+ Hacl_Bignum_Fmul_fmul(b0, t00, z);
+ Hacl_Bignum_Fmul_fmul(a, b0, a);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, a, (uint32_t)1U);
+ Hacl_Bignum_Fmul_fmul(b0, t00, b0);
+ Hacl_Bignum_Fsquare_fsquare_times(t00, b0, (uint32_t)5U);
+ uint64_t *t01 = buf + (uint32_t)5U;
+ uint64_t *b1 = buf + (uint32_t)10U;
+ uint64_t *c0 = buf + (uint32_t)15U;
+ Hacl_Bignum_Fmul_fmul(b1, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)10U);
+ Hacl_Bignum_Fmul_fmul(c0, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, c0, (uint32_t)20U);
+ Hacl_Bignum_Fmul_fmul(t01, t01, c0);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t01, (uint32_t)10U);
+ Hacl_Bignum_Fmul_fmul(b1, t01, b1);
+ Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)50U);
+ uint64_t *a0 = buf;
+ uint64_t *t0 = buf + (uint32_t)5U;
+ uint64_t *b = buf + (uint32_t)10U;
+ uint64_t *c = buf + (uint32_t)15U;
+ Hacl_Bignum_Fmul_fmul(c, t0, b);
+ Hacl_Bignum_Fsquare_fsquare_times(t0, c, (uint32_t)100U);
+ Hacl_Bignum_Fmul_fmul(t0, t0, c);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)50U);
+ Hacl_Bignum_Fmul_fmul(t0, t0, b);
+ Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)5U);
+ Hacl_Bignum_Fmul_fmul(out, t0, a0);
+}
+
+inline static void
+Hacl_Bignum_fsum(uint64_t *a, uint64_t *b)
+{
+ {
+ uint64_t xi = a[0U];
+ uint64_t yi = b[0U];
+ a[0U] = xi + yi;
+ }
+ {
+ uint64_t xi = a[1U];
+ uint64_t yi = b[1U];
+ a[1U] = xi + yi;
+ }
+ {
+ uint64_t xi = a[2U];
+ uint64_t yi = b[2U];
+ a[2U] = xi + yi;
+ }
+ {
+ uint64_t xi = a[3U];
+ uint64_t yi = b[3U];
+ a[3U] = xi + yi;
+ }
+ {
+ uint64_t xi = a[4U];
+ uint64_t yi = b[4U];
+ a[4U] = xi + yi;
+ }
+}
+
+inline static void
+Hacl_Bignum_fdifference(uint64_t *a, uint64_t *b)
+{
+ uint64_t tmp[5U] = { 0U };
+ memcpy(tmp, b, (uint32_t)5U * sizeof b[0U]);
+ uint64_t b0 = tmp[0U];
+ uint64_t b1 = tmp[1U];
+ uint64_t b2 = tmp[2U];
+ uint64_t b3 = tmp[3U];
+ uint64_t b4 = tmp[4U];
+ tmp[0U] = b0 + (uint64_t)0x3fffffffffff68U;
+ tmp[1U] = b1 + (uint64_t)0x3ffffffffffff8U;
+ tmp[2U] = b2 + (uint64_t)0x3ffffffffffff8U;
+ tmp[3U] = b3 + (uint64_t)0x3ffffffffffff8U;
+ tmp[4U] = b4 + (uint64_t)0x3ffffffffffff8U;
+ {
+ uint64_t xi = a[0U];
+ uint64_t yi = tmp[0U];
+ a[0U] = yi - xi;
+ }
+ {
+ uint64_t xi = a[1U];
+ uint64_t yi = tmp[1U];
+ a[1U] = yi - xi;
+ }
+ {
+ uint64_t xi = a[2U];
+ uint64_t yi = tmp[2U];
+ a[2U] = yi - xi;
+ }
+ {
+ uint64_t xi = a[3U];
+ uint64_t yi = tmp[3U];
+ a[3U] = yi - xi;
+ }
+ {
+ uint64_t xi = a[4U];
+ uint64_t yi = tmp[4U];
+ a[4U] = yi - xi;
+ }
+}
+
+inline static void
+Hacl_Bignum_fscalar(uint64_t *output, uint64_t *b, uint64_t s)
+{
+ KRML_CHECK_SIZE(FStar_UInt128_uint64_to_uint128((uint64_t)0U), (uint32_t)5U);
+ FStar_UInt128_t tmp[5U];
+ for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
+ tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+ {
+ uint64_t xi = b[0U];
+ tmp[0U] = FStar_UInt128_mul_wide(xi, s);
+ }
+ {
+ uint64_t xi = b[1U];
+ tmp[1U] = FStar_UInt128_mul_wide(xi, s);
+ }
+ {
+ uint64_t xi = b[2U];
+ tmp[2U] = FStar_UInt128_mul_wide(xi, s);
+ }
+ {
+ uint64_t xi = b[3U];
+ tmp[3U] = FStar_UInt128_mul_wide(xi, s);
+ }
+ {
+ uint64_t xi = b[4U];
+ tmp[4U] = FStar_UInt128_mul_wide(xi, s);
+ }
+ Hacl_Bignum_Fproduct_carry_wide_(tmp);
+ FStar_UInt128_t b4 = tmp[4U];
+ FStar_UInt128_t b0 = tmp[0U];
+ FStar_UInt128_t
+ b4_ = FStar_UInt128_logand(b4, FStar_UInt128_uint64_to_uint128((uint64_t)0x7ffffffffffffU));
+ FStar_UInt128_t
+ b0_ =
+ FStar_UInt128_add(b0,
+ FStar_UInt128_mul_wide((uint64_t)19U,
+ FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t)51U))));
+ tmp[4U] = b4_;
+ tmp[0U] = b0_;
+ Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
+}
+
+inline static void
+Hacl_Bignum_fmul(uint64_t *output, uint64_t *a, uint64_t *b)
+{
+ Hacl_Bignum_Fmul_fmul(output, a, b);
+}
+
+inline static void
+Hacl_Bignum_crecip(uint64_t *output, uint64_t *input)
+{
+ Hacl_Bignum_Crecip_crecip(output, input);
+}
+
+static void
+Hacl_EC_Point_swap_conditional_step(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
+{
+ uint32_t i = ctr - (uint32_t)1U;
+ uint64_t ai = a[i];
+ uint64_t bi = b[i];
+ uint64_t x = swap1 & (ai ^ bi);
+ uint64_t ai1 = ai ^ x;
+ uint64_t bi1 = bi ^ x;
+ a[i] = ai1;
+ b[i] = bi1;
+}
+
+static void
+Hacl_EC_Point_swap_conditional_(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
+{
+ if (!(ctr == (uint32_t)0U)) {
+ Hacl_EC_Point_swap_conditional_step(a, b, swap1, ctr);
+ uint32_t i = ctr - (uint32_t)1U;
+ Hacl_EC_Point_swap_conditional_(a, b, swap1, i);
+ }
+}
+
+static void
+Hacl_EC_Point_swap_conditional(uint64_t *a, uint64_t *b, uint64_t iswap)
+{
+ uint64_t swap1 = (uint64_t)0U - iswap;
+ Hacl_EC_Point_swap_conditional_(a, b, swap1, (uint32_t)5U);
+ Hacl_EC_Point_swap_conditional_(a + (uint32_t)5U, b + (uint32_t)5U, swap1, (uint32_t)5U);
+}
+
+static void
+Hacl_EC_Point_copy(uint64_t *output, uint64_t *input)
+{
+ memcpy(output, input, (uint32_t)5U * sizeof input[0U]);
+ memcpy(output + (uint32_t)5U,
+ input + (uint32_t)5U,
+ (uint32_t)5U * sizeof(input + (uint32_t)5U)[0U]);
+}
+
+static void
+Hacl_EC_AddAndDouble_fmonty(
+ uint64_t *pp,
+ uint64_t *ppq,
+ uint64_t *p,
+ uint64_t *pq,
+ uint64_t *qmqp)
+{
+ uint64_t *qx = qmqp;
+ uint64_t *x2 = pp;
+ uint64_t *z2 = pp + (uint32_t)5U;
+ uint64_t *x3 = ppq;
+ uint64_t *z3 = ppq + (uint32_t)5U;
+ uint64_t *x = p;
+ uint64_t *z = p + (uint32_t)5U;
+ uint64_t *xprime = pq;
+ uint64_t *zprime = pq + (uint32_t)5U;
+ uint64_t buf[40U] = { 0U };
+ uint64_t *origx = buf;
+ uint64_t *origxprime = buf + (uint32_t)5U;
+ uint64_t *xxprime0 = buf + (uint32_t)25U;
+ uint64_t *zzprime0 = buf + (uint32_t)30U;
+ memcpy(origx, x, (uint32_t)5U * sizeof x[0U]);
+ Hacl_Bignum_fsum(x, z);
+ Hacl_Bignum_fdifference(z, origx);
+ memcpy(origxprime, xprime, (uint32_t)5U * sizeof xprime[0U]);
+ Hacl_Bignum_fsum(xprime, zprime);
+ Hacl_Bignum_fdifference(zprime, origxprime);
+ Hacl_Bignum_fmul(xxprime0, xprime, z);
+ Hacl_Bignum_fmul(zzprime0, x, zprime);
+ uint64_t *origxprime0 = buf + (uint32_t)5U;
+ uint64_t *xx0 = buf + (uint32_t)15U;
+ uint64_t *zz0 = buf + (uint32_t)20U;
+ uint64_t *xxprime = buf + (uint32_t)25U;
+ uint64_t *zzprime = buf + (uint32_t)30U;
+ uint64_t *zzzprime = buf + (uint32_t)35U;
+ memcpy(origxprime0, xxprime, (uint32_t)5U * sizeof xxprime[0U]);
+ Hacl_Bignum_fsum(xxprime, zzprime);
+ Hacl_Bignum_fdifference(zzprime, origxprime0);
+ Hacl_Bignum_Fsquare_fsquare_times(x3, xxprime, (uint32_t)1U);
+ Hacl_Bignum_Fsquare_fsquare_times(zzzprime, zzprime, (uint32_t)1U);
+ Hacl_Bignum_fmul(z3, zzzprime, qx);
+ Hacl_Bignum_Fsquare_fsquare_times(xx0, x, (uint32_t)1U);
+ Hacl_Bignum_Fsquare_fsquare_times(zz0, z, (uint32_t)1U);
+ uint64_t *zzz = buf + (uint32_t)10U;
+ uint64_t *xx = buf + (uint32_t)15U;
+ uint64_t *zz = buf + (uint32_t)20U;
+ Hacl_Bignum_fmul(x2, xx, zz);
+ Hacl_Bignum_fdifference(zz, xx);
+ uint64_t scalar = (uint64_t)121665U;
+ Hacl_Bignum_fscalar(zzz, zz, scalar);
+ Hacl_Bignum_fsum(zzz, xx);
+ Hacl_Bignum_fmul(z2, zzz, zz);
+}
+
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint8_t byt)
+{
+ uint64_t bit = (uint64_t)(byt >> (uint32_t)7U);
+ Hacl_EC_Point_swap_conditional(nq, nqpq, bit);
+ Hacl_EC_AddAndDouble_fmonty(nq2, nqpq2, nq, nqpq, q);
+ uint64_t bit0 = (uint64_t)(byt >> (uint32_t)7U);
+ Hacl_EC_Point_swap_conditional(nq2, nqpq2, bit0);
+}
+
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint8_t byt)
+{
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
+ uint8_t byt1 = byt << (uint32_t)1U;
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
+}
+
+static void
+Hacl_EC_Ladder_SmallLoop_cmult_small_loop(
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint8_t byt,
+ uint32_t i)
+{
+ if (!(i == (uint32_t)0U)) {
+ uint32_t i_ = i - (uint32_t)1U;
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(nq, nqpq, nq2, nqpq2, q, byt);
+ uint8_t byt_ = byt << (uint32_t)2U;
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byt_, i_);
+ }
+}
+
+static void
+Hacl_EC_Ladder_BigLoop_cmult_big_loop(
+ uint8_t *n1,
+ uint64_t *nq,
+ uint64_t *nqpq,
+ uint64_t *nq2,
+ uint64_t *nqpq2,
+ uint64_t *q,
+ uint32_t i)
+{
+ if (!(i == (uint32_t)0U)) {
+ uint32_t i1 = i - (uint32_t)1U;
+ uint8_t byte = n1[i1];
+ Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byte, (uint32_t)4U);
+ Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, i1);
+ }
+}
+
+static void
+Hacl_EC_Ladder_cmult(uint64_t *result, uint8_t *n1, uint64_t *q)
+{
+ uint64_t point_buf[40U] = { 0U };
+ uint64_t *nq = point_buf;
+ uint64_t *nqpq = point_buf + (uint32_t)10U;
+ uint64_t *nq2 = point_buf + (uint32_t)20U;
+ uint64_t *nqpq2 = point_buf + (uint32_t)30U;
+ Hacl_EC_Point_copy(nqpq, q);
+ nq[0U] = (uint64_t)1U;
+ Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, (uint32_t)32U);
+ Hacl_EC_Point_copy(result, nq);
+}
+
+static void
+Hacl_EC_Format_fexpand(uint64_t *output, uint8_t *input)
+{
+ uint64_t i0 = load64_le(input);
+ uint8_t *x00 = input + (uint32_t)6U;
+ uint64_t i1 = load64_le(x00);
+ uint8_t *x01 = input + (uint32_t)12U;
+ uint64_t i2 = load64_le(x01);
+ uint8_t *x02 = input + (uint32_t)19U;
+ uint64_t i3 = load64_le(x02);
+ uint8_t *x0 = input + (uint32_t)24U;
+ uint64_t i4 = load64_le(x0);
+ uint64_t output0 = i0 & (uint64_t)0x7ffffffffffffU;
+ uint64_t output1 = i1 >> (uint32_t)3U & (uint64_t)0x7ffffffffffffU;
+ uint64_t output2 = i2 >> (uint32_t)6U & (uint64_t)0x7ffffffffffffU;
+ uint64_t output3 = i3 >> (uint32_t)1U & (uint64_t)0x7ffffffffffffU;
+ uint64_t output4 = i4 >> (uint32_t)12U & (uint64_t)0x7ffffffffffffU;
+ output[0U] = output0;
+ output[1U] = output1;
+ output[2U] = output2;
+ output[3U] = output3;
+ output[4U] = output4;
+}
+
+static void
+Hacl_EC_Format_fcontract_first_carry_pass(uint64_t *input)
+{
+ uint64_t t0 = input[0U];
+ uint64_t t1 = input[1U];
+ uint64_t t2 = input[2U];
+ uint64_t t3 = input[3U];
+ uint64_t t4 = input[4U];
+ uint64_t t1_ = t1 + (t0 >> (uint32_t)51U);
+ uint64_t t0_ = t0 & (uint64_t)0x7ffffffffffffU;
+ uint64_t t2_ = t2 + (t1_ >> (uint32_t)51U);
+ uint64_t t1__ = t1_ & (uint64_t)0x7ffffffffffffU;
+ uint64_t t3_ = t3 + (t2_ >> (uint32_t)51U);
+ uint64_t t2__ = t2_ & (uint64_t)0x7ffffffffffffU;
+ uint64_t t4_ = t4 + (t3_ >> (uint32_t)51U);
+ uint64_t t3__ = t3_ & (uint64_t)0x7ffffffffffffU;
+ input[0U] = t0_;
+ input[1U] = t1__;
+ input[2U] = t2__;
+ input[3U] = t3__;
+ input[4U] = t4_;
+}
+
+static void
+Hacl_EC_Format_fcontract_first_carry_full(uint64_t *input)
+{
+ Hacl_EC_Format_fcontract_first_carry_pass(input);
+ Hacl_Bignum_Modulo_carry_top(input);
+}
+
+static void
+Hacl_EC_Format_fcontract_second_carry_pass(uint64_t *input)
+{
+ uint64_t t0 = input[0U];
+ uint64_t t1 = input[1U];
+ uint64_t t2 = input[2U];
+ uint64_t t3 = input[3U];
+ uint64_t t4 = input[4U];
+ uint64_t t1_ = t1 + (t0 >> (uint32_t)51U);
+ uint64_t t0_ = t0 & (uint64_t)0x7ffffffffffffU;
+ uint64_t t2_ = t2 + (t1_ >> (uint32_t)51U);
+ uint64_t t1__ = t1_ & (uint64_t)0x7ffffffffffffU;
+ uint64_t t3_ = t3 + (t2_ >> (uint32_t)51U);
+ uint64_t t2__ = t2_ & (uint64_t)0x7ffffffffffffU;
+ uint64_t t4_ = t4 + (t3_ >> (uint32_t)51U);
+ uint64_t t3__ = t3_ & (uint64_t)0x7ffffffffffffU;
+ input[0U] = t0_;
+ input[1U] = t1__;
+ input[2U] = t2__;
+ input[3U] = t3__;
+ input[4U] = t4_;
+}
+
+static void
+Hacl_EC_Format_fcontract_second_carry_full(uint64_t *input)
+{
+ Hacl_EC_Format_fcontract_second_carry_pass(input);
+ Hacl_Bignum_Modulo_carry_top(input);
+ uint64_t i0 = input[0U];
+ uint64_t i1 = input[1U];
+ uint64_t i0_ = i0 & (uint64_t)0x7ffffffffffffU;
+ uint64_t i1_ = i1 + (i0 >> (uint32_t)51U);
+ input[0U] = i0_;
+ input[1U] = i1_;
+}
+
+static void
+Hacl_EC_Format_fcontract_trim(uint64_t *input)
+{
+ uint64_t a0 = input[0U];
+ uint64_t a1 = input[1U];
+ uint64_t a2 = input[2U];
+ uint64_t a3 = input[3U];
+ uint64_t a4 = input[4U];
+ uint64_t mask0 = FStar_UInt64_gte_mask(a0, (uint64_t)0x7ffffffffffedU);
+ uint64_t mask1 = FStar_UInt64_eq_mask(a1, (uint64_t)0x7ffffffffffffU);
+ uint64_t mask2 = FStar_UInt64_eq_mask(a2, (uint64_t)0x7ffffffffffffU);
+ uint64_t mask3 = FStar_UInt64_eq_mask(a3, (uint64_t)0x7ffffffffffffU);
+ uint64_t mask4 = FStar_UInt64_eq_mask(a4, (uint64_t)0x7ffffffffffffU);
+ uint64_t mask = (((mask0 & mask1) & mask2) & mask3) & mask4;
+ uint64_t a0_ = a0 - ((uint64_t)0x7ffffffffffedU & mask);
+ uint64_t a1_ = a1 - ((uint64_t)0x7ffffffffffffU & mask);
+ uint64_t a2_ = a2 - ((uint64_t)0x7ffffffffffffU & mask);
+ uint64_t a3_ = a3 - ((uint64_t)0x7ffffffffffffU & mask);
+ uint64_t a4_ = a4 - ((uint64_t)0x7ffffffffffffU & mask);
+ input[0U] = a0_;
+ input[1U] = a1_;
+ input[2U] = a2_;
+ input[3U] = a3_;
+ input[4U] = a4_;
+}
+
+static void
+Hacl_EC_Format_fcontract_store(uint8_t *output, uint64_t *input)
+{
+ uint64_t t0 = input[0U];
+ uint64_t t1 = input[1U];
+ uint64_t t2 = input[2U];
+ uint64_t t3 = input[3U];
+ uint64_t t4 = input[4U];
+ uint64_t o0 = t1 << (uint32_t)51U | t0;
+ uint64_t o1 = t2 << (uint32_t)38U | t1 >> (uint32_t)13U;
+ uint64_t o2 = t3 << (uint32_t)25U | t2 >> (uint32_t)26U;
+ uint64_t o3 = t4 << (uint32_t)12U | t3 >> (uint32_t)39U;
+ uint8_t *b0 = output;
+ uint8_t *b1 = output + (uint32_t)8U;
+ uint8_t *b2 = output + (uint32_t)16U;
+ uint8_t *b3 = output + (uint32_t)24U;
+ store64_le(b0, o0);
+ store64_le(b1, o1);
+ store64_le(b2, o2);
+ store64_le(b3, o3);
+}
+
+static void
+Hacl_EC_Format_fcontract(uint8_t *output, uint64_t *input)
+{
+ Hacl_EC_Format_fcontract_first_carry_full(input);
+ Hacl_EC_Format_fcontract_second_carry_full(input);
+ Hacl_EC_Format_fcontract_trim(input);
+ Hacl_EC_Format_fcontract_store(output, input);
+}
+
+static void
+Hacl_EC_Format_scalar_of_point(uint8_t *scalar, uint64_t *point)
+{
+ uint64_t *x = point;
+ uint64_t *z = point + (uint32_t)5U;
+ uint64_t buf[10U] = { 0U };
+ uint64_t *zmone = buf;
+ uint64_t *sc = buf + (uint32_t)5U;
+ Hacl_Bignum_crecip(zmone, z);
+ Hacl_Bignum_fmul(sc, x, zmone);
+ Hacl_EC_Format_fcontract(scalar, sc);
+}
+
+void
+Hacl_EC_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint)
+{
+ uint64_t buf0[10U] = { 0U };
+ uint64_t *x0 = buf0;
+ uint64_t *z = buf0 + (uint32_t)5U;
+ Hacl_EC_Format_fexpand(x0, basepoint);
+ z[0U] = (uint64_t)1U;
+ uint64_t *q = buf0;
+ uint8_t e[32U] = { 0U };
+ memcpy(e, secret, (uint32_t)32U * sizeof secret[0U]);
+ uint8_t e0 = e[0U];
+ uint8_t e31 = e[31U];
+ uint8_t e01 = e0 & (uint8_t)248U;
+ uint8_t e311 = e31 & (uint8_t)127U;
+ uint8_t e312 = e311 | (uint8_t)64U;
+ e[0U] = e01;
+ e[31U] = e312;
+ uint8_t *scalar = e;
+ uint64_t buf[15U] = { 0U };
+ uint64_t *nq = buf;
+ uint64_t *x = nq;
+ x[0U] = (uint64_t)1U;
+ Hacl_EC_Ladder_cmult(nq, scalar, q);
+ Hacl_EC_Format_scalar_of_point(mypublic, nq);
+}
+
+void
+Hacl_Curve25519_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint)
+{
+ Hacl_EC_crypto_scalarmult(mypublic, secret, basepoint);
+}
diff --git a/lib/freebl/verified/hacl_curve25519_64.h b/lib/freebl/verified/Hacl_Curve25519.h
index 79fbd44b8..0e443f177 100644
--- a/lib/freebl/verified/hacl_curve25519_64.h
+++ b/lib/freebl/verified/Hacl_Curve25519.h
@@ -1,22 +1,21 @@
-// Copyright 2016-2017 INRIA and Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/* This file was auto-generated by KreMLin! */
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
#include "kremlib.h"
-#ifndef __Hacl_Curve25519_64_H
-#define __Hacl_Curve25519_64_H
+#ifndef __Hacl_Curve25519_H
+#define __Hacl_Curve25519_H
typedef uint64_t Hacl_Bignum_Constants_limb;
@@ -52,9 +51,7 @@ typedef uint8_t *Hacl_EC_Format_uint8_p;
void Hacl_EC_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint);
-typedef uint8_t *Curve25519_uint8_p;
+typedef uint8_t *Hacl_Curve25519_uint8_p;
-void *Curve25519_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *b);
-
-void Curve25519_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint);
+void Hacl_Curve25519_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint);
#endif
diff --git a/lib/freebl/verified/hacl_curve25519_64.c b/lib/freebl/verified/hacl_curve25519_64.c
deleted file mode 100644
index 6e7e29484..000000000
--- a/lib/freebl/verified/hacl_curve25519_64.c
+++ /dev/null
@@ -1,1044 +0,0 @@
-// Copyright 2016-2017 INRIA and Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "hacl_curve25519_64.h"
-
-static void
-Hacl_Bignum_Modulo_carry_top(uint64_t *b)
-{
- uint64_t b4 = b[4];
- uint64_t b0 = b[0];
- uint64_t mask = ((uint64_t)1 << (uint32_t)51) - (uint64_t)1;
- uint64_t b4_ = b4 & mask;
- uint64_t b0_ = b0 + (uint64_t)19 * (b4 >> (uint32_t)51);
- b[4] = b4_;
- b[0] = b0_;
-}
-
-inline static void
-Hacl_Bignum_Fproduct_copy_from_wide_(uint64_t *output, FStar_UInt128_t *input)
-{
- {
- FStar_UInt128_t uu____429 = input[0];
- uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
- output[0] = uu____428;
- }
- {
- FStar_UInt128_t uu____429 = input[1];
- uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
- output[1] = uu____428;
- }
- {
- FStar_UInt128_t uu____429 = input[2];
- uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
- output[2] = uu____428;
- }
- {
- FStar_UInt128_t uu____429 = input[3];
- uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
- output[3] = uu____428;
- }
- {
- FStar_UInt128_t uu____429 = input[4];
- uint64_t uu____428 = FStar_Int_Cast_Full_uint128_to_uint64(uu____429);
- output[4] = uu____428;
- }
-}
-
-inline static void
-Hacl_Bignum_Fproduct_shift(uint64_t *output)
-{
- uint64_t tmp = output[4];
- {
- uint32_t ctr = (uint32_t)5 - (uint32_t)0 - (uint32_t)1;
- uint64_t z = output[ctr - (uint32_t)1];
- output[ctr] = z;
- }
- {
- uint32_t ctr = (uint32_t)5 - (uint32_t)1 - (uint32_t)1;
- uint64_t z = output[ctr - (uint32_t)1];
- output[ctr] = z;
- }
- {
- uint32_t ctr = (uint32_t)5 - (uint32_t)2 - (uint32_t)1;
- uint64_t z = output[ctr - (uint32_t)1];
- output[ctr] = z;
- }
- {
- uint32_t ctr = (uint32_t)5 - (uint32_t)3 - (uint32_t)1;
- uint64_t z = output[ctr - (uint32_t)1];
- output[ctr] = z;
- }
- output[0] = tmp;
-}
-
-inline static void
-Hacl_Bignum_Fproduct_sum_scalar_multiplication_(
- FStar_UInt128_t *output,
- uint64_t *input,
- uint64_t s)
-{
- {
- FStar_UInt128_t uu____871 = output[0];
- uint64_t uu____874 = input[0];
- FStar_UInt128_t
- uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
- output[0] = uu____870;
- }
- {
- FStar_UInt128_t uu____871 = output[1];
- uint64_t uu____874 = input[1];
- FStar_UInt128_t
- uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
- output[1] = uu____870;
- }
- {
- FStar_UInt128_t uu____871 = output[2];
- uint64_t uu____874 = input[2];
- FStar_UInt128_t
- uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
- output[2] = uu____870;
- }
- {
- FStar_UInt128_t uu____871 = output[3];
- uint64_t uu____874 = input[3];
- FStar_UInt128_t
- uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
- output[3] = uu____870;
- }
- {
- FStar_UInt128_t uu____871 = output[4];
- uint64_t uu____874 = input[4];
- FStar_UInt128_t
- uu____870 = FStar_UInt128_add_mod(uu____871, FStar_UInt128_mul_wide(uu____874, s));
- output[4] = uu____870;
- }
-}
-
-inline static void
-Hacl_Bignum_Fproduct_carry_wide_(FStar_UInt128_t *tmp)
-{
- {
- uint32_t ctr = (uint32_t)0;
- FStar_UInt128_t tctr = tmp[ctr];
- FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1];
- uint64_t
- r0 =
- FStar_Int_Cast_Full_uint128_to_uint64(tctr) & (((uint64_t)1 << (uint32_t)51) - (uint64_t)1);
- FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)51);
- tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
- tmp[ctr + (uint32_t)1] = FStar_UInt128_add(tctrp1, c);
- }
- {
- uint32_t ctr = (uint32_t)1;
- FStar_UInt128_t tctr = tmp[ctr];
- FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1];
- uint64_t
- r0 =
- FStar_Int_Cast_Full_uint128_to_uint64(tctr) & (((uint64_t)1 << (uint32_t)51) - (uint64_t)1);
- FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)51);
- tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
- tmp[ctr + (uint32_t)1] = FStar_UInt128_add(tctrp1, c);
- }
- {
- uint32_t ctr = (uint32_t)2;
- FStar_UInt128_t tctr = tmp[ctr];
- FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1];
- uint64_t
- r0 =
- FStar_Int_Cast_Full_uint128_to_uint64(tctr) & (((uint64_t)1 << (uint32_t)51) - (uint64_t)1);
- FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)51);
- tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
- tmp[ctr + (uint32_t)1] = FStar_UInt128_add(tctrp1, c);
- }
- {
- uint32_t ctr = (uint32_t)3;
- FStar_UInt128_t tctr = tmp[ctr];
- FStar_UInt128_t tctrp1 = tmp[ctr + (uint32_t)1];
- uint64_t
- r0 =
- FStar_Int_Cast_Full_uint128_to_uint64(tctr) & (((uint64_t)1 << (uint32_t)51) - (uint64_t)1);
- FStar_UInt128_t c = FStar_UInt128_shift_right(tctr, (uint32_t)51);
- tmp[ctr] = FStar_Int_Cast_Full_uint64_to_uint128(r0);
- tmp[ctr + (uint32_t)1] = FStar_UInt128_add(tctrp1, c);
- }
-}
-
-inline static void
-Hacl_Bignum_Fmul_shift_reduce(uint64_t *output)
-{
- Hacl_Bignum_Fproduct_shift(output);
- uint64_t b0 = output[0];
- output[0] = (uint64_t)19 * b0;
-}
-
-static void
-Hacl_Bignum_Fmul_mul_shift_reduce_(FStar_UInt128_t *output, uint64_t *input, uint64_t *input21)
-{
- {
- uint32_t ctr = (uint32_t)5 - (uint32_t)0 - (uint32_t)1;
- uint32_t i1 = ctr;
- uint32_t j = (uint32_t)4 - i1;
- uint64_t input2i = input21[j];
- Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
- if (ctr > (uint32_t)0)
- Hacl_Bignum_Fmul_shift_reduce(input);
- }
- {
- uint32_t ctr = (uint32_t)5 - (uint32_t)1 - (uint32_t)1;
- uint32_t i1 = ctr;
- uint32_t j = (uint32_t)4 - i1;
- uint64_t input2i = input21[j];
- Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
- if (ctr > (uint32_t)0)
- Hacl_Bignum_Fmul_shift_reduce(input);
- }
- {
- uint32_t ctr = (uint32_t)5 - (uint32_t)2 - (uint32_t)1;
- uint32_t i1 = ctr;
- uint32_t j = (uint32_t)4 - i1;
- uint64_t input2i = input21[j];
- Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
- if (ctr > (uint32_t)0)
- Hacl_Bignum_Fmul_shift_reduce(input);
- }
- {
- uint32_t ctr = (uint32_t)5 - (uint32_t)3 - (uint32_t)1;
- uint32_t i1 = ctr;
- uint32_t j = (uint32_t)4 - i1;
- uint64_t input2i = input21[j];
- Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
- if (ctr > (uint32_t)0)
- Hacl_Bignum_Fmul_shift_reduce(input);
- }
- {
- uint32_t ctr = (uint32_t)5 - (uint32_t)4 - (uint32_t)1;
- uint32_t i1 = ctr;
- uint32_t j = (uint32_t)4 - i1;
- uint64_t input2i = input21[j];
- Hacl_Bignum_Fproduct_sum_scalar_multiplication_(output, input, input2i);
- if (ctr > (uint32_t)0)
- Hacl_Bignum_Fmul_shift_reduce(input);
- }
-}
-
-inline static void
-Hacl_Bignum_Fmul_fmul_(uint64_t *output, uint64_t *input, uint64_t *input21)
-{
- KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)0), (uint32_t)5);
- FStar_UInt128_t t[5];
- for (uintmax_t _i = 0; _i < (uint32_t)5; ++_i)
- t[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)0);
- Hacl_Bignum_Fmul_mul_shift_reduce_(t, input, input21);
- Hacl_Bignum_Fproduct_carry_wide_(t);
- FStar_UInt128_t b4 = t[4];
- FStar_UInt128_t b0 = t[0];
- FStar_UInt128_t
- mask =
- FStar_UInt128_sub(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)1),
- (uint32_t)51),
- FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)1));
- FStar_UInt128_t b4_ = FStar_UInt128_logand(b4, mask);
- FStar_UInt128_t
- b0_ =
- FStar_UInt128_add(b0,
- FStar_UInt128_mul_wide((uint64_t)19,
- FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t)51))));
- t[4] = b4_;
- t[0] = b0_;
- Hacl_Bignum_Fproduct_copy_from_wide_(output, t);
- uint64_t i0 = output[0];
- uint64_t i1 = output[1];
- uint64_t i0_ = i0 & (((uint64_t)1 << (uint32_t)51) - (uint64_t)1);
- uint64_t i1_ = i1 + (i0 >> (uint32_t)51);
- output[0] = i0_;
- output[1] = i1_;
-}
-
-inline static void
-Hacl_Bignum_Fmul_fmul(uint64_t *output, uint64_t *input, uint64_t *input21)
-{
- uint64_t tmp[5] = { 0 };
- memcpy(tmp, input, (uint32_t)5 * sizeof input[0]);
- Hacl_Bignum_Fmul_fmul_(output, tmp, input21);
-}
-
-inline static void
-Hacl_Bignum_Fsquare_upd_5(
- FStar_UInt128_t *tmp,
- FStar_UInt128_t s0,
- FStar_UInt128_t s1,
- FStar_UInt128_t s2,
- FStar_UInt128_t s3,
- FStar_UInt128_t s4)
-{
- tmp[0] = s0;
- tmp[1] = s1;
- tmp[2] = s2;
- tmp[3] = s3;
- tmp[4] = s4;
-}
-
-inline static void
-Hacl_Bignum_Fsquare_fsquare__(FStar_UInt128_t *tmp, uint64_t *output)
-{
- uint64_t r0 = output[0];
- uint64_t r1 = output[1];
- uint64_t r2 = output[2];
- uint64_t r3 = output[3];
- uint64_t r4 = output[4];
- uint64_t d0 = r0 * (uint64_t)2;
- uint64_t d1 = r1 * (uint64_t)2;
- uint64_t d2 = r2 * (uint64_t)2 * (uint64_t)19;
- uint64_t d419 = r4 * (uint64_t)19;
- uint64_t d4 = d419 * (uint64_t)2;
- FStar_UInt128_t
- s0 =
- FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(r0, r0),
- FStar_UInt128_mul_wide(d4, r1)),
- FStar_UInt128_mul_wide(d2, r3));
- FStar_UInt128_t
- s1 =
- FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r1),
- FStar_UInt128_mul_wide(d4, r2)),
- FStar_UInt128_mul_wide(r3 * (uint64_t)19, r3));
- FStar_UInt128_t
- s2 =
- FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r2),
- FStar_UInt128_mul_wide(r1, r1)),
- FStar_UInt128_mul_wide(d4, r3));
- FStar_UInt128_t
- s3 =
- FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r3),
- FStar_UInt128_mul_wide(d1, r2)),
- FStar_UInt128_mul_wide(r4, d419));
- FStar_UInt128_t
- s4 =
- FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, r4),
- FStar_UInt128_mul_wide(d1, r3)),
- FStar_UInt128_mul_wide(r2, r2));
- Hacl_Bignum_Fsquare_upd_5(tmp, s0, s1, s2, s3, s4);
-}
-
-inline static void
-Hacl_Bignum_Fsquare_fsquare_(FStar_UInt128_t *tmp, uint64_t *output)
-{
- Hacl_Bignum_Fsquare_fsquare__(tmp, output);
- Hacl_Bignum_Fproduct_carry_wide_(tmp);
- FStar_UInt128_t b4 = tmp[4];
- FStar_UInt128_t b0 = tmp[0];
- FStar_UInt128_t
- mask =
- FStar_UInt128_sub(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)1),
- (uint32_t)51),
- FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)1));
- FStar_UInt128_t b4_ = FStar_UInt128_logand(b4, mask);
- FStar_UInt128_t
- b0_ =
- FStar_UInt128_add(b0,
- FStar_UInt128_mul_wide((uint64_t)19,
- FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t)51))));
- tmp[4] = b4_;
- tmp[0] = b0_;
- Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
- uint64_t i0 = output[0];
- uint64_t i1 = output[1];
- uint64_t i0_ = i0 & (((uint64_t)1 << (uint32_t)51) - (uint64_t)1);
- uint64_t i1_ = i1 + (i0 >> (uint32_t)51);
- output[0] = i0_;
- output[1] = i1_;
-}
-
-inline static void
-Hacl_Bignum_Fsquare_fsquare_times_(uint64_t *output, FStar_UInt128_t *tmp, uint32_t count1)
-{
- if (count1 == (uint32_t)1)
- Hacl_Bignum_Fsquare_fsquare_(tmp, output);
- else {
- uint32_t i = count1 - (uint32_t)1;
- Hacl_Bignum_Fsquare_fsquare_(tmp, output);
- Hacl_Bignum_Fsquare_fsquare_times_(output, tmp, i);
- }
-}
-
-inline static void
-Hacl_Bignum_Fsquare_fsquare_times(uint64_t *output, uint64_t *input, uint32_t count1)
-{
- KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)0), (uint32_t)5);
- FStar_UInt128_t t[5];
- for (uintmax_t _i = 0; _i < (uint32_t)5; ++_i)
- t[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)0);
- memcpy(output, input, (uint32_t)5 * sizeof input[0]);
- Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
-}
-
-inline static void
-Hacl_Bignum_Fsquare_fsquare_times_inplace(uint64_t *output, uint32_t count1)
-{
- KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)0), (uint32_t)5);
- FStar_UInt128_t t[5];
- for (uintmax_t _i = 0; _i < (uint32_t)5; ++_i)
- t[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)0);
- Hacl_Bignum_Fsquare_fsquare_times_(output, t, count1);
-}
-
-inline static void
-Hacl_Bignum_Crecip_crecip(uint64_t *out, uint64_t *z)
-{
- uint64_t buf[20] = { 0 };
- uint64_t *a = buf;
- uint64_t *t00 = buf + (uint32_t)5;
- uint64_t *b0 = buf + (uint32_t)10;
- (void)(buf + (uint32_t)15);
- Hacl_Bignum_Fsquare_fsquare_times(a, z, (uint32_t)1);
- Hacl_Bignum_Fsquare_fsquare_times(t00, a, (uint32_t)2);
- Hacl_Bignum_Fmul_fmul(b0, t00, z);
- Hacl_Bignum_Fmul_fmul(a, b0, a);
- Hacl_Bignum_Fsquare_fsquare_times(t00, a, (uint32_t)1);
- Hacl_Bignum_Fmul_fmul(b0, t00, b0);
- Hacl_Bignum_Fsquare_fsquare_times(t00, b0, (uint32_t)5);
- uint64_t *t01 = buf + (uint32_t)5;
- uint64_t *b1 = buf + (uint32_t)10;
- uint64_t *c0 = buf + (uint32_t)15;
- Hacl_Bignum_Fmul_fmul(b1, t01, b1);
- Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)10);
- Hacl_Bignum_Fmul_fmul(c0, t01, b1);
- Hacl_Bignum_Fsquare_fsquare_times(t01, c0, (uint32_t)20);
- Hacl_Bignum_Fmul_fmul(t01, t01, c0);
- Hacl_Bignum_Fsquare_fsquare_times_inplace(t01, (uint32_t)10);
- Hacl_Bignum_Fmul_fmul(b1, t01, b1);
- Hacl_Bignum_Fsquare_fsquare_times(t01, b1, (uint32_t)50);
- uint64_t *a0 = buf;
- uint64_t *t0 = buf + (uint32_t)5;
- uint64_t *b = buf + (uint32_t)10;
- uint64_t *c = buf + (uint32_t)15;
- Hacl_Bignum_Fmul_fmul(c, t0, b);
- Hacl_Bignum_Fsquare_fsquare_times(t0, c, (uint32_t)100);
- Hacl_Bignum_Fmul_fmul(t0, t0, c);
- Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)50);
- Hacl_Bignum_Fmul_fmul(t0, t0, b);
- Hacl_Bignum_Fsquare_fsquare_times_inplace(t0, (uint32_t)5);
- Hacl_Bignum_Fmul_fmul(out, t0, a0);
-}
-
-inline static void
-Hacl_Bignum_fsum(uint64_t *a, uint64_t *b)
-{
- {
- uint64_t uu____871 = a[0];
- uint64_t uu____874 = b[0];
- uint64_t uu____870 = uu____871 + uu____874;
- a[0] = uu____870;
- }
- {
- uint64_t uu____871 = a[1];
- uint64_t uu____874 = b[1];
- uint64_t uu____870 = uu____871 + uu____874;
- a[1] = uu____870;
- }
- {
- uint64_t uu____871 = a[2];
- uint64_t uu____874 = b[2];
- uint64_t uu____870 = uu____871 + uu____874;
- a[2] = uu____870;
- }
- {
- uint64_t uu____871 = a[3];
- uint64_t uu____874 = b[3];
- uint64_t uu____870 = uu____871 + uu____874;
- a[3] = uu____870;
- }
- {
- uint64_t uu____871 = a[4];
- uint64_t uu____874 = b[4];
- uint64_t uu____870 = uu____871 + uu____874;
- a[4] = uu____870;
- }
-}
-
-inline static void
-Hacl_Bignum_fdifference(uint64_t *a, uint64_t *b)
-{
- uint64_t tmp[5] = { 0 };
- memcpy(tmp, b, (uint32_t)5 * sizeof b[0]);
- uint64_t b0 = tmp[0];
- uint64_t b1 = tmp[1];
- uint64_t b2 = tmp[2];
- uint64_t b3 = tmp[3];
- uint64_t b4 = tmp[4];
- tmp[0] = b0 + (uint64_t)0x3fffffffffff68;
- tmp[1] = b1 + (uint64_t)0x3ffffffffffff8;
- tmp[2] = b2 + (uint64_t)0x3ffffffffffff8;
- tmp[3] = b3 + (uint64_t)0x3ffffffffffff8;
- tmp[4] = b4 + (uint64_t)0x3ffffffffffff8;
- {
- uint64_t uu____871 = a[0];
- uint64_t uu____874 = tmp[0];
- uint64_t uu____870 = uu____874 - uu____871;
- a[0] = uu____870;
- }
- {
- uint64_t uu____871 = a[1];
- uint64_t uu____874 = tmp[1];
- uint64_t uu____870 = uu____874 - uu____871;
- a[1] = uu____870;
- }
- {
- uint64_t uu____871 = a[2];
- uint64_t uu____874 = tmp[2];
- uint64_t uu____870 = uu____874 - uu____871;
- a[2] = uu____870;
- }
- {
- uint64_t uu____871 = a[3];
- uint64_t uu____874 = tmp[3];
- uint64_t uu____870 = uu____874 - uu____871;
- a[3] = uu____870;
- }
- {
- uint64_t uu____871 = a[4];
- uint64_t uu____874 = tmp[4];
- uint64_t uu____870 = uu____874 - uu____871;
- a[4] = uu____870;
- }
-}
-
-inline static void
-Hacl_Bignum_fscalar(uint64_t *output, uint64_t *b, uint64_t s)
-{
- KRML_CHECK_SIZE(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)0), (uint32_t)5);
- FStar_UInt128_t tmp[5];
- for (uintmax_t _i = 0; _i < (uint32_t)5; ++_i)
- tmp[_i] = FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)0);
- {
- uint64_t uu____429 = b[0];
- FStar_UInt128_t uu____428 = FStar_UInt128_mul_wide(uu____429, s);
- tmp[0] = uu____428;
- }
- {
- uint64_t uu____429 = b[1];
- FStar_UInt128_t uu____428 = FStar_UInt128_mul_wide(uu____429, s);
- tmp[1] = uu____428;
- }
- {
- uint64_t uu____429 = b[2];
- FStar_UInt128_t uu____428 = FStar_UInt128_mul_wide(uu____429, s);
- tmp[2] = uu____428;
- }
- {
- uint64_t uu____429 = b[3];
- FStar_UInt128_t uu____428 = FStar_UInt128_mul_wide(uu____429, s);
- tmp[3] = uu____428;
- }
- {
- uint64_t uu____429 = b[4];
- FStar_UInt128_t uu____428 = FStar_UInt128_mul_wide(uu____429, s);
- tmp[4] = uu____428;
- }
- Hacl_Bignum_Fproduct_carry_wide_(tmp);
- FStar_UInt128_t b4 = tmp[4];
- FStar_UInt128_t b0 = tmp[0];
- FStar_UInt128_t
- mask =
- FStar_UInt128_sub(FStar_UInt128_shift_left(FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)1),
- (uint32_t)51),
- FStar_Int_Cast_Full_uint64_to_uint128((uint64_t)1));
- FStar_UInt128_t b4_ = FStar_UInt128_logand(b4, mask);
- FStar_UInt128_t
- b0_ =
- FStar_UInt128_add(b0,
- FStar_UInt128_mul_wide((uint64_t)19,
- FStar_Int_Cast_Full_uint128_to_uint64(FStar_UInt128_shift_right(b4, (uint32_t)51))));
- tmp[4] = b4_;
- tmp[0] = b0_;
- Hacl_Bignum_Fproduct_copy_from_wide_(output, tmp);
-}
-
-inline static void
-Hacl_Bignum_fmul(uint64_t *output, uint64_t *a, uint64_t *b)
-{
- Hacl_Bignum_Fmul_fmul(output, a, b);
-}
-
-inline static void
-Hacl_Bignum_crecip(uint64_t *output, uint64_t *input)
-{
- Hacl_Bignum_Crecip_crecip(output, input);
-}
-
-static void
-Hacl_EC_Point_swap_conditional_step(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
-{
- uint32_t i = ctr - (uint32_t)1;
- uint64_t ai = a[i];
- uint64_t bi = b[i];
- uint64_t x = swap1 & (ai ^ bi);
- uint64_t ai1 = ai ^ x;
- uint64_t bi1 = bi ^ x;
- a[i] = ai1;
- b[i] = bi1;
-}
-
-static void
-Hacl_EC_Point_swap_conditional_(uint64_t *a, uint64_t *b, uint64_t swap1, uint32_t ctr)
-{
- if (ctr == (uint32_t)0) {
-
- } else {
- Hacl_EC_Point_swap_conditional_step(a, b, swap1, ctr);
- uint32_t i = ctr - (uint32_t)1;
- Hacl_EC_Point_swap_conditional_(a, b, swap1, i);
- }
-}
-
-static void
-Hacl_EC_Point_swap_conditional(uint64_t *a, uint64_t *b, uint64_t iswap)
-{
- uint64_t swap1 = (uint64_t)0 - iswap;
- Hacl_EC_Point_swap_conditional_(a, b, swap1, (uint32_t)5);
- Hacl_EC_Point_swap_conditional_(a + (uint32_t)5, b + (uint32_t)5, swap1, (uint32_t)5);
-}
-
-static void
-Hacl_EC_Point_copy(uint64_t *output, uint64_t *input)
-{
- memcpy(output, input, (uint32_t)5 * sizeof input[0]);
- memcpy(output + (uint32_t)5,
- input + (uint32_t)5,
- (uint32_t)5 * sizeof(input + (uint32_t)5)[0]);
-}
-
-static void
-Hacl_EC_AddAndDouble_fmonty(
- uint64_t *pp,
- uint64_t *ppq,
- uint64_t *p,
- uint64_t *pq,
- uint64_t *qmqp)
-{
- uint64_t *qx = qmqp;
- uint64_t *x2 = pp;
- uint64_t *z2 = pp + (uint32_t)5;
- uint64_t *x3 = ppq;
- uint64_t *z3 = ppq + (uint32_t)5;
- uint64_t *x = p;
- uint64_t *z = p + (uint32_t)5;
- uint64_t *xprime = pq;
- uint64_t *zprime = pq + (uint32_t)5;
- uint64_t buf[40] = { 0 };
- (void)(buf + (uint32_t)5);
- (void)(buf + (uint32_t)10);
- (void)(buf + (uint32_t)15);
- (void)(buf + (uint32_t)20);
- (void)(buf + (uint32_t)25);
- (void)(buf + (uint32_t)30);
- (void)(buf + (uint32_t)35);
- uint64_t *origx = buf;
- uint64_t *origxprime = buf + (uint32_t)5;
- (void)(buf + (uint32_t)10);
- (void)(buf + (uint32_t)15);
- (void)(buf + (uint32_t)20);
- uint64_t *xxprime0 = buf + (uint32_t)25;
- uint64_t *zzprime0 = buf + (uint32_t)30;
- (void)(buf + (uint32_t)35);
- memcpy(origx, x, (uint32_t)5 * sizeof x[0]);
- Hacl_Bignum_fsum(x, z);
- Hacl_Bignum_fdifference(z, origx);
- memcpy(origxprime, xprime, (uint32_t)5 * sizeof xprime[0]);
- Hacl_Bignum_fsum(xprime, zprime);
- Hacl_Bignum_fdifference(zprime, origxprime);
- Hacl_Bignum_fmul(xxprime0, xprime, z);
- Hacl_Bignum_fmul(zzprime0, x, zprime);
- uint64_t *origxprime0 = buf + (uint32_t)5;
- (void)(buf + (uint32_t)10);
- uint64_t *xx0 = buf + (uint32_t)15;
- uint64_t *zz0 = buf + (uint32_t)20;
- uint64_t *xxprime = buf + (uint32_t)25;
- uint64_t *zzprime = buf + (uint32_t)30;
- uint64_t *zzzprime = buf + (uint32_t)35;
- memcpy(origxprime0, xxprime, (uint32_t)5 * sizeof xxprime[0]);
- Hacl_Bignum_fsum(xxprime, zzprime);
- Hacl_Bignum_fdifference(zzprime, origxprime0);
- Hacl_Bignum_Fsquare_fsquare_times(x3, xxprime, (uint32_t)1);
- Hacl_Bignum_Fsquare_fsquare_times(zzzprime, zzprime, (uint32_t)1);
- Hacl_Bignum_fmul(z3, zzzprime, qx);
- Hacl_Bignum_Fsquare_fsquare_times(xx0, x, (uint32_t)1);
- Hacl_Bignum_Fsquare_fsquare_times(zz0, z, (uint32_t)1);
- (void)(buf + (uint32_t)5);
- uint64_t *zzz = buf + (uint32_t)10;
- uint64_t *xx = buf + (uint32_t)15;
- uint64_t *zz = buf + (uint32_t)20;
- (void)(buf + (uint32_t)25);
- (void)(buf + (uint32_t)30);
- (void)(buf + (uint32_t)35);
- Hacl_Bignum_fmul(x2, xx, zz);
- Hacl_Bignum_fdifference(zz, xx);
- uint64_t scalar = (uint64_t)121665;
- Hacl_Bignum_fscalar(zzz, zz, scalar);
- Hacl_Bignum_fsum(zzz, xx);
- Hacl_Bignum_fmul(z2, zzz, zz);
-}
-
-static void
-Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step_1(
- uint64_t *nq,
- uint64_t *nqpq,
- uint64_t *nq2,
- uint64_t *nqpq2,
- uint64_t *q,
- uint8_t byt)
-{
- uint64_t bit = (uint64_t)(byt >> (uint32_t)7);
- Hacl_EC_Point_swap_conditional(nq, nqpq, bit);
-}
-
-static void
-Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step_2(
- uint64_t *nq,
- uint64_t *nqpq,
- uint64_t *nq2,
- uint64_t *nqpq2,
- uint64_t *q,
- uint8_t byt)
-{
- Hacl_EC_AddAndDouble_fmonty(nq2, nqpq2, nq, nqpq, q);
-}
-
-static void
-Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(
- uint64_t *nq,
- uint64_t *nqpq,
- uint64_t *nq2,
- uint64_t *nqpq2,
- uint64_t *q,
- uint8_t byt)
-{
- Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step_1(nq, nqpq, nq2, nqpq2, q, byt);
- Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step_2(nq, nqpq, nq2, nqpq2, q, byt);
- Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step_1(nq2, nqpq2, nq, nqpq, q, byt);
-}
-
-static void
-Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(
- uint64_t *nq,
- uint64_t *nqpq,
- uint64_t *nq2,
- uint64_t *nqpq2,
- uint64_t *q,
- uint8_t byt)
-{
- Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
- uint8_t byt1 = byt << (uint32_t)1;
- Hacl_EC_Ladder_SmallLoop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
-}
-
-static void
-Hacl_EC_Ladder_SmallLoop_cmult_small_loop(
- uint64_t *nq,
- uint64_t *nqpq,
- uint64_t *nq2,
- uint64_t *nqpq2,
- uint64_t *q,
- uint8_t byt,
- uint32_t i)
-{
- if (i == (uint32_t)0) {
-
- } else {
- uint32_t i_ = i - (uint32_t)1;
- Hacl_EC_Ladder_SmallLoop_cmult_small_loop_double_step(nq, nqpq, nq2, nqpq2, q, byt);
- uint8_t byt_ = byt << (uint32_t)2;
- Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byt_, i_);
- }
-}
-
-static void
-Hacl_EC_Ladder_BigLoop_cmult_big_loop(
- uint8_t *n1,
- uint64_t *nq,
- uint64_t *nqpq,
- uint64_t *nq2,
- uint64_t *nqpq2,
- uint64_t *q,
- uint32_t i)
-{
- if (i == (uint32_t)0) {
-
- } else {
- uint32_t i1 = i - (uint32_t)1;
- uint8_t byte = n1[i1];
- Hacl_EC_Ladder_SmallLoop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, byte, (uint32_t)4);
- Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, i1);
- }
-}
-
-static void
-Hacl_EC_Ladder_cmult_(uint64_t *result, uint64_t *point_buf, uint8_t *n1, uint64_t *q)
-{
- uint64_t *nq = point_buf;
- uint64_t *nqpq = point_buf + (uint32_t)10;
- uint64_t *nq2 = point_buf + (uint32_t)20;
- uint64_t *nqpq2 = point_buf + (uint32_t)30;
- Hacl_EC_Point_copy(nqpq, q);
- nq[0] = (uint64_t)1;
- Hacl_EC_Ladder_BigLoop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, (uint32_t)32);
- Hacl_EC_Point_copy(result, nq);
-}
-
-static void
-Hacl_EC_Ladder_cmult(uint64_t *result, uint8_t *n1, uint64_t *q)
-{
- uint64_t point_buf[40] = { 0 };
- Hacl_EC_Ladder_cmult_(result, point_buf, n1, q);
-}
-
-static void
-Hacl_EC_Format_upd_5(
- uint64_t *output,
- uint64_t output0,
- uint64_t output1,
- uint64_t output2,
- uint64_t output3,
- uint64_t output4)
-{
- output[0] = output0;
- output[1] = output1;
- output[2] = output2;
- output[3] = output3;
- output[4] = output4;
-}
-
-static void
-Hacl_EC_Format_upd_5_(
- uint64_t *output,
- uint64_t output0,
- uint64_t output1,
- uint64_t output2,
- uint64_t output3,
- uint64_t output4)
-{
- output[0] = output0;
- output[1] = output1;
- output[2] = output2;
- output[3] = output3;
- output[4] = output4;
-}
-
-static void
-Hacl_EC_Format_fexpand(uint64_t *output, uint8_t *input)
-{
- uint64_t mask_511 = (uint64_t)0x7ffffffffffff;
- uint64_t i0 = load64_le(input);
- uint8_t *x00 = input + (uint32_t)6;
- uint64_t i1 = load64_le(x00);
- uint8_t *x01 = input + (uint32_t)12;
- uint64_t i2 = load64_le(x01);
- uint8_t *x02 = input + (uint32_t)19;
- uint64_t i3 = load64_le(x02);
- uint8_t *x0 = input + (uint32_t)24;
- uint64_t i4 = load64_le(x0);
- uint64_t output0 = i0 & mask_511;
- uint64_t output1 = i1 >> (uint32_t)3 & mask_511;
- uint64_t output2 = i2 >> (uint32_t)6 & mask_511;
- uint64_t output3 = i3 >> (uint32_t)1 & mask_511;
- uint64_t output4 = i4 >> (uint32_t)12 & mask_511;
- Hacl_EC_Format_upd_5(output, output0, output1, output2, output3, output4);
-}
-
-static void
-Hacl_EC_Format_store_4(uint8_t *output, uint64_t v0, uint64_t v1, uint64_t v2, uint64_t v3)
-{
- uint8_t *b0 = output;
- uint8_t *b1 = output + (uint32_t)8;
- uint8_t *b2 = output + (uint32_t)16;
- uint8_t *b3 = output + (uint32_t)24;
- store64_le(b0, v0);
- store64_le(b1, v1);
- store64_le(b2, v2);
- store64_le(b3, v3);
-}
-
-static void
-Hacl_EC_Format_fcontract_first_carry_pass(uint64_t *input)
-{
- uint64_t t0 = input[0];
- uint64_t t1 = input[1];
- uint64_t t2 = input[2];
- uint64_t t3 = input[3];
- uint64_t t4 = input[4];
- uint64_t t1_ = t1 + (t0 >> (uint32_t)51);
- uint64_t t0_ = t0 & (uint64_t)0x7ffffffffffff;
- uint64_t t2_ = t2 + (t1_ >> (uint32_t)51);
- uint64_t t1__ = t1_ & (uint64_t)0x7ffffffffffff;
- uint64_t t3_ = t3 + (t2_ >> (uint32_t)51);
- uint64_t t2__ = t2_ & (uint64_t)0x7ffffffffffff;
- uint64_t t4_ = t4 + (t3_ >> (uint32_t)51);
- uint64_t t3__ = t3_ & (uint64_t)0x7ffffffffffff;
- Hacl_EC_Format_upd_5_(input, t0_, t1__, t2__, t3__, t4_);
-}
-
-static void
-Hacl_EC_Format_fcontract_first_carry_full(uint64_t *input)
-{
- Hacl_EC_Format_fcontract_first_carry_pass(input);
- Hacl_Bignum_Modulo_carry_top(input);
-}
-
-static void
-Hacl_EC_Format_fcontract_second_carry_pass(uint64_t *input)
-{
- uint64_t t0 = input[0];
- uint64_t t1 = input[1];
- uint64_t t2 = input[2];
- uint64_t t3 = input[3];
- uint64_t t4 = input[4];
- uint64_t t1_ = t1 + (t0 >> (uint32_t)51);
- uint64_t t0_ = t0 & (uint64_t)0x7ffffffffffff;
- uint64_t t2_ = t2 + (t1_ >> (uint32_t)51);
- uint64_t t1__ = t1_ & (uint64_t)0x7ffffffffffff;
- uint64_t t3_ = t3 + (t2_ >> (uint32_t)51);
- uint64_t t2__ = t2_ & (uint64_t)0x7ffffffffffff;
- uint64_t t4_ = t4 + (t3_ >> (uint32_t)51);
- uint64_t t3__ = t3_ & (uint64_t)0x7ffffffffffff;
- Hacl_EC_Format_upd_5_(input, t0_, t1__, t2__, t3__, t4_);
-}
-
-static void
-Hacl_EC_Format_fcontract_second_carry_full(uint64_t *input)
-{
- Hacl_EC_Format_fcontract_second_carry_pass(input);
- Hacl_Bignum_Modulo_carry_top(input);
- uint64_t i0 = input[0];
- uint64_t i1 = input[1];
- uint64_t i0_ = i0 & (((uint64_t)1 << (uint32_t)51) - (uint64_t)1);
- uint64_t i1_ = i1 + (i0 >> (uint32_t)51);
- input[0] = i0_;
- input[1] = i1_;
-}
-
-static void
-Hacl_EC_Format_fcontract_trim(uint64_t *input)
-{
- uint64_t a0 = input[0];
- uint64_t a1 = input[1];
- uint64_t a2 = input[2];
- uint64_t a3 = input[3];
- uint64_t a4 = input[4];
- uint64_t mask0 = FStar_UInt64_gte_mask(a0, (uint64_t)0x7ffffffffffed);
- uint64_t mask1 = FStar_UInt64_eq_mask(a1, (uint64_t)0x7ffffffffffff);
- uint64_t mask2 = FStar_UInt64_eq_mask(a2, (uint64_t)0x7ffffffffffff);
- uint64_t mask3 = FStar_UInt64_eq_mask(a3, (uint64_t)0x7ffffffffffff);
- uint64_t mask4 = FStar_UInt64_eq_mask(a4, (uint64_t)0x7ffffffffffff);
- uint64_t mask = mask0 & mask1 & mask2 & mask3 & mask4;
- uint64_t a0_ = a0 - ((uint64_t)0x7ffffffffffed & mask);
- uint64_t a1_ = a1 - ((uint64_t)0x7ffffffffffff & mask);
- uint64_t a2_ = a2 - ((uint64_t)0x7ffffffffffff & mask);
- uint64_t a3_ = a3 - ((uint64_t)0x7ffffffffffff & mask);
- uint64_t a4_ = a4 - ((uint64_t)0x7ffffffffffff & mask);
- Hacl_EC_Format_upd_5_(input, a0_, a1_, a2_, a3_, a4_);
-}
-
-static void
-Hacl_EC_Format_fcontract_store(uint8_t *output, uint64_t *input)
-{
- uint64_t t0 = input[0];
- uint64_t t1 = input[1];
- uint64_t t2 = input[2];
- uint64_t t3 = input[3];
- uint64_t t4 = input[4];
- uint64_t o0 = t1 << (uint32_t)51 | t0;
- uint64_t o1 = t2 << (uint32_t)38 | t1 >> (uint32_t)13;
- uint64_t o2 = t3 << (uint32_t)25 | t2 >> (uint32_t)26;
- uint64_t o3 = t4 << (uint32_t)12 | t3 >> (uint32_t)39;
- Hacl_EC_Format_store_4(output, o0, o1, o2, o3);
-}
-
-static void
-Hacl_EC_Format_fcontract(uint8_t *output, uint64_t *input)
-{
- Hacl_EC_Format_fcontract_first_carry_full(input);
- Hacl_EC_Format_fcontract_second_carry_full(input);
- Hacl_EC_Format_fcontract_trim(input);
- Hacl_EC_Format_fcontract_store(output, input);
-}
-
-static void
-Hacl_EC_Format_scalar_of_point(uint8_t *scalar, uint64_t *point)
-{
- uint64_t *x = point;
- uint64_t *z = point + (uint32_t)5;
- uint64_t buf[10] = { 0 };
- uint64_t *zmone = buf;
- uint64_t *sc = buf + (uint32_t)5;
- Hacl_Bignum_crecip(zmone, z);
- Hacl_Bignum_fmul(sc, x, zmone);
- Hacl_EC_Format_fcontract(scalar, sc);
-}
-
-static void
-Hacl_EC_crypto_scalarmult__(
- uint8_t *mypublic,
- uint8_t *scalar,
- uint8_t *basepoint,
- uint64_t *q)
-{
- uint64_t buf[15] = { 0 };
- uint64_t *nq = buf;
- uint64_t *x = nq;
- (void)(nq + (uint32_t)5);
- (void)(buf + (uint32_t)5);
- x[0] = (uint64_t)1;
- Hacl_EC_Ladder_cmult(nq, scalar, q);
- Hacl_EC_Format_scalar_of_point(mypublic, nq);
-}
-
-static void
-Hacl_EC_crypto_scalarmult_(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint, uint64_t *q)
-{
- uint8_t e[32] = { 0 };
- memcpy(e, secret, (uint32_t)32 * sizeof secret[0]);
- uint8_t e0 = e[0];
- uint8_t e31 = e[31];
- uint8_t e01 = e0 & (uint8_t)248;
- uint8_t e311 = e31 & (uint8_t)127;
- uint8_t e312 = e311 | (uint8_t)64;
- e[0] = e01;
- e[31] = e312;
- uint8_t *scalar = e;
- Hacl_EC_crypto_scalarmult__(mypublic, scalar, basepoint, q);
-}
-
-void
-Hacl_EC_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint)
-{
- uint64_t buf[10] = { 0 };
- uint64_t *x = buf;
- uint64_t *z = buf + (uint32_t)5;
- Hacl_EC_Format_fexpand(x, basepoint);
- z[0] = (uint64_t)1;
- uint64_t *q = buf;
- Hacl_EC_crypto_scalarmult_(mypublic, secret, basepoint, q);
-}
-
-void *
-Curve25519_op_String_Access(FStar_Monotonic_HyperStack_mem h, uint8_t *b)
-{
- return (void *)(uint8_t)0;
-}
-
-void
-Curve25519_crypto_scalarmult(uint8_t *mypublic, uint8_t *secret, uint8_t *basepoint)
-{
- Hacl_EC_crypto_scalarmult(mypublic, secret, basepoint);
-}
diff --git a/lib/freebl/verified/kremlib.h b/lib/freebl/verified/kremlib.h
index 5f1f1dc8e..c5ba5de2f 100644
--- a/lib/freebl/verified/kremlib.h
+++ b/lib/freebl/verified/kremlib.h
@@ -1,88 +1,191 @@
-// Copyright 2016-2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
#ifndef __KREMLIB_H
#define __KREMLIB_H
-#include <inttypes.h>
-#include <stdlib.h>
-
-#include <stdbool.h>
-#include <stdio.h>
-#include <string.h>
-#include <time.h>
-
-// Define __cdecl and friends when using GCC, so that we can safely compile code
-// that contains __cdecl on all platforms.
-#ifndef _MSC_VER
-// Use the gcc predefined macros if on a platform/architectures that set them.
-// Otherwise define them to be empty.
-#ifndef __cdecl
-#define __cdecl
-#endif
-#ifndef __stdcall
-#define __stdcall
-#endif
-#ifndef __fastcall
-#define __fastcall
-#endif
+#include "kremlib_base.h"
+
+/* For tests only: we might need this function to be forward-declared, because
+ * the dependency on WasmSupport appears very late, after SimplifyWasm, and
+ * sadly, after the topological order has been done. */
+void WasmSupport_check_buffer_size(uint32_t s);
+
+/******************************************************************************/
+/* Stubs to ease compilation of non-Low* code */
+/******************************************************************************/
+
+/* Some types that KreMLin has no special knowledge of; many of them appear in
+ * signatures of ghost functions, meaning that it suffices to give them (any)
+ * definition. */
+typedef void *FStar_Seq_Base_seq, *Prims_prop, *FStar_HyperStack_mem,
+ *FStar_Set_set, *Prims_st_pre_h, *FStar_Heap_heap, *Prims_all_pre_h,
+ *FStar_TSet_set, *Prims_list, *FStar_Map_t, *FStar_UInt63_t_,
+ *FStar_Int63_t_, *FStar_UInt63_t, *FStar_Int63_t, *FStar_UInt_uint_t,
+ *FStar_Int_int_t, *FStar_HyperStack_stackref, *FStar_Bytes_bytes,
+ *FStar_HyperHeap_rid, *FStar_Heap_aref, *FStar_Monotonic_Heap_heap,
+ *FStar_Monotonic_Heap_aref, *FStar_Monotonic_HyperHeap_rid,
+ *FStar_Monotonic_HyperStack_mem, *FStar_Char_char_;
+
+typedef const char *Prims_string;
+
+/* For "bare" targets that do not have a C stdlib, the user might want to use
+ * [-add-include '"mydefinitions.h"'] and override these. */
+#ifndef KRML_HOST_PRINTF
+#define KRML_HOST_PRINTF printf
#endif
-// GCC-specific attribute syntax; everyone else gets the standard C inline
-// attribute.
-#ifdef __GNU_C__
-#ifndef __clang__
-#define force_inline inline __attribute__((always_inline))
-#else
-#define force_inline inline
+#ifndef KRML_HOST_EXIT
+#define KRML_HOST_EXIT exit
#endif
-#else
-#define force_inline inline
+
+#ifndef KRML_HOST_MALLOC
+#define KRML_HOST_MALLOC malloc
#endif
-// Uppercase issue; we have to define lowercase version of the C macros (as we
-// have no way to refer to an uppercase *variable* in F*).
-extern int exit_success;
-extern int exit_failure;
-
-// Some types that KreMLin has no special knowledge of; many of them appear in
-// signatures of ghost functions, meaning that it suffices to give them (any)
-// definition.
-typedef void *Prims_pos, *Prims_nat, *Prims_nonzero, *FStar_Seq_Base_seq,
- *Prims_int, *Prims_prop, *FStar_HyperStack_mem, *FStar_Set_set,
- *Prims_st_pre_h, *FStar_Heap_heap, *Prims_all_pre_h, *FStar_TSet_set,
- *Prims_string, *Prims_list, *FStar_Map_t, *FStar_UInt63_t_, *FStar_Int63_t_,
- *FStar_UInt63_t, *FStar_Int63_t, *FStar_UInt_uint_t, *FStar_Int_int_t,
- *FStar_HyperStack_stackref, *FStar_Bytes_bytes, *FStar_HyperHeap_rid,
- *FStar_Heap_aref, *FStar_Monotonic_Heap_heap,
- *FStar_Monotonic_HyperHeap_rid, *FStar_Monotonic_HyperStack_mem;
-
-#define KRML_CHECK_SIZE(elt, size) \
- if (((size_t)size) > SIZE_MAX / sizeof(elt)) { \
- printf("Maximum allocatable size exceeded, aborting before overflow at " \
- "%s:%d\n", \
- __FILE__, __LINE__); \
- exit(253); \
+/* In statement position, exiting is easy. */
+#define KRML_EXIT \
+ do { \
+ KRML_HOST_PRINTF("Unimplemented function at %s:%d\n", __FILE__, __LINE__); \
+ KRML_HOST_EXIT(254); \
+ } while (0)
+
+/* In expression position, use the comma-operator and a malloc to return an
+ * expression of the right size. KreMLin passes t as the parameter to the macro.
+ */
+#define KRML_EABORT(t, msg) \
+ (KRML_HOST_PRINTF("KreMLin abort at %s:%d\n%s\n", __FILE__, __LINE__, msg), \
+ KRML_HOST_EXIT(255), *((t *)KRML_HOST_MALLOC(sizeof(t))))
+
+/* In FStar.Buffer.fst, the size of arrays is uint32_t, but it's a number of
+ * *elements*. Do an ugly, run-time check (some of which KreMLin can eliminate).
+ */
+#define KRML_CHECK_SIZE(elt, size) \
+ if (((size_t)size) > SIZE_MAX / sizeof(elt)) { \
+ KRML_HOST_PRINTF( \
+ "Maximum allocatable size exceeded, aborting before overflow at " \
+ "%s:%d\n", \
+ __FILE__, __LINE__); \
+ KRML_HOST_EXIT(253); \
}
-// Endian-ness
+/* A series of GCC atrocities to trace function calls (kremlin's [-d c-calls]
+ * option). Useful when trying to debug, say, Wasm, to compare traces. */
+/* clang-format off */
+#ifdef __GNUC__
+#define KRML_FORMAT(X) _Generic((X), \
+ uint8_t : "0x%08" PRIx8, \
+ uint16_t: "0x%08" PRIx16, \
+ uint32_t: "0x%08" PRIx32, \
+ uint64_t: "0x%08" PRIx64, \
+ int8_t : "0x%08" PRIx8, \
+ int16_t : "0x%08" PRIx16, \
+ int32_t : "0x%08" PRIx32, \
+ int64_t : "0x%08" PRIx64, \
+ default : "%s")
+
+#define KRML_FORMAT_ARG(X) _Generic((X), \
+ uint8_t : X, \
+ uint16_t: X, \
+ uint32_t: X, \
+ uint64_t: X, \
+ int8_t : X, \
+ int16_t : X, \
+ int32_t : X, \
+ int64_t : X, \
+ default : "unknown")
+/* clang-format on */
+
+#define KRML_DEBUG_RETURN(X) \
+ ({ \
+ __auto_type _ret = (X); \
+ KRML_HOST_PRINTF("returning: "); \
+ KRML_HOST_PRINTF(KRML_FORMAT(_ret), KRML_FORMAT_ARG(_ret)); \
+ KRML_HOST_PRINTF(" \n"); \
+ _ret; \
+ })
+#endif
+
+#define FStar_Buffer_eqb(b1, b2, n) \
+ (memcmp((b1), (b2), (n) * sizeof((b1)[0])) == 0)
+
+/* Stubs to make ST happy. Important note: you must generate a use of the macro
+ * argument, otherwise, you may have FStar_ST_recall(f) as the only use of f;
+ * KreMLin will think that this is a valid use, but then the C compiler, after
+ * macro expansion, will error out. */
+#define FStar_HyperHeap_root 0
+#define FStar_Pervasives_Native_fst(x) (x).fst
+#define FStar_Pervasives_Native_snd(x) (x).snd
+#define FStar_Seq_Base_createEmpty(x) 0
+#define FStar_Seq_Base_create(len, init) 0
+#define FStar_Seq_Base_upd(s, i, e) 0
+#define FStar_Seq_Base_eq(l1, l2) 0
+#define FStar_Seq_Base_length(l1) 0
+#define FStar_Seq_Base_append(x, y) 0
+#define FStar_Seq_Base_slice(x, y, z) 0
+#define FStar_Seq_Properties_snoc(x, y) 0
+#define FStar_Seq_Properties_cons(x, y) 0
+#define FStar_Seq_Base_index(x, y) 0
+#define FStar_HyperStack_is_eternal_color(x) 0
+#define FStar_Monotonic_HyperHeap_root 0
+#define FStar_Buffer_to_seq_full(x) 0
+#define FStar_Buffer_recall(x)
+#define FStar_HyperStack_ST_op_Colon_Equals(x, v) KRML_EXIT
+#define FStar_HyperStack_ST_op_Bang(x) 0
+#define FStar_HyperStack_ST_salloc(x) 0
+#define FStar_HyperStack_ST_ralloc(x, y) 0
+#define FStar_HyperStack_ST_new_region(x) (0)
+#define FStar_Monotonic_RRef_m_alloc(x) \
+ { \
+ 0 \
+ }
-// ... for Linux
+#define FStar_HyperStack_ST_recall(x) \
+ do { \
+ (void)(x); \
+ } while (0)
+
+#define FStar_HyperStack_ST_recall_region(x) \
+ do { \
+ (void)(x); \
+ } while (0)
+
+#define FStar_Monotonic_RRef_m_recall(x1, x2) \
+ do { \
+ (void)(x1); \
+ (void)(x2); \
+ } while (0)
+
+#define FStar_Monotonic_RRef_m_write(x1, x2, x3, x4, x5) \
+ do { \
+ (void)(x1); \
+ (void)(x2); \
+ (void)(x3); \
+ (void)(x4); \
+ (void)(x5); \
+ } while (0)
+
+/******************************************************************************/
+/* Endian-ness macros that can only be implemented in C */
+/******************************************************************************/
+
+/* ... for Linux */
#if defined(__linux__) || defined(__CYGWIN__)
#include <endian.h>
-// ... for OSX
+/* ... for OSX */
#elif defined(__APPLE__)
#include <libkern/OSByteOrder.h>
#define htole64(x) OSSwapHostToLittleInt64(x)
@@ -100,14 +203,33 @@ typedef void *Prims_pos, *Prims_nat, *Prims_nonzero, *FStar_Seq_Base_seq,
#define htobe32(x) OSSwapHostToBigInt32(x)
#define be32toh(x) OSSwapBigToHostInt32(x)
-// ... for Windows
-#elif (defined(_WIN16) || defined(_WIN32) || defined(_WIN64)) && \
- !defined(__WINDOWS__)
-#include <windows.h>
+/* ... for Solaris */
+#elif defined(__sun__)
+#include <sys/byteorder.h>
+#define htole64(x) LE_64(x)
+#define le64toh(x) LE_IN64(x)
+#define htobe64(x) BE_64(x)
+#define be64toh(x) BE_IN64(x)
+
+#define htole16(x) LE_16(x)
+#define le16toh(x) LE_IN16(x)
+#define htobe16(x) BE_16(x)
+#define be16toh(x) BE_IN16(x)
+
+#define htole32(x) LE_32(x)
+#define le32toh(x) LE_IN32(x)
+#define htobe32(x) BE_32(x)
+#define be32toh(x) BE_IN32(x)
+
+/* ... for the BSDs */
+#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
+#include <sys/endian.h>
+#elif defined(__OpenBSD__)
+#include <endian.h>
-#if BYTE_ORDER == LITTLE_ENDIAN
+/* ... for Windows (MSVC)... not targeting XBOX 360! */
+#elif defined(_MSC_VER)
-#if defined(_MSC_VER)
#include <stdlib.h>
#define htobe16(x) _byteswap_ushort(x)
#define htole16(x) (x)
@@ -124,7 +246,9 @@ typedef void *Prims_pos, *Prims_nat, *Prims_nonzero, *FStar_Seq_Base_seq,
#define be64toh(x) _byteswap_uint64(x)
#define le64toh(x) (x)
-#elif defined(__GNUC__) || defined(__clang__)
+/* ... for Windows (GCC-like, e.g. mingw or clang) */
+#elif (defined(_WIN32) || defined(_WIN64)) && \
+ (defined(__GNUC__) || defined(__clang__))
#define htobe16(x) __builtin_bswap16(x)
#define htole16(x) (x)
@@ -140,14 +264,67 @@ typedef void *Prims_pos, *Prims_nat, *Prims_nonzero, *FStar_Seq_Base_seq,
#define htole64(x) (x)
#define be64toh(x) __builtin_bswap64(x)
#define le64toh(x) (x)
-#endif
-#endif
+/* ... generic big-endian fallback code */
+#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+/* byte swapping code inspired by:
+ * https://github.com/rweather/arduinolibs/blob/master/libraries/Crypto/utility/EndianUtil.h
+ * */
+
+#define htobe32(x) (x)
+#define be32toh(x) (x)
+#define htole32(x) \
+ (__extension__({ \
+ uint32_t _temp = (x); \
+ ((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \
+ ((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \
+ }))
+#define le32toh(x) (htole32((x)))
+
+#define htobe64(x) (x)
+#define be64toh(x) (x)
+#define htole64(x) \
+ (__extension__({ \
+ uint64_t __temp = (x); \
+ uint32_t __low = htobe32((uint32_t)__temp); \
+ uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \
+ (((uint64_t)__low) << 32) | __high; \
+ }))
+#define le64toh(x) (htole64((x)))
+
+/* ... generic little-endian fallback code */
+#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#endif
+#define htole32(x) (x)
+#define le32toh(x) (x)
+#define htobe32(x) \
+ (__extension__({ \
+ uint32_t _temp = (x); \
+ ((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \
+ ((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \
+ }))
+#define be32toh(x) (htobe32((x)))
-// Loads and stores. These avoid undefined behavior due to unaligned memory
-// accesses, via memcpy.
+#define htole64(x) (x)
+#define le64toh(x) (x)
+#define htobe64(x) \
+ (__extension__({ \
+ uint64_t __temp = (x); \
+ uint32_t __low = htobe32((uint32_t)__temp); \
+ uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \
+ (((uint64_t)__low) << 32) | __high; \
+ }))
+#define be64toh(x) (htobe64((x)))
+
+/* ... couldn't determine endian-ness of the target platform */
+#else
+#error "Please define __BYTE_ORDER__!"
+
+#endif /* defined(__linux__) || ... */
+
+/* Loads and stores. These avoid undefined behavior due to unaligned memory
+ * accesses, via memcpy. */
inline static uint16_t
load16(uint8_t *b)
@@ -206,101 +383,139 @@ store64(uint8_t *b, uint64_t i)
#define load64_be(b) (be64toh(load64(b)))
#define store64_be(b, i) (store64(b, htobe64(i)))
-// Integer types
-typedef uint64_t FStar_UInt64_t, FStar_UInt64_t_;
-typedef int64_t FStar_Int64_t, FStar_Int64_t_;
-typedef uint32_t FStar_UInt32_t, FStar_UInt32_t_;
-typedef int32_t FStar_Int32_t, FStar_Int32_t_;
-typedef uint16_t FStar_UInt16_t, FStar_UInt16_t_;
-typedef int16_t FStar_Int16_t, FStar_Int16_t_;
-typedef uint8_t FStar_UInt8_t, FStar_UInt8_t_;
-typedef int8_t FStar_Int8_t, FStar_Int8_t_;
+/******************************************************************************/
+/* Checked integers to ease the compilation of non-Low* code */
+/******************************************************************************/
+
+typedef int32_t Prims_pos, Prims_nat, Prims_nonzero, Prims_int,
+ krml_checked_int_t;
-// Constant time comparisons
-static inline uint8_t
-FStar_UInt8_eq_mask(uint8_t x, uint8_t y)
+inline static bool
+Prims_op_GreaterThanOrEqual(int32_t x, int32_t y)
{
- x = ~(x ^ y);
- x &= x << 4;
- x &= x << 2;
- x &= x << 1;
- return (int8_t)x >> 7;
+ return x >= y;
}
-static inline uint8_t
-FStar_UInt8_gte_mask(uint8_t x, uint8_t y)
+inline static bool
+Prims_op_LessThanOrEqual(int32_t x, int32_t y)
{
- return ~(uint8_t)(((int32_t)x - y) >> 31);
+ return x <= y;
}
-static inline uint16_t
-FStar_UInt16_eq_mask(uint16_t x, uint16_t y)
+inline static bool
+Prims_op_GreaterThan(int32_t x, int32_t y)
{
- x = ~(x ^ y);
- x &= x << 8;
- x &= x << 4;
- x &= x << 2;
- x &= x << 1;
- return (int16_t)x >> 15;
+ return x > y;
}
-static inline uint16_t
-FStar_UInt16_gte_mask(uint16_t x, uint16_t y)
+inline static bool
+Prims_op_LessThan(int32_t x, int32_t y)
{
- return ~(uint16_t)(((int32_t)x - y) >> 31);
+ return x < y;
}
-static inline uint32_t
-FStar_UInt32_eq_mask(uint32_t x, uint32_t y)
+#define RETURN_OR(x) \
+ do { \
+ int64_t __ret = x; \
+ if (__ret < INT32_MIN || INT32_MAX < __ret) { \
+ KRML_HOST_PRINTF("Prims.{int,nat,pos} integer overflow at %s:%d\n", \
+ __FILE__, __LINE__); \
+ KRML_HOST_EXIT(252); \
+ } \
+ return (int32_t)__ret; \
+ } while (0)
+
+inline static int32_t
+Prims_pow2(int32_t x)
{
- x = ~(x ^ y);
- x &= x << 16;
- x &= x << 8;
- x &= x << 4;
- x &= x << 2;
- x &= x << 1;
- return ((int32_t)x) >> 31;
+ RETURN_OR((int64_t)1 << (int64_t)x);
}
-static inline uint32_t
-FStar_UInt32_gte_mask(uint32_t x, uint32_t y)
+inline static int32_t
+Prims_op_Multiply(int32_t x, int32_t y)
{
- return ~((uint32_t)(((int64_t)x - y) >> 63));
+ RETURN_OR((int64_t)x * (int64_t)y);
}
-static inline uint64_t
-FStar_UInt64_eq_mask(uint64_t x, uint64_t y)
+inline static int32_t
+Prims_op_Addition(int32_t x, int32_t y)
{
- x = ~(x ^ y);
- x &= x << 32;
- x &= x << 16;
- x &= x << 8;
- x &= x << 4;
- x &= x << 2;
- x &= x << 1;
- return ((int64_t)x) >> 63;
+ RETURN_OR((int64_t)x + (int64_t)y);
}
-static inline uint64_t
-FStar_UInt64_gte_mask(uint64_t x, uint64_t y)
+inline static int32_t
+Prims_op_Subtraction(int32_t x, int32_t y)
{
- uint64_t low63 =
- ~((uint64_t)((int64_t)((int64_t)(x & UINT64_C(0x7fffffffffffffff)) -
- (int64_t)(y & UINT64_C(0x7fffffffffffffff))) >>
- 63));
- uint64_t high_bit =
- ~((uint64_t)((int64_t)((int64_t)(x & UINT64_C(0x8000000000000000)) -
- (int64_t)(y & UINT64_C(0x8000000000000000))) >>
- 63));
- return low63 & high_bit;
+ RETURN_OR((int64_t)x - (int64_t)y);
}
-// Platform-specific 128-bit arithmetic. These are static functions in a header,
-// so that each translation unit gets its own copy and the C compiler can
-// optimize.
-#ifdef HAVE_INT128_SUPPORT
+inline static int32_t
+Prims_op_Division(int32_t x, int32_t y)
+{
+ RETURN_OR((int64_t)x / (int64_t)y);
+}
+
+inline static int32_t
+Prims_op_Modulus(int32_t x, int32_t y)
+{
+ RETURN_OR((int64_t)x % (int64_t)y);
+}
+
+inline static int8_t
+FStar_UInt8_uint_to_t(int8_t x)
+{
+ return x;
+}
+inline static int16_t
+FStar_UInt16_uint_to_t(int16_t x)
+{
+ return x;
+}
+inline static int32_t
+FStar_UInt32_uint_to_t(int32_t x)
+{
+ return x;
+}
+inline static int64_t
+FStar_UInt64_uint_to_t(int64_t x)
+{
+ return x;
+}
+
+inline static int8_t
+FStar_UInt8_v(int8_t x)
+{
+ return x;
+}
+inline static int16_t
+FStar_UInt16_v(int16_t x)
+{
+ return x;
+}
+inline static int32_t
+FStar_UInt32_v(int32_t x)
+{
+ return x;
+}
+inline static int64_t
+FStar_UInt64_v(int64_t x)
+{
+ return x;
+}
+
+/* Platform-specific 128-bit arithmetic. These are static functions in a header,
+ * so that each translation unit gets its own copy and the C compiler can
+ * optimize. */
+#ifndef KRML_NOUINT128
typedef unsigned __int128 FStar_UInt128_t, FStar_UInt128_t_, uint128_t;
+static inline void
+print128(const char *where, uint128_t n)
+{
+ KRML_HOST_PRINTF("%s: [%" PRIu64 ",%" PRIu64 "]\n", where,
+ (uint64_t)(n >> 64), (uint64_t)n);
+}
+
static inline uint128_t
load128_le(uint8_t *b)
{
@@ -344,8 +559,6 @@ store128_be(uint8_t *b, uint128_t n)
#define FStar_UInt128_shift_right(x, y) ((x) >> (y))
#define FStar_UInt128_uint64_to_uint128(x) ((uint128_t)(x))
#define FStar_UInt128_uint128_to_uint64(x) ((uint64_t)(x))
-#define FStar_Int_Cast_Full_uint64_to_uint128(x) ((uint128_t)(x))
-#define FStar_Int_Cast_Full_uint128_to_uint64(x) ((uint64_t)(x))
#define FStar_UInt128_mul_wide(x, y) ((uint128_t)(x) * (y))
#define FStar_UInt128_op_Hat_Hat(x, y) ((x) ^ (y))
@@ -368,12 +581,20 @@ FStar_UInt128_gte_mask(uint128_t x, uint128_t y)
return ((uint128_t)mask) << 64 | mask;
}
-#else // defined(HAVE_INT128_SUPPORT)
+#else /* !defined(KRML_NOUINT128) */
-#include "fstar_uint128.h"
+/* This is a bad circular dependency... should fix it properly. */
+#include "FStar.h"
typedef FStar_UInt128_uint128 FStar_UInt128_t_, uint128_t;
+/* A series of definitions written using pointers. */
+static inline void
+print128_(const char *where, uint128_t *n)
+{
+ KRML_HOST_PRINTF("%s: [0x%08" PRIx64 ",0x%08" PRIx64 "]\n", where, n->high, n->low);
+}
+
static inline void
load128_le_(uint8_t *b, uint128_t *r)
{
@@ -402,11 +623,50 @@ store128_be_(uint8_t *b, uint128_t *n)
store64_be(b + 8, n->low);
}
-/* #define print128 print128_ */
+#ifndef KRML_NOSTRUCT_PASSING
+
+static inline void
+print128(const char *where, uint128_t n)
+{
+ print128_(where, &n);
+}
+
+static inline uint128_t
+load128_le(uint8_t *b)
+{
+ uint128_t r;
+ load128_le_(b, &r);
+ return r;
+}
+
+static inline void
+store128_le(uint8_t *b, uint128_t n)
+{
+ store128_le_(b, &n);
+}
+
+static inline uint128_t
+load128_be(uint8_t *b)
+{
+ uint128_t r;
+ load128_be_(b, &r);
+ return r;
+}
+
+static inline void
+store128_be(uint8_t *b, uint128_t n)
+{
+ store128_be_(b, &n);
+}
+
+#else /* defined(KRML_NOSTRUCT_PASSING) */
+
+#define print128 print128_
#define load128_le load128_le_
#define store128_le store128_le_
#define load128_be load128_be_
#define store128_be store128_be_
-#endif // HAVE_INT128_SUPPORT
-#endif // __KREMLIB_H
+#endif /* KRML_NOSTRUCT_PASSING */
+#endif /* KRML_NOUINT128 */
+#endif /* __KREMLIB_H */
diff --git a/lib/freebl/verified/kremlib_base.h b/lib/freebl/verified/kremlib_base.h
new file mode 100644
index 000000000..61bac11d4
--- /dev/null
+++ b/lib/freebl/verified/kremlib_base.h
@@ -0,0 +1,191 @@
+/* Copyright 2016-2017 INRIA and Microsoft Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __KREMLIB_BASE_H
+#define __KREMLIB_BASE_H
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+/******************************************************************************/
+/* Some macros to ease compatibility */
+/******************************************************************************/
+
+/* Define __cdecl and friends when using GCC, so that we can safely compile code
+ * that contains __cdecl on all platforms. Note that this is in a separate
+ * header so that Dafny-generated code can include just this file. */
+#ifndef _MSC_VER
+/* Use the gcc predefined macros if on a platform/architectures that set them.
+ * Otherwise define them to be empty. */
+#ifndef __cdecl
+#define __cdecl
+#endif
+#ifndef __stdcall
+#define __stdcall
+#endif
+#ifndef __fastcall
+#define __fastcall
+#endif
+#endif
+
+#ifdef __GNUC__
+#define inline __inline__
+#endif
+
+/* GCC-specific attribute syntax (GCC predefines __GNUC__, not __GNU_C__);
+ * everyone else gets the standard C inline attribute. */
+#ifdef __GNUC__
+#ifndef __clang__
+#define force_inline inline __attribute__((always_inline))
+#else
+#define force_inline inline
+#endif
+#else
+#define force_inline inline
+#endif
+
+/******************************************************************************/
+/* Implementing C.fst */
+/******************************************************************************/
+
+/* Uppercase issue; we have to define lowercase versions of the C macros (as we
+ * have no way to refer to an uppercase *variable* in F*). */
+extern int exit_success;
+extern int exit_failure;
+
+/* This one allows the user to write C.EXIT_SUCCESS. */
+typedef int exit_code;
+
+void print_string(const char *s);
+void print_bytes(uint8_t *b, uint32_t len);
+
+/* The universal null pointer defined in C.Nullity.fst */
+#define C_Nullity_null(X) 0
+
+/* If some globals need to be initialized before the main, then kremlin will
+ * generate and try to link last a function with this type: */
+void kremlinit_globals(void);
+
+/******************************************************************************/
+/* Implementation of machine integers (possibly of 128-bit integers) */
+/******************************************************************************/
+
+/* Integer types */
+typedef uint64_t FStar_UInt64_t, FStar_UInt64_t_;
+typedef int64_t FStar_Int64_t, FStar_Int64_t_;
+typedef uint32_t FStar_UInt32_t, FStar_UInt32_t_;
+typedef int32_t FStar_Int32_t, FStar_Int32_t_;
+typedef uint16_t FStar_UInt16_t, FStar_UInt16_t_;
+typedef int16_t FStar_Int16_t, FStar_Int16_t_;
+typedef uint8_t FStar_UInt8_t, FStar_UInt8_t_;
+typedef int8_t FStar_Int8_t, FStar_Int8_t_;
+
+static inline uint32_t
+rotate32_left(uint32_t x, uint32_t n)
+{
+    /* requires 0 < n < 32; n == 0 would make (32 - n) a shift by 32 (UB) */
+    return (x << n) | (x >> (32 - n));
+}
+static inline uint32_t
+rotate32_right(uint32_t x, uint32_t n)
+{
+    /* requires 0 < n < 32; n == 0 would make (32 - n) a shift by 32 (UB) */
+    return (x >> n) | (x << (32 - n));
+}
+
+/* Constant time comparisons */
+static inline uint8_t
+FStar_UInt8_eq_mask(uint8_t x, uint8_t y)
+{
+ x = ~(x ^ y);
+ x &= x << 4;
+ x &= x << 2;
+ x &= x << 1;
+ return (int8_t)x >> 7;
+}
+
+static inline uint8_t
+FStar_UInt8_gte_mask(uint8_t x, uint8_t y)
+{
+ return ~(uint8_t)(((int32_t)x - y) >> 31);
+}
+
+static inline uint16_t
+FStar_UInt16_eq_mask(uint16_t x, uint16_t y)
+{
+ x = ~(x ^ y);
+ x &= x << 8;
+ x &= x << 4;
+ x &= x << 2;
+ x &= x << 1;
+ return (int16_t)x >> 15;
+}
+
+static inline uint16_t
+FStar_UInt16_gte_mask(uint16_t x, uint16_t y)
+{
+ return ~(uint16_t)(((int32_t)x - y) >> 31);
+}
+
+static inline uint32_t
+FStar_UInt32_eq_mask(uint32_t x, uint32_t y)
+{
+ x = ~(x ^ y);
+ x &= x << 16;
+ x &= x << 8;
+ x &= x << 4;
+ x &= x << 2;
+ x &= x << 1;
+ return ((int32_t)x) >> 31;
+}
+
+static inline uint32_t
+FStar_UInt32_gte_mask(uint32_t x, uint32_t y)
+{
+ return ~((uint32_t)(((int64_t)x - y) >> 63));
+}
+
+static inline uint64_t
+FStar_UInt64_eq_mask(uint64_t x, uint64_t y)
+{
+ x = ~(x ^ y);
+ x &= x << 32;
+ x &= x << 16;
+ x &= x << 8;
+ x &= x << 4;
+ x &= x << 2;
+ x &= x << 1;
+ return ((int64_t)x) >> 63;
+}
+
+static inline uint64_t
+FStar_UInt64_gte_mask(uint64_t x, uint64_t y)
+{
+ uint64_t low63 =
+ ~((uint64_t)((int64_t)((int64_t)(x & UINT64_C(0x7fffffffffffffff)) -
+ (int64_t)(y & UINT64_C(0x7fffffffffffffff))) >>
+ 63));
+ uint64_t high_bit =
+ ~((uint64_t)((int64_t)((int64_t)(x & UINT64_C(0x8000000000000000)) -
+ (int64_t)(y & UINT64_C(0x8000000000000000))) >>
+ 63));
+ return low63 & high_bit;
+}
+
+#endif