author    Kevin Jacobs <kjacobs@mozilla.com>    2020-03-03 15:47:36 -0800
committer Kevin Jacobs <kjacobs@mozilla.com>    2020-03-03 15:47:36 -0800
commit    6c11bb6e251f48a9a1cc8422f21e68caa023d699 (patch)
tree      d21852ed1c002beef860e1aa14618e4415a62c66
parent    20ac22ba66bba2acfee5d6bf104389698e674a95 (diff)
download  nss-hg-6c11bb6e251f48a9a1cc8422f21e68caa023d699.tar.gz
Backed out changeset b6677ae9067e (Bug 1612493) for Windows build failures. (tag: NSS_3_51_BETA2)
-rwxr-xr-x  automation/taskcluster/scripts/run_hacl.sh                                  |    2
-rw-r--r--  lib/freebl/verified/Hacl_Chacha20.c                                         |  165
-rw-r--r--  lib/freebl/verified/Hacl_Chacha20Poly1305_128.c                             |  389
-rw-r--r--  lib/freebl/verified/Hacl_Chacha20Poly1305_32.c                              |  236
-rw-r--r--  lib/freebl/verified/Hacl_Chacha20_Vec128.c                                  |  190
-rw-r--r--  lib/freebl/verified/Hacl_Curve25519_51.c                                    |  213
-rw-r--r--  lib/freebl/verified/Hacl_Kremlib.h                                          |   16
-rw-r--r--  lib/freebl/verified/Hacl_Poly1305_128.c                                     |  596
-rw-r--r--  lib/freebl/verified/Hacl_Poly1305_32.c                                      |  248
-rw-r--r--  lib/freebl/verified/kremlin/include/kremlin/internal/types.h               |   14
-rw-r--r--  lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt128.h            |   55
-rw-r--r--  lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt128_Verified.h   |   83
-rw-r--r--  lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt_8_16_32_64.h    |   21
-rw-r--r--  lib/freebl/verified/kremlin/kremlib/dist/minimal/LowStar_Endianness.h       |   13
-rw-r--r--  lib/freebl/verified/kremlin/kremlib/dist/minimal/fstar_uint128_gcc64.h      |    5
-rw-r--r--  lib/freebl/verified/libintvector.h                                          |  230
16 files changed, 1254 insertions(+), 1222 deletions(-)
diff --git a/automation/taskcluster/scripts/run_hacl.sh b/automation/taskcluster/scripts/run_hacl.sh
index 4ad6ca7f1..968632d81 100755
--- a/automation/taskcluster/scripts/run_hacl.sh
+++ b/automation/taskcluster/scripts/run_hacl.sh
@@ -13,7 +13,7 @@ set -e -x -v
# HACL CI.
# When bug 1593647 is resolved, extract the code on CI again.
git clone -q "https://github.com/project-everest/hacl-star" ~/hacl-star
-git -C ~/hacl-star checkout -q 4faa2f0e26b544c81e5d2e0af773cc8008863470
+git -C ~/hacl-star checkout -q 186a985597d57e3b587ceb0ef6deb0b5de706ae2
# Format the C snapshot.
cd ~/hacl-star/dist/mozilla
diff --git a/lib/freebl/verified/Hacl_Chacha20.c b/lib/freebl/verified/Hacl_Chacha20.c
index a78417253..c18421aa0 100644
--- a/lib/freebl/verified/Hacl_Chacha20.c
+++ b/lib/freebl/verified/Hacl_Chacha20.c
@@ -27,8 +27,13 @@ uint32_t
Hacl_Impl_Chacha20_Vec_chacha20_constants[4U] =
{ (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
-static inline void
-quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
+inline static void
+Hacl_Impl_Chacha20_Core32_quarter_round(
+ uint32_t *st,
+ uint32_t a,
+ uint32_t b,
+ uint32_t c,
+ uint32_t d)
{
uint32_t sta = st[a];
uint32_t stb0 = st[b];
@@ -64,42 +69,74 @@ quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
st[b] = std22;
}
-static inline void
-double_round(uint32_t *st)
+inline static void
+Hacl_Impl_Chacha20_Core32_double_round(uint32_t *st)
{
- quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U);
- quarter_round(st, (uint32_t)1U, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U);
- quarter_round(st, (uint32_t)2U, (uint32_t)6U, (uint32_t)10U, (uint32_t)14U);
- quarter_round(st, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U, (uint32_t)15U);
- quarter_round(st, (uint32_t)0U, (uint32_t)5U, (uint32_t)10U, (uint32_t)15U);
- quarter_round(st, (uint32_t)1U, (uint32_t)6U, (uint32_t)11U, (uint32_t)12U);
- quarter_round(st, (uint32_t)2U, (uint32_t)7U, (uint32_t)8U, (uint32_t)13U);
- quarter_round(st, (uint32_t)3U, (uint32_t)4U, (uint32_t)9U, (uint32_t)14U);
+ Hacl_Impl_Chacha20_Core32_quarter_round(st,
+ (uint32_t)0U,
+ (uint32_t)4U,
+ (uint32_t)8U,
+ (uint32_t)12U);
+ Hacl_Impl_Chacha20_Core32_quarter_round(st,
+ (uint32_t)1U,
+ (uint32_t)5U,
+ (uint32_t)9U,
+ (uint32_t)13U);
+ Hacl_Impl_Chacha20_Core32_quarter_round(st,
+ (uint32_t)2U,
+ (uint32_t)6U,
+ (uint32_t)10U,
+ (uint32_t)14U);
+ Hacl_Impl_Chacha20_Core32_quarter_round(st,
+ (uint32_t)3U,
+ (uint32_t)7U,
+ (uint32_t)11U,
+ (uint32_t)15U);
+ Hacl_Impl_Chacha20_Core32_quarter_round(st,
+ (uint32_t)0U,
+ (uint32_t)5U,
+ (uint32_t)10U,
+ (uint32_t)15U);
+ Hacl_Impl_Chacha20_Core32_quarter_round(st,
+ (uint32_t)1U,
+ (uint32_t)6U,
+ (uint32_t)11U,
+ (uint32_t)12U);
+ Hacl_Impl_Chacha20_Core32_quarter_round(st,
+ (uint32_t)2U,
+ (uint32_t)7U,
+ (uint32_t)8U,
+ (uint32_t)13U);
+ Hacl_Impl_Chacha20_Core32_quarter_round(st,
+ (uint32_t)3U,
+ (uint32_t)4U,
+ (uint32_t)9U,
+ (uint32_t)14U);
}
-static inline void
-rounds(uint32_t *st)
+inline static void
+Hacl_Impl_Chacha20_rounds(uint32_t *st)
{
- double_round(st);
- double_round(st);
- double_round(st);
- double_round(st);
- double_round(st);
- double_round(st);
- double_round(st);
- double_round(st);
- double_round(st);
- double_round(st);
+ Hacl_Impl_Chacha20_Core32_double_round(st);
+ Hacl_Impl_Chacha20_Core32_double_round(st);
+ Hacl_Impl_Chacha20_Core32_double_round(st);
+ Hacl_Impl_Chacha20_Core32_double_round(st);
+ Hacl_Impl_Chacha20_Core32_double_round(st);
+ Hacl_Impl_Chacha20_Core32_double_round(st);
+ Hacl_Impl_Chacha20_Core32_double_round(st);
+ Hacl_Impl_Chacha20_Core32_double_round(st);
+ Hacl_Impl_Chacha20_Core32_double_round(st);
+ Hacl_Impl_Chacha20_Core32_double_round(st);
}
-static inline void
-chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
+inline static void
+Hacl_Impl_Chacha20_chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
{
- memcpy(k, ctx, (uint32_t)16U * sizeof(ctx[0U]));
+ memcpy(k, ctx, (uint32_t)16U * sizeof ctx[0U]);
uint32_t ctr_u32 = ctr;
k[12U] = k[12U] + ctr_u32;
- rounds(k);
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
+ Hacl_Impl_Chacha20_rounds(k);
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
uint32_t *os = k;
uint32_t x = k[i] + ctx[i];
os[i] = x;
@@ -108,20 +145,20 @@ chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
}
static uint32_t
- chacha20_constants[4U] =
+ Hacl_Impl_Chacha20_chacha20_constants[4U] =
{ (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
-static inline void
-chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
+inline static void
+Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
{
uint32_t *uu____0 = ctx;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U) {
uint32_t *os = uu____0;
- uint32_t x = chacha20_constants[i];
+ uint32_t x = Hacl_Impl_Chacha20_chacha20_constants[i];
os[i] = x;
}
uint32_t *uu____1 = ctx + (uint32_t)4U;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i = i + (uint32_t)1U) {
uint32_t *os = uu____1;
uint8_t *bj = k + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
@@ -131,7 +168,7 @@ chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
}
ctx[12U] = ctr;
uint32_t *uu____2 = ctx + (uint32_t)13U;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i = i + (uint32_t)1U) {
uint32_t *os = uu____2;
uint8_t *bj = n1 + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
@@ -141,13 +178,17 @@ chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
}
}
-static inline void
-chacha20_encrypt_block(uint32_t *ctx, uint8_t *out, uint32_t incr1, uint8_t *text)
+inline static void
+Hacl_Impl_Chacha20_chacha20_encrypt_block(
+ uint32_t *ctx,
+ uint8_t *out,
+ uint32_t incr1,
+ uint8_t *text)
{
uint32_t k[16U] = { 0U };
- chacha20_core(k, ctx, incr1);
+ Hacl_Impl_Chacha20_chacha20_core(k, ctx, incr1);
uint32_t bl[16U] = { 0U };
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
uint32_t *os = bl;
uint8_t *bj = text + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
@@ -155,36 +196,48 @@ chacha20_encrypt_block(uint32_t *ctx, uint8_t *out, uint32_t incr1, uint8_t *tex
uint32_t x = r;
os[i] = x;
}
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
uint32_t *os = bl;
uint32_t x = bl[i] ^ k[i];
os[i] = x;
}
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
store32_le(out + i * (uint32_t)4U, bl[i]);
}
}
-static inline void
-chacha20_encrypt_last(uint32_t *ctx, uint32_t len, uint8_t *out, uint32_t incr1, uint8_t *text)
+inline static void
+Hacl_Impl_Chacha20_chacha20_encrypt_last(
+ uint32_t *ctx,
+ uint32_t len,
+ uint8_t *out,
+ uint32_t incr1,
+ uint8_t *text)
{
uint8_t plain[64U] = { 0U };
- memcpy(plain, text, len * sizeof(text[0U]));
- chacha20_encrypt_block(ctx, plain, incr1, plain);
- memcpy(out, plain, len * sizeof(plain[0U]));
+ memcpy(plain, text, len * sizeof text[0U]);
+ Hacl_Impl_Chacha20_chacha20_encrypt_block(ctx, plain, incr1, plain);
+ memcpy(out, plain, len * sizeof plain[0U]);
}
-static inline void
-chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text)
+inline static void
+Hacl_Impl_Chacha20_chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text)
{
uint32_t rem1 = len % (uint32_t)64U;
uint32_t nb = len / (uint32_t)64U;
uint32_t rem2 = len % (uint32_t)64U;
- for (uint32_t i = (uint32_t)0U; i < nb; i++) {
- chacha20_encrypt_block(ctx, out + i * (uint32_t)64U, i, text + i * (uint32_t)64U);
+ for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
+ Hacl_Impl_Chacha20_chacha20_encrypt_block(ctx,
+ out + i * (uint32_t)64U,
+ i,
+ text + i * (uint32_t)64U);
}
if (rem2 > (uint32_t)0U) {
- chacha20_encrypt_last(ctx, rem1, out + nb * (uint32_t)64U, nb, text + nb * (uint32_t)64U);
+ Hacl_Impl_Chacha20_chacha20_encrypt_last(ctx,
+ rem1,
+ out + nb * (uint32_t)64U,
+ nb,
+ text + nb * (uint32_t)64U);
}
}
@@ -198,8 +251,8 @@ Hacl_Chacha20_chacha20_encrypt(
uint32_t ctr)
{
uint32_t ctx[16U] = { 0U };
- chacha20_init(ctx, key, n1, ctr);
- chacha20_update(ctx, len, out, text);
+ Hacl_Impl_Chacha20_chacha20_init(ctx, key, n1, ctr);
+ Hacl_Impl_Chacha20_chacha20_update(ctx, len, out, text);
}
void
@@ -212,6 +265,6 @@ Hacl_Chacha20_chacha20_decrypt(
uint32_t ctr)
{
uint32_t ctx[16U] = { 0U };
- chacha20_init(ctx, key, n1, ctr);
- chacha20_update(ctx, len, out, cipher);
+ Hacl_Impl_Chacha20_chacha20_init(ctx, key, n1, ctr);
+ Hacl_Impl_Chacha20_chacha20_update(ctx, len, out, cipher);
}
diff --git a/lib/freebl/verified/Hacl_Chacha20Poly1305_128.c b/lib/freebl/verified/Hacl_Chacha20Poly1305_128.c
index da5612959..41334e4ce 100644
--- a/lib/freebl/verified/Hacl_Chacha20Poly1305_128.c
+++ b/lib/freebl/verified/Hacl_Chacha20Poly1305_128.c
@@ -23,8 +23,11 @@
#include "Hacl_Chacha20Poly1305_128.h"
-static inline void
-poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t *text)
+inline static void
+Hacl_Chacha20Poly1305_128_poly1305_padded_128(
+ Lib_IntVector_Intrinsics_vec128 *ctx,
+ uint32_t len,
+ uint8_t *text)
{
uint32_t n1 = len / (uint32_t)16U;
uint32_t r = len % (uint32_t)16U;
@@ -42,7 +45,7 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
uint32_t len1 = len0 - bs;
uint8_t *text1 = t00 + bs;
uint32_t nb = len1 / bs;
- for (uint32_t i = (uint32_t)0U; i < nb; i++) {
+ for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
uint8_t *block = text1 + i * bs;
Lib_IntVector_Intrinsics_vec128 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
@@ -197,43 +200,57 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128 t3 = a34;
Lib_IntVector_Intrinsics_vec128 t4 = a44;
Lib_IntVector_Intrinsics_vec128
- mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+ l = Lib_IntVector_Intrinsics_vec128_add64(t01, Lib_IntVector_Intrinsics_vec128_zero);
Lib_IntVector_Intrinsics_vec128
- z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
+ tmp0 =
+ Lib_IntVector_Intrinsics_vec128_and(l,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c01 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t1, c01);
+ Lib_IntVector_Intrinsics_vec128
+ tmp1 =
+ Lib_IntVector_Intrinsics_vec128_and(l0,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c11 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c11);
+ Lib_IntVector_Intrinsics_vec128
+ tmp2 =
+ Lib_IntVector_Intrinsics_vec128_and(l1,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
- Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
- Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
- Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+ c21 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c21);
Lib_IntVector_Intrinsics_vec128
- z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+ tmp3 =
+ Lib_IntVector_Intrinsics_vec128_and(l2,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c31 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c31);
Lib_IntVector_Intrinsics_vec128
- z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+ tmp4 =
+ Lib_IntVector_Intrinsics_vec128_and(l3,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
- Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
- Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
- Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
- Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
- Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+ c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
- z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+ l4 =
+ Lib_IntVector_Intrinsics_vec128_add64(tmp0,
+ Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
- z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
- Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
- Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
- Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+ tmp01 =
+ Lib_IntVector_Intrinsics_vec128_and(l4,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
- Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
- Lib_IntVector_Intrinsics_vec128 o00 = x02;
- Lib_IntVector_Intrinsics_vec128 o10 = x12;
- Lib_IntVector_Intrinsics_vec128 o20 = x21;
- Lib_IntVector_Intrinsics_vec128 o30 = x32;
- Lib_IntVector_Intrinsics_vec128 o40 = x42;
+ c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
+ Lib_IntVector_Intrinsics_vec128 o00 = tmp01;
+ Lib_IntVector_Intrinsics_vec128 o10 = tmp11;
+ Lib_IntVector_Intrinsics_vec128 o20 = tmp2;
+ Lib_IntVector_Intrinsics_vec128 o30 = tmp3;
+ Lib_IntVector_Intrinsics_vec128 o40 = tmp4;
acc0[0U] = o00;
acc0[1U] = o10;
acc0[2U] = o20;
@@ -266,7 +283,7 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
uint8_t *t10 = blocks + len0;
uint32_t nb = len1 / (uint32_t)16U;
uint32_t rem2 = len1 % (uint32_t)16U;
- for (uint32_t i = (uint32_t)0U; i < nb; i++) {
+ for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
uint8_t *block = t10 + i * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec128 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
@@ -431,43 +448,57 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
- mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+ l = Lib_IntVector_Intrinsics_vec128_add64(t01, Lib_IntVector_Intrinsics_vec128_zero);
+ Lib_IntVector_Intrinsics_vec128
+ tmp0 =
+ Lib_IntVector_Intrinsics_vec128_and(l,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c01 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t11, c01);
Lib_IntVector_Intrinsics_vec128
- z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
+ tmp1 =
+ Lib_IntVector_Intrinsics_vec128_and(l0,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c11 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c11);
Lib_IntVector_Intrinsics_vec128
- z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
- Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
- Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
- Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+ tmp2 =
+ Lib_IntVector_Intrinsics_vec128_and(l1,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+ c21 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c21);
Lib_IntVector_Intrinsics_vec128
- z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+ tmp3 =
+ Lib_IntVector_Intrinsics_vec128_and(l2,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c31 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c31);
+ Lib_IntVector_Intrinsics_vec128
+ tmp4 =
+ Lib_IntVector_Intrinsics_vec128_and(l3,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
- Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
- Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
- Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
- Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
- Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+ c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
- z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+ l4 =
+ Lib_IntVector_Intrinsics_vec128_add64(tmp0,
+ Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
- z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
- Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
- Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
- Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+ tmp01 =
+ Lib_IntVector_Intrinsics_vec128_and(l4,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
- Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
- Lib_IntVector_Intrinsics_vec128 o0 = x02;
- Lib_IntVector_Intrinsics_vec128 o1 = x12;
- Lib_IntVector_Intrinsics_vec128 o2 = x21;
- Lib_IntVector_Intrinsics_vec128 o3 = x32;
- Lib_IntVector_Intrinsics_vec128 o4 = x42;
+ c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
+ Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
+ Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
+ Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
+ Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
+ Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
acc0[0U] = o0;
acc0[1U] = o1;
acc0[2U] = o2;
@@ -480,7 +511,7 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec128_zero;
uint8_t tmp[16U] = { 0U };
- memcpy(tmp, last1, rem2 * sizeof(last1[0U]));
+ memcpy(tmp, last1, rem2 * sizeof last1[0U]);
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
@@ -641,43 +672,57 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
- mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+ l = Lib_IntVector_Intrinsics_vec128_add64(t01, Lib_IntVector_Intrinsics_vec128_zero);
Lib_IntVector_Intrinsics_vec128
- z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
+ tmp0 =
+ Lib_IntVector_Intrinsics_vec128_and(l,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
- Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
- Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
- Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+ c01 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t11, c01);
Lib_IntVector_Intrinsics_vec128
- z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+ tmp1 =
+ Lib_IntVector_Intrinsics_vec128_and(l0,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+ c11 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c11);
Lib_IntVector_Intrinsics_vec128
- t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
- Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
- Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
- Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
- Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
- Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+ tmp2 =
+ Lib_IntVector_Intrinsics_vec128_and(l1,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+ c21 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c21);
Lib_IntVector_Intrinsics_vec128
- z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
- Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
- Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
- Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+ tmp3 =
+ Lib_IntVector_Intrinsics_vec128_and(l2,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c31 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c31);
Lib_IntVector_Intrinsics_vec128
- z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
- Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
- Lib_IntVector_Intrinsics_vec128 o0 = x02;
- Lib_IntVector_Intrinsics_vec128 o1 = x12;
- Lib_IntVector_Intrinsics_vec128 o2 = x21;
- Lib_IntVector_Intrinsics_vec128 o3 = x32;
- Lib_IntVector_Intrinsics_vec128 o4 = x42;
+ tmp4 =
+ Lib_IntVector_Intrinsics_vec128_and(l3,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128
+ l4 =
+ Lib_IntVector_Intrinsics_vec128_add64(tmp0,
+ Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
+ Lib_IntVector_Intrinsics_vec128
+ tmp01 =
+ Lib_IntVector_Intrinsics_vec128_and(l4,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
+ Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
+ Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
+ Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
+ Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
+ Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
acc0[0U] = o0;
acc0[1U] = o1;
acc0[2U] = o2;
@@ -685,7 +730,7 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
acc0[4U] = o4;
}
uint8_t tmp[16U] = { 0U };
- memcpy(tmp, rem1, r * sizeof(rem1[0U]));
+ memcpy(tmp, rem1, r * sizeof rem1[0U]);
if (r > (uint32_t)0U) {
Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec128 *acc = ctx;
@@ -852,43 +897,57 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
- mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+ l = Lib_IntVector_Intrinsics_vec128_add64(t0, Lib_IntVector_Intrinsics_vec128_zero);
+ Lib_IntVector_Intrinsics_vec128
+ tmp0 =
+ Lib_IntVector_Intrinsics_vec128_and(l,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c01 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t1, c01);
Lib_IntVector_Intrinsics_vec128
- z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
+ tmp1 =
+ Lib_IntVector_Intrinsics_vec128_and(l0,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
- Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
- Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
- Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+ c11 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c11);
Lib_IntVector_Intrinsics_vec128
- z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+ tmp2 =
+ Lib_IntVector_Intrinsics_vec128_and(l1,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+ c21 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c21);
Lib_IntVector_Intrinsics_vec128
- t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
- Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
- Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
- Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
- Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
- Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+ tmp3 =
+ Lib_IntVector_Intrinsics_vec128_and(l2,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c31 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c31);
+ Lib_IntVector_Intrinsics_vec128
+ tmp4 =
+ Lib_IntVector_Intrinsics_vec128_and(l3,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+ c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
- z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
- Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
- Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
- Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+ l4 =
+ Lib_IntVector_Intrinsics_vec128_add64(tmp0,
+ Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
- z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
- Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
- Lib_IntVector_Intrinsics_vec128 o0 = x02;
- Lib_IntVector_Intrinsics_vec128 o1 = x12;
- Lib_IntVector_Intrinsics_vec128 o2 = x21;
- Lib_IntVector_Intrinsics_vec128 o3 = x32;
- Lib_IntVector_Intrinsics_vec128 o4 = x42;
+ tmp01 =
+ Lib_IntVector_Intrinsics_vec128_and(l4,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
+ Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
+ Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
+ Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
+ Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
+ Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
@@ -898,8 +957,8 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
}
}
-static inline void
-poly1305_do_128(
+inline static void
+Hacl_Chacha20Poly1305_128_poly1305_do_128(
uint8_t *k,
uint32_t aadlen,
uint8_t *aad,
@@ -912,8 +971,8 @@ poly1305_do_128(
ctx[_i] = Lib_IntVector_Intrinsics_vec128_zero;
uint8_t block[16U] = { 0U };
Hacl_Poly1305_128_poly1305_init(ctx, k);
- poly1305_padded_128(ctx, aadlen, aad);
- poly1305_padded_128(ctx, mlen, m);
+ Hacl_Chacha20Poly1305_128_poly1305_padded_128(ctx, aadlen, aad);
+ Hacl_Chacha20Poly1305_128_poly1305_padded_128(ctx, mlen, m);
store64_le(block, (uint64_t)aadlen);
store64_le(block + (uint32_t)8U, (uint64_t)mlen);
Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
@@ -1081,43 +1140,57 @@ poly1305_do_128(
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
- mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+ l = Lib_IntVector_Intrinsics_vec128_add64(t0, Lib_IntVector_Intrinsics_vec128_zero);
Lib_IntVector_Intrinsics_vec128
- z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
+ tmp0 =
+ Lib_IntVector_Intrinsics_vec128_and(l,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
- Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
- Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
- Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+ c01 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t1, c01);
Lib_IntVector_Intrinsics_vec128
- z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+ tmp1 =
+ Lib_IntVector_Intrinsics_vec128_and(l0,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+ c11 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c11);
Lib_IntVector_Intrinsics_vec128
- t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
- Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
- Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
- Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
- Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
- Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+ tmp2 =
+ Lib_IntVector_Intrinsics_vec128_and(l1,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c21 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c21);
Lib_IntVector_Intrinsics_vec128
- z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+ tmp3 =
+ Lib_IntVector_Intrinsics_vec128_and(l2,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
- Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
- Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
- Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+ c31 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c31);
+ Lib_IntVector_Intrinsics_vec128
+ tmp4 =
+ Lib_IntVector_Intrinsics_vec128_and(l3,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128
+ l4 =
+ Lib_IntVector_Intrinsics_vec128_add64(tmp0,
+ Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
+ Lib_IntVector_Intrinsics_vec128
+ tmp01 =
+ Lib_IntVector_Intrinsics_vec128_and(l4,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
- Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
- Lib_IntVector_Intrinsics_vec128 o0 = x02;
- Lib_IntVector_Intrinsics_vec128 o1 = x12;
- Lib_IntVector_Intrinsics_vec128 o2 = x21;
- Lib_IntVector_Intrinsics_vec128 o3 = x32;
- Lib_IntVector_Intrinsics_vec128 o4 = x42;
+ c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
+ Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
+ Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
+ Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
+ Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
+ Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
@@ -1141,7 +1214,7 @@ Hacl_Chacha20Poly1305_128_aead_encrypt(
uint8_t tmp[64U] = { 0U };
Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
uint8_t *key = tmp;
- poly1305_do_128(key, aadlen, aad, mlen, cipher, mac);
+ Hacl_Chacha20Poly1305_128_poly1305_do_128(key, aadlen, aad, mlen, cipher, mac);
}
uint32_t
@@ -1159,9 +1232,9 @@ Hacl_Chacha20Poly1305_128_aead_decrypt(
uint8_t tmp[64U] = { 0U };
Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
uint8_t *key = tmp;
- poly1305_do_128(key, aadlen, aad, mlen, cipher, computed_mac);
+ Hacl_Chacha20Poly1305_128_poly1305_do_128(key, aadlen, aad, mlen, cipher, computed_mac);
uint8_t res = (uint8_t)255U;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
res = uu____0 & res;
}
diff --git a/lib/freebl/verified/Hacl_Chacha20Poly1305_32.c b/lib/freebl/verified/Hacl_Chacha20Poly1305_32.c
index a1e48c18e..f6524fe95 100644
--- a/lib/freebl/verified/Hacl_Chacha20Poly1305_32.c
+++ b/lib/freebl/verified/Hacl_Chacha20Poly1305_32.c
@@ -23,8 +23,8 @@
#include "Hacl_Chacha20Poly1305_32.h"
-static inline void
-poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
+static void
+Hacl_Chacha20Poly1305_32_poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
{
uint32_t n1 = len / (uint32_t)16U;
uint32_t r = len % (uint32_t)16U;
@@ -34,7 +34,7 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t *acc0 = ctx;
uint32_t nb = n1 * (uint32_t)16U / (uint32_t)16U;
uint32_t rem2 = n1 * (uint32_t)16U % (uint32_t)16U;
- for (uint32_t i = (uint32_t)0U; i < nb; i++) {
+ for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
uint8_t *block = blocks + i * (uint32_t)16U;
uint64_t e[5U] = { 0U };
uint64_t u0 = load64_le(block);
@@ -118,35 +118,30 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
- uint64_t mask261 = (uint64_t)0x3ffffffU;
- uint64_t z0 = t0 >> (uint32_t)26U;
- uint64_t z1 = t3 >> (uint32_t)26U;
- uint64_t x0 = t0 & mask261;
- uint64_t x3 = t3 & mask261;
- uint64_t x1 = t1 + z0;
- uint64_t x4 = t4 + z1;
- uint64_t z01 = x1 >> (uint32_t)26U;
- uint64_t z11 = x4 >> (uint32_t)26U;
- uint64_t t = z11 << (uint32_t)2U;
- uint64_t z12 = z11 + t;
- uint64_t x11 = x1 & mask261;
- uint64_t x41 = x4 & mask261;
- uint64_t x2 = t2 + z01;
- uint64_t x01 = x0 + z12;
- uint64_t z02 = x2 >> (uint32_t)26U;
- uint64_t z13 = x01 >> (uint32_t)26U;
- uint64_t x21 = x2 & mask261;
- uint64_t x02 = x01 & mask261;
- uint64_t x31 = x3 + z02;
- uint64_t x12 = x11 + z13;
- uint64_t z03 = x31 >> (uint32_t)26U;
- uint64_t x32 = x31 & mask261;
- uint64_t x42 = x41 + z03;
- uint64_t o0 = x02;
- uint64_t o1 = x12;
- uint64_t o2 = x21;
- uint64_t o3 = x32;
- uint64_t o4 = x42;
+ uint64_t l = t0 + (uint64_t)0U;
+ uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
+ uint64_t c01 = l >> (uint32_t)26U;
+ uint64_t l0 = t1 + c01;
+ uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
+ uint64_t c11 = l0 >> (uint32_t)26U;
+ uint64_t l1 = t2 + c11;
+ uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
+ uint64_t c21 = l1 >> (uint32_t)26U;
+ uint64_t l2 = t3 + c21;
+ uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
+ uint64_t c31 = l2 >> (uint32_t)26U;
+ uint64_t l3 = t4 + c31;
+ uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
+ uint64_t c4 = l3 >> (uint32_t)26U;
+ uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
+ uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
+ uint64_t c5 = l4 >> (uint32_t)26U;
+ uint64_t tmp11 = tmp1 + c5;
+ uint64_t o0 = tmp01;
+ uint64_t o1 = tmp11;
+ uint64_t o2 = tmp2;
+ uint64_t o3 = tmp3;
+ uint64_t o4 = tmp4;
acc0[0U] = o0;
acc0[1U] = o1;
acc0[2U] = o2;
@@ -157,7 +152,7 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
uint8_t *last1 = blocks + nb * (uint32_t)16U;
uint64_t e[5U] = { 0U };
uint8_t tmp[16U] = { 0U };
- memcpy(tmp, last1, rem2 * sizeof(last1[0U]));
+ memcpy(tmp, last1, rem2 * sizeof last1[0U]);
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
@@ -239,35 +234,30 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
- uint64_t mask261 = (uint64_t)0x3ffffffU;
- uint64_t z0 = t0 >> (uint32_t)26U;
- uint64_t z1 = t3 >> (uint32_t)26U;
- uint64_t x0 = t0 & mask261;
- uint64_t x3 = t3 & mask261;
- uint64_t x1 = t1 + z0;
- uint64_t x4 = t4 + z1;
- uint64_t z01 = x1 >> (uint32_t)26U;
- uint64_t z11 = x4 >> (uint32_t)26U;
- uint64_t t = z11 << (uint32_t)2U;
- uint64_t z12 = z11 + t;
- uint64_t x11 = x1 & mask261;
- uint64_t x41 = x4 & mask261;
- uint64_t x2 = t2 + z01;
- uint64_t x01 = x0 + z12;
- uint64_t z02 = x2 >> (uint32_t)26U;
- uint64_t z13 = x01 >> (uint32_t)26U;
- uint64_t x21 = x2 & mask261;
- uint64_t x02 = x01 & mask261;
- uint64_t x31 = x3 + z02;
- uint64_t x12 = x11 + z13;
- uint64_t z03 = x31 >> (uint32_t)26U;
- uint64_t x32 = x31 & mask261;
- uint64_t x42 = x41 + z03;
- uint64_t o0 = x02;
- uint64_t o1 = x12;
- uint64_t o2 = x21;
- uint64_t o3 = x32;
- uint64_t o4 = x42;
+ uint64_t l = t0 + (uint64_t)0U;
+ uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
+ uint64_t c01 = l >> (uint32_t)26U;
+ uint64_t l0 = t1 + c01;
+ uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
+ uint64_t c11 = l0 >> (uint32_t)26U;
+ uint64_t l1 = t2 + c11;
+ uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
+ uint64_t c21 = l1 >> (uint32_t)26U;
+ uint64_t l2 = t3 + c21;
+ uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
+ uint64_t c31 = l2 >> (uint32_t)26U;
+ uint64_t l3 = t4 + c31;
+ uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
+ uint64_t c4 = l3 >> (uint32_t)26U;
+ uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
+ uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
+ uint64_t c5 = l4 >> (uint32_t)26U;
+ uint64_t tmp11 = tmp1 + c5;
+ uint64_t o0 = tmp01;
+ uint64_t o1 = tmp11;
+ uint64_t o2 = tmp2;
+ uint64_t o3 = tmp3;
+ uint64_t o4 = tmp4;
acc0[0U] = o0;
acc0[1U] = o1;
acc0[2U] = o2;
@@ -275,7 +265,7 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
acc0[4U] = o4;
}
uint8_t tmp[16U] = { 0U };
- memcpy(tmp, rem1, r * sizeof(rem1[0U]));
+ memcpy(tmp, rem1, r * sizeof rem1[0U]);
if (r > (uint32_t)0U) {
uint64_t *pre = ctx + (uint32_t)5U;
uint64_t *acc = ctx;
@@ -361,35 +351,30 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
- uint64_t mask261 = (uint64_t)0x3ffffffU;
- uint64_t z0 = t0 >> (uint32_t)26U;
- uint64_t z1 = t3 >> (uint32_t)26U;
- uint64_t x0 = t0 & mask261;
- uint64_t x3 = t3 & mask261;
- uint64_t x1 = t1 + z0;
- uint64_t x4 = t4 + z1;
- uint64_t z01 = x1 >> (uint32_t)26U;
- uint64_t z11 = x4 >> (uint32_t)26U;
- uint64_t t = z11 << (uint32_t)2U;
- uint64_t z12 = z11 + t;
- uint64_t x11 = x1 & mask261;
- uint64_t x41 = x4 & mask261;
- uint64_t x2 = t2 + z01;
- uint64_t x01 = x0 + z12;
- uint64_t z02 = x2 >> (uint32_t)26U;
- uint64_t z13 = x01 >> (uint32_t)26U;
- uint64_t x21 = x2 & mask261;
- uint64_t x02 = x01 & mask261;
- uint64_t x31 = x3 + z02;
- uint64_t x12 = x11 + z13;
- uint64_t z03 = x31 >> (uint32_t)26U;
- uint64_t x32 = x31 & mask261;
- uint64_t x42 = x41 + z03;
- uint64_t o0 = x02;
- uint64_t o1 = x12;
- uint64_t o2 = x21;
- uint64_t o3 = x32;
- uint64_t o4 = x42;
+ uint64_t l = t0 + (uint64_t)0U;
+ uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
+ uint64_t c01 = l >> (uint32_t)26U;
+ uint64_t l0 = t1 + c01;
+ uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
+ uint64_t c11 = l0 >> (uint32_t)26U;
+ uint64_t l1 = t2 + c11;
+ uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
+ uint64_t c21 = l1 >> (uint32_t)26U;
+ uint64_t l2 = t3 + c21;
+ uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
+ uint64_t c31 = l2 >> (uint32_t)26U;
+ uint64_t l3 = t4 + c31;
+ uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
+ uint64_t c4 = l3 >> (uint32_t)26U;
+ uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
+ uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
+ uint64_t c5 = l4 >> (uint32_t)26U;
+ uint64_t tmp11 = tmp1 + c5;
+ uint64_t o0 = tmp01;
+ uint64_t o1 = tmp11;
+ uint64_t o2 = tmp2;
+ uint64_t o3 = tmp3;
+ uint64_t o4 = tmp4;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
@@ -399,8 +384,8 @@ poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
}
}
-static inline void
-poly1305_do_32(
+static void
+Hacl_Chacha20Poly1305_32_poly1305_do_32(
uint8_t *k,
uint32_t aadlen,
uint8_t *aad,
@@ -411,8 +396,8 @@ poly1305_do_32(
uint64_t ctx[25U] = { 0U };
uint8_t block[16U] = { 0U };
Hacl_Poly1305_32_poly1305_init(ctx, k);
- poly1305_padded_32(ctx, aadlen, aad);
- poly1305_padded_32(ctx, mlen, m);
+ Hacl_Chacha20Poly1305_32_poly1305_padded_32(ctx, aadlen, aad);
+ Hacl_Chacha20Poly1305_32_poly1305_padded_32(ctx, mlen, m);
store64_le(block, (uint64_t)aadlen);
store64_le(block + (uint32_t)8U, (uint64_t)mlen);
uint64_t *pre = ctx + (uint32_t)5U;
@@ -499,35 +484,30 @@ poly1305_do_32(
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
- uint64_t mask261 = (uint64_t)0x3ffffffU;
- uint64_t z0 = t0 >> (uint32_t)26U;
- uint64_t z1 = t3 >> (uint32_t)26U;
- uint64_t x0 = t0 & mask261;
- uint64_t x3 = t3 & mask261;
- uint64_t x1 = t1 + z0;
- uint64_t x4 = t4 + z1;
- uint64_t z01 = x1 >> (uint32_t)26U;
- uint64_t z11 = x4 >> (uint32_t)26U;
- uint64_t t = z11 << (uint32_t)2U;
- uint64_t z12 = z11 + t;
- uint64_t x11 = x1 & mask261;
- uint64_t x41 = x4 & mask261;
- uint64_t x2 = t2 + z01;
- uint64_t x01 = x0 + z12;
- uint64_t z02 = x2 >> (uint32_t)26U;
- uint64_t z13 = x01 >> (uint32_t)26U;
- uint64_t x21 = x2 & mask261;
- uint64_t x02 = x01 & mask261;
- uint64_t x31 = x3 + z02;
- uint64_t x12 = x11 + z13;
- uint64_t z03 = x31 >> (uint32_t)26U;
- uint64_t x32 = x31 & mask261;
- uint64_t x42 = x41 + z03;
- uint64_t o0 = x02;
- uint64_t o1 = x12;
- uint64_t o2 = x21;
- uint64_t o3 = x32;
- uint64_t o4 = x42;
+ uint64_t l = t0 + (uint64_t)0U;
+ uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
+ uint64_t c01 = l >> (uint32_t)26U;
+ uint64_t l0 = t1 + c01;
+ uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
+ uint64_t c11 = l0 >> (uint32_t)26U;
+ uint64_t l1 = t2 + c11;
+ uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
+ uint64_t c21 = l1 >> (uint32_t)26U;
+ uint64_t l2 = t3 + c21;
+ uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
+ uint64_t c31 = l2 >> (uint32_t)26U;
+ uint64_t l3 = t4 + c31;
+ uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
+ uint64_t c4 = l3 >> (uint32_t)26U;
+ uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
+ uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
+ uint64_t c5 = l4 >> (uint32_t)26U;
+ uint64_t tmp11 = tmp1 + c5;
+ uint64_t o0 = tmp01;
+ uint64_t o1 = tmp11;
+ uint64_t o2 = tmp2;
+ uint64_t o3 = tmp3;
+ uint64_t o4 = tmp4;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
@@ -551,7 +531,7 @@ Hacl_Chacha20Poly1305_32_aead_encrypt(
uint8_t tmp[64U] = { 0U };
Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
uint8_t *key = tmp;
- poly1305_do_32(key, aadlen, aad, mlen, cipher, mac);
+ Hacl_Chacha20Poly1305_32_poly1305_do_32(key, aadlen, aad, mlen, cipher, mac);
}
uint32_t
@@ -569,9 +549,9 @@ Hacl_Chacha20Poly1305_32_aead_decrypt(
uint8_t tmp[64U] = { 0U };
Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n1, (uint32_t)0U);
uint8_t *key = tmp;
- poly1305_do_32(key, aadlen, aad, mlen, cipher, computed_mac);
+ Hacl_Chacha20Poly1305_32_poly1305_do_32(key, aadlen, aad, mlen, cipher, computed_mac);
uint8_t res = (uint8_t)255U;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
res = uu____0 & res;
}
diff --git a/lib/freebl/verified/Hacl_Chacha20_Vec128.c b/lib/freebl/verified/Hacl_Chacha20_Vec128.c
index c4c12255d..718473be8 100644
--- a/lib/freebl/verified/Hacl_Chacha20_Vec128.c
+++ b/lib/freebl/verified/Hacl_Chacha20_Vec128.c
@@ -23,8 +23,8 @@
#include "Hacl_Chacha20_Vec128.h"
-static inline void
-double_round_128(Lib_IntVector_Intrinsics_vec128 *st)
+static void
+Hacl_Chacha20_Vec128_double_round_128(Lib_IntVector_Intrinsics_vec128 *st)
{
st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[4U]);
Lib_IntVector_Intrinsics_vec128 std = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[0U]);
@@ -124,27 +124,27 @@ double_round_128(Lib_IntVector_Intrinsics_vec128 *st)
st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std30, (uint32_t)7U);
}
-static inline void
-chacha20_core_128(
+static void
+Hacl_Chacha20_Vec128_chacha20_core_128(
Lib_IntVector_Intrinsics_vec128 *k,
Lib_IntVector_Intrinsics_vec128 *ctx,
uint32_t ctr)
{
- memcpy(k, ctx, (uint32_t)16U * sizeof(ctx[0U]));
+ memcpy(k, ctx, (uint32_t)16U * sizeof ctx[0U]);
uint32_t ctr_u32 = (uint32_t)4U * ctr;
Lib_IntVector_Intrinsics_vec128 cv = Lib_IntVector_Intrinsics_vec128_load32(ctr_u32);
k[12U] = Lib_IntVector_Intrinsics_vec128_add32(k[12U], cv);
- double_round_128(k);
- double_round_128(k);
- double_round_128(k);
- double_round_128(k);
- double_round_128(k);
- double_round_128(k);
- double_round_128(k);
- double_round_128(k);
- double_round_128(k);
- double_round_128(k);
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
+ Hacl_Chacha20_Vec128_double_round_128(k);
+ Hacl_Chacha20_Vec128_double_round_128(k);
+ Hacl_Chacha20_Vec128_double_round_128(k);
+ Hacl_Chacha20_Vec128_double_round_128(k);
+ Hacl_Chacha20_Vec128_double_round_128(k);
+ Hacl_Chacha20_Vec128_double_round_128(k);
+ Hacl_Chacha20_Vec128_double_round_128(k);
+ Hacl_Chacha20_Vec128_double_round_128(k);
+ Hacl_Chacha20_Vec128_double_round_128(k);
+ Hacl_Chacha20_Vec128_double_round_128(k);
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128 *os = k;
Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_add32(k[i], ctx[i]);
os[i] = x;
@@ -152,18 +152,22 @@ chacha20_core_128(
k[12U] = Lib_IntVector_Intrinsics_vec128_add32(k[12U], cv);
}
-static inline void
-chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n1, uint32_t ctr)
+static void
+Hacl_Chacha20_Vec128_chacha20_init_128(
+ Lib_IntVector_Intrinsics_vec128 *ctx,
+ uint8_t *k,
+ uint8_t *n1,
+ uint32_t ctr)
{
uint32_t ctx1[16U] = { 0U };
uint32_t *uu____0 = ctx1;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U) {
uint32_t *os = uu____0;
uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
os[i] = x;
}
uint32_t *uu____1 = ctx1 + (uint32_t)4U;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)8U; i = i + (uint32_t)1U) {
uint32_t *os = uu____1;
uint8_t *bj = k + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
@@ -173,7 +177,7 @@ chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n1,
}
ctx1[12U] = ctr;
uint32_t *uu____2 = ctx1 + (uint32_t)13U;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)3U; i = i + (uint32_t)1U) {
uint32_t *os = uu____2;
uint8_t *bj = n1 + i * (uint32_t)4U;
uint32_t u = load32_le(bj);
@@ -181,7 +185,7 @@ chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n1,
uint32_t x = r;
os[i] = x;
}
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
Lib_IntVector_Intrinsics_vec128 *os = ctx;
uint32_t x = ctx1[i];
Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_load32(x);
@@ -189,10 +193,10 @@ chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n1,
}
Lib_IntVector_Intrinsics_vec128
ctr1 =
- Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)0U,
- (uint32_t)1U,
+ Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)3U,
(uint32_t)2U,
- (uint32_t)3U);
+ (uint32_t)1U,
+ (uint32_t)0U);
Lib_IntVector_Intrinsics_vec128 c12 = ctx[12U];
ctx[12U] = Lib_IntVector_Intrinsics_vec128_add32(c12, ctr1);
}
@@ -209,17 +213,26 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
Lib_IntVector_Intrinsics_vec128 ctx[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
ctx[_i] = Lib_IntVector_Intrinsics_vec128_zero;
- chacha20_init_128(ctx, key, n1, ctr);
- uint32_t rem1 = len % (uint32_t)256U;
- uint32_t nb = len / (uint32_t)256U;
- uint32_t rem2 = len % (uint32_t)256U;
- for (uint32_t i = (uint32_t)0U; i < nb; i++) {
- uint8_t *uu____0 = out + i * (uint32_t)256U;
- uint8_t *uu____1 = text + i * (uint32_t)256U;
+ Hacl_Chacha20_Vec128_chacha20_init_128(ctx, key, n1, ctr);
+ uint32_t rem1 = len % ((uint32_t)4U * (uint32_t)64U);
+ uint32_t nb = len / ((uint32_t)4U * (uint32_t)64U);
+ uint32_t rem2 = len % ((uint32_t)4U * (uint32_t)64U);
+ for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0 = i0 + (uint32_t)1U) {
+ uint8_t *uu____0 = out + i0 * (uint32_t)4U * (uint32_t)64U;
+ uint8_t *uu____1 = text + i0 * (uint32_t)256U;
Lib_IntVector_Intrinsics_vec128 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec128_zero;
- chacha20_core_128(k, ctx, i);
+ Hacl_Chacha20_Vec128_chacha20_core_128(k, ctx, i0);
+ Lib_IntVector_Intrinsics_vec128 bl[16U];
+ for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
+ bl[_i] = Lib_IntVector_Intrinsics_vec128_zero;
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ Lib_IntVector_Intrinsics_vec128 *os = bl;
+ Lib_IntVector_Intrinsics_vec128
+ x = Lib_IntVector_Intrinsics_vec128_load_le(uu____1 + i * (uint32_t)4U * (uint32_t)4U);
+ os[i] = x;
+ }
Lib_IntVector_Intrinsics_vec128 v00 = k[0U];
Lib_IntVector_Intrinsics_vec128 v16 = k[1U];
Lib_IntVector_Intrinsics_vec128 v20 = k[2U];
@@ -332,22 +345,33 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
k[13U] = v7;
k[14U] = v11;
k[15U] = v15;
- for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)16U; i0++) {
- Lib_IntVector_Intrinsics_vec128
- x = Lib_IntVector_Intrinsics_vec128_load_le(uu____1 + i0 * (uint32_t)16U);
- Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
- Lib_IntVector_Intrinsics_vec128_store_le(uu____0 + i0 * (uint32_t)16U, y);
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ Lib_IntVector_Intrinsics_vec128 *os = bl;
+ Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_xor(bl[i], k[i]);
+ os[i] = x;
+ }
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ Lib_IntVector_Intrinsics_vec128_store_le(uu____0 + i * (uint32_t)16U, bl[i]);
}
}
if (rem2 > (uint32_t)0U) {
- uint8_t *uu____2 = out + nb * (uint32_t)256U;
- uint8_t *uu____3 = text + nb * (uint32_t)256U;
+ uint8_t *uu____2 = out + nb * (uint32_t)4U * (uint32_t)64U;
+ uint8_t *uu____3 = text + nb * (uint32_t)4U * (uint32_t)64U;
uint8_t plain[256U] = { 0U };
- memcpy(plain, uu____3, rem1 * sizeof(uu____3[0U]));
+ memcpy(plain, uu____3, rem1 * sizeof uu____3[0U]);
Lib_IntVector_Intrinsics_vec128 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec128_zero;
- chacha20_core_128(k, ctx, nb);
+ Hacl_Chacha20_Vec128_chacha20_core_128(k, ctx, nb);
+ Lib_IntVector_Intrinsics_vec128 bl[16U];
+ for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
+ bl[_i] = Lib_IntVector_Intrinsics_vec128_zero;
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ Lib_IntVector_Intrinsics_vec128 *os = bl;
+ Lib_IntVector_Intrinsics_vec128
+ x = Lib_IntVector_Intrinsics_vec128_load_le(plain + i * (uint32_t)4U * (uint32_t)4U);
+ os[i] = x;
+ }
Lib_IntVector_Intrinsics_vec128 v00 = k[0U];
Lib_IntVector_Intrinsics_vec128 v16 = k[1U];
Lib_IntVector_Intrinsics_vec128 v20 = k[2U];
@@ -460,13 +484,15 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
k[13U] = v7;
k[14U] = v11;
k[15U] = v15;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
- Lib_IntVector_Intrinsics_vec128
- x = Lib_IntVector_Intrinsics_vec128_load_le(plain + i * (uint32_t)16U);
- Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
- Lib_IntVector_Intrinsics_vec128_store_le(plain + i * (uint32_t)16U, y);
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ Lib_IntVector_Intrinsics_vec128 *os = bl;
+ Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_xor(bl[i], k[i]);
+ os[i] = x;
+ }
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ Lib_IntVector_Intrinsics_vec128_store_le(plain + i * (uint32_t)16U, bl[i]);
}
- memcpy(uu____2, plain, rem1 * sizeof(plain[0U]));
+ memcpy(uu____2, plain, rem1 * sizeof plain[0U]);
}
}
@@ -482,17 +508,26 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
Lib_IntVector_Intrinsics_vec128 ctx[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
ctx[_i] = Lib_IntVector_Intrinsics_vec128_zero;
- chacha20_init_128(ctx, key, n1, ctr);
- uint32_t rem1 = len % (uint32_t)256U;
- uint32_t nb = len / (uint32_t)256U;
- uint32_t rem2 = len % (uint32_t)256U;
- for (uint32_t i = (uint32_t)0U; i < nb; i++) {
- uint8_t *uu____0 = out + i * (uint32_t)256U;
- uint8_t *uu____1 = cipher + i * (uint32_t)256U;
+ Hacl_Chacha20_Vec128_chacha20_init_128(ctx, key, n1, ctr);
+ uint32_t rem1 = len % ((uint32_t)4U * (uint32_t)64U);
+ uint32_t nb = len / ((uint32_t)4U * (uint32_t)64U);
+ uint32_t rem2 = len % ((uint32_t)4U * (uint32_t)64U);
+ for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0 = i0 + (uint32_t)1U) {
+ uint8_t *uu____0 = out + i0 * (uint32_t)4U * (uint32_t)64U;
+ uint8_t *uu____1 = cipher + i0 * (uint32_t)256U;
Lib_IntVector_Intrinsics_vec128 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec128_zero;
- chacha20_core_128(k, ctx, i);
+ Hacl_Chacha20_Vec128_chacha20_core_128(k, ctx, i0);
+ Lib_IntVector_Intrinsics_vec128 bl[16U];
+ for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
+ bl[_i] = Lib_IntVector_Intrinsics_vec128_zero;
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ Lib_IntVector_Intrinsics_vec128 *os = bl;
+ Lib_IntVector_Intrinsics_vec128
+ x = Lib_IntVector_Intrinsics_vec128_load_le(uu____1 + i * (uint32_t)4U * (uint32_t)4U);
+ os[i] = x;
+ }
Lib_IntVector_Intrinsics_vec128 v00 = k[0U];
Lib_IntVector_Intrinsics_vec128 v16 = k[1U];
Lib_IntVector_Intrinsics_vec128 v20 = k[2U];
@@ -605,22 +640,33 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
k[13U] = v7;
k[14U] = v11;
k[15U] = v15;
- for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)16U; i0++) {
- Lib_IntVector_Intrinsics_vec128
- x = Lib_IntVector_Intrinsics_vec128_load_le(uu____1 + i0 * (uint32_t)16U);
- Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
- Lib_IntVector_Intrinsics_vec128_store_le(uu____0 + i0 * (uint32_t)16U, y);
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ Lib_IntVector_Intrinsics_vec128 *os = bl;
+ Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_xor(bl[i], k[i]);
+ os[i] = x;
+ }
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ Lib_IntVector_Intrinsics_vec128_store_le(uu____0 + i * (uint32_t)16U, bl[i]);
}
}
if (rem2 > (uint32_t)0U) {
- uint8_t *uu____2 = out + nb * (uint32_t)256U;
- uint8_t *uu____3 = cipher + nb * (uint32_t)256U;
+ uint8_t *uu____2 = out + nb * (uint32_t)4U * (uint32_t)64U;
+ uint8_t *uu____3 = cipher + nb * (uint32_t)4U * (uint32_t)64U;
uint8_t plain[256U] = { 0U };
- memcpy(plain, uu____3, rem1 * sizeof(uu____3[0U]));
+ memcpy(plain, uu____3, rem1 * sizeof uu____3[0U]);
Lib_IntVector_Intrinsics_vec128 k[16U];
for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
k[_i] = Lib_IntVector_Intrinsics_vec128_zero;
- chacha20_core_128(k, ctx, nb);
+ Hacl_Chacha20_Vec128_chacha20_core_128(k, ctx, nb);
+ Lib_IntVector_Intrinsics_vec128 bl[16U];
+ for (uint32_t _i = 0U; _i < (uint32_t)16U; ++_i)
+ bl[_i] = Lib_IntVector_Intrinsics_vec128_zero;
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ Lib_IntVector_Intrinsics_vec128 *os = bl;
+ Lib_IntVector_Intrinsics_vec128
+ x = Lib_IntVector_Intrinsics_vec128_load_le(plain + i * (uint32_t)4U * (uint32_t)4U);
+ os[i] = x;
+ }
Lib_IntVector_Intrinsics_vec128 v00 = k[0U];
Lib_IntVector_Intrinsics_vec128 v16 = k[1U];
Lib_IntVector_Intrinsics_vec128 v20 = k[2U];
@@ -733,12 +779,14 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
k[13U] = v7;
k[14U] = v11;
k[15U] = v15;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
- Lib_IntVector_Intrinsics_vec128
- x = Lib_IntVector_Intrinsics_vec128_load_le(plain + i * (uint32_t)16U);
- Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
- Lib_IntVector_Intrinsics_vec128_store_le(plain + i * (uint32_t)16U, y);
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ Lib_IntVector_Intrinsics_vec128 *os = bl;
+ Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_xor(bl[i], k[i]);
+ os[i] = x;
+ }
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i = i + (uint32_t)1U) {
+ Lib_IntVector_Intrinsics_vec128_store_le(plain + i * (uint32_t)16U, bl[i]);
}
- memcpy(uu____2, plain, rem1 * sizeof(plain[0U]));
+ memcpy(uu____2, plain, rem1 * sizeof plain[0U]);
}
}
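Both sides of the Hacl_Chacha20_Vec128 hunks above keep the same outer shape: the input is split into groups of four 64-byte ChaCha20 blocks (256 bytes per iteration, written either as 256U or as 4U * 64U), and any trailing bytes are copied into a zero-filled 256-byte scratch buffer, processed, and copied back out. A minimal scalar sketch of that outer loop follows, with a hypothetical process_256 stub standing in for the vectorized 4-block kernel; the stub and the function name encrypt_outer_sketch are illustrative only and do not appear in the diff.

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for the Vec128 4-block kernel: the real code derives
   256 keystream bytes from the ChaCha20 state at this block-group index. */
static void
process_256(uint8_t *out, const uint8_t *in, uint32_t block_group)
{
    for (uint32_t j = 0U; j < 256U; j++)
        out[j] = in[j] ^ (uint8_t)block_group; /* placeholder keystream */
}

static void
encrypt_outer_sketch(uint8_t *out, const uint8_t *text, uint32_t len)
{
    uint32_t nb = len / 256U;  /* full groups of four 64-byte blocks */
    uint32_t rem = len % 256U; /* bytes left over at the tail        */
    for (uint32_t i = 0U; i < nb; i++)
        process_256(out + i * 256U, text + i * 256U, i);
    if (rem > 0U) {
        uint8_t plain[256U] = { 0U };
        memcpy(plain, text + nb * 256U, rem);
        process_256(plain, plain, nb);
        memcpy(out + nb * 256U, plain, rem);
    }
}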
diff --git a/lib/freebl/verified/Hacl_Curve25519_51.c b/lib/freebl/verified/Hacl_Curve25519_51.c
index 1c5bd7b37..6cd48027b 100644
--- a/lib/freebl/verified/Hacl_Curve25519_51.c
+++ b/lib/freebl/verified/Hacl_Curve25519_51.c
@@ -23,8 +23,8 @@
#include "Hacl_Curve25519_51.h"
-static inline void
-fadd0(uint64_t *out, uint64_t *f1, uint64_t *f2)
+inline static void
+Hacl_Impl_Curve25519_Field51_fadd(uint64_t *out, uint64_t *f1, uint64_t *f2)
{
uint64_t f10 = f1[0U];
uint64_t f20 = f2[0U];
@@ -43,8 +43,8 @@ fadd0(uint64_t *out, uint64_t *f1, uint64_t *f2)
out[4U] = f14 + f24;
}
-static inline void
-fsub0(uint64_t *out, uint64_t *f1, uint64_t *f2)
+inline static void
+Hacl_Impl_Curve25519_Field51_fsub(uint64_t *out, uint64_t *f1, uint64_t *f2)
{
uint64_t f10 = f1[0U];
uint64_t f20 = f2[0U];
@@ -63,8 +63,12 @@ fsub0(uint64_t *out, uint64_t *f1, uint64_t *f2)
out[4U] = f14 + (uint64_t)0x3ffffffffffff8U - f24;
}
-static inline void
-fmul0(uint64_t *out, uint64_t *f1, uint64_t *f2)
+inline static void
+Hacl_Impl_Curve25519_Field51_fmul(
+ uint64_t *out,
+ uint64_t *f1,
+ uint64_t *f2,
+ FStar_UInt128_uint128 *uu____2959)
{
uint64_t f10 = f1[0U];
uint64_t f11 = f1[1U];
@@ -141,8 +145,12 @@ fmul0(uint64_t *out, uint64_t *f1, uint64_t *f2)
out[4U] = o4;
}
-static inline void
-fmul20(uint64_t *out, uint64_t *f1, uint64_t *f2)
+inline static void
+Hacl_Impl_Curve25519_Field51_fmul2(
+ uint64_t *out,
+ uint64_t *f1,
+ uint64_t *f2,
+ FStar_UInt128_uint128 *uu____4281)
{
uint64_t f10 = f1[0U];
uint64_t f11 = f1[1U];
@@ -302,8 +310,8 @@ fmul20(uint64_t *out, uint64_t *f1, uint64_t *f2)
out[9U] = o24;
}
-static inline void
-fmul1(uint64_t *out, uint64_t *f1, uint64_t f2)
+inline static void
+Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f1, uint64_t f2)
{
uint64_t f10 = f1[0U];
uint64_t f11 = f1[1U];
@@ -346,8 +354,11 @@ fmul1(uint64_t *out, uint64_t *f1, uint64_t f2)
out[4U] = o4;
}
-static inline void
-fsqr0(uint64_t *out, uint64_t *f)
+inline static void
+Hacl_Impl_Curve25519_Field51_fsqr(
+ uint64_t *out,
+ uint64_t *f,
+ FStar_UInt128_uint128 *uu____6941)
{
uint64_t f0 = f[0U];
uint64_t f1 = f[1U];
@@ -421,8 +432,11 @@ fsqr0(uint64_t *out, uint64_t *f)
out[4U] = o4;
}
-static inline void
-fsqr20(uint64_t *out, uint64_t *f)
+inline static void
+Hacl_Impl_Curve25519_Field51_fsqr2(
+ uint64_t *out,
+ uint64_t *f,
+ FStar_UInt128_uint128 *uu____7692)
{
uint64_t f10 = f[0U];
uint64_t f11 = f[1U];
@@ -577,7 +591,7 @@ fsqr20(uint64_t *out, uint64_t *f)
}
static void
-store_felem(uint64_t *u64s, uint64_t *f)
+Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint64_t *f)
{
uint64_t f0 = f[0U];
uint64_t f1 = f[1U];
@@ -637,21 +651,32 @@ store_felem(uint64_t *u64s, uint64_t *f)
u64s[3U] = o3;
}
-static inline void
-cswap20(uint64_t bit, uint64_t *p1, uint64_t *p2)
+inline static void
+Hacl_Impl_Curve25519_Field51_cswap2(uint64_t bit, uint64_t *p1, uint64_t *p2)
{
uint64_t mask = (uint64_t)0U - bit;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)10U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)10U; i = i + (uint32_t)1U) {
uint64_t dummy = mask & (p1[i] ^ p2[i]);
p1[i] = p1[i] ^ dummy;
p2[i] = p2[i] ^ dummy;
}
}
-static uint8_t g25519[32U] = { (uint8_t)9U };
+static uint8_t
+ Hacl_Curve25519_51_g25519[32U] =
+ {
+ (uint8_t)9U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
+ (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
+ (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
+ (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U,
+ (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U
+ };
static void
-point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_uint128 *tmp2)
+Hacl_Curve25519_51_point_add_and_double(
+ uint64_t *q,
+ uint64_t *p01_tmp1,
+ FStar_UInt128_uint128 *tmp2)
{
uint64_t *nq = p01_tmp1;
uint64_t *nq_p1 = p01_tmp1 + (uint32_t)10U;
@@ -664,39 +689,39 @@ point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_uint128 *tmp
uint64_t *b = tmp1 + (uint32_t)5U;
uint64_t *ab = tmp1;
uint64_t *dc = tmp1 + (uint32_t)10U;
- fadd0(a, x2, z2);
- fsub0(b, x2, z2);
+ Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
+ Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
uint64_t *x3 = nq_p1;
uint64_t *z31 = nq_p1 + (uint32_t)5U;
uint64_t *d0 = dc;
uint64_t *c0 = dc + (uint32_t)5U;
- fadd0(c0, x3, z31);
- fsub0(d0, x3, z31);
- fmul20(dc, dc, ab);
- fadd0(x3, d0, c0);
- fsub0(z31, d0, c0);
+ Hacl_Impl_Curve25519_Field51_fadd(c0, x3, z31);
+ Hacl_Impl_Curve25519_Field51_fsub(d0, x3, z31);
+ Hacl_Impl_Curve25519_Field51_fmul2(dc, dc, ab, tmp2);
+ Hacl_Impl_Curve25519_Field51_fadd(x3, d0, c0);
+ Hacl_Impl_Curve25519_Field51_fsub(z31, d0, c0);
uint64_t *a1 = tmp1;
uint64_t *b1 = tmp1 + (uint32_t)5U;
uint64_t *d = tmp1 + (uint32_t)10U;
uint64_t *c = tmp1 + (uint32_t)15U;
uint64_t *ab1 = tmp1;
uint64_t *dc1 = tmp1 + (uint32_t)10U;
- fsqr20(dc1, ab1);
- fsqr20(nq_p1, nq_p1);
+ Hacl_Impl_Curve25519_Field51_fsqr2(dc1, ab1, tmp2);
+ Hacl_Impl_Curve25519_Field51_fsqr2(nq_p1, nq_p1, tmp2);
a1[0U] = c[0U];
a1[1U] = c[1U];
a1[2U] = c[2U];
a1[3U] = c[3U];
a1[4U] = c[4U];
- fsub0(c, d, c);
- fmul1(b1, c, (uint64_t)121665U);
- fadd0(b1, b1, d);
- fmul20(nq, dc1, ab1);
- fmul0(z3, z3, x1);
+ Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
+ Hacl_Impl_Curve25519_Field51_fmul1(b1, c, (uint64_t)121665U);
+ Hacl_Impl_Curve25519_Field51_fadd(b1, b1, d);
+ Hacl_Impl_Curve25519_Field51_fmul2(nq, dc1, ab1, tmp2);
+ Hacl_Impl_Curve25519_Field51_fmul(z3, z3, x1, tmp2);
}
static void
-point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tmp2)
+Hacl_Curve25519_51_point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tmp2)
{
uint64_t *x2 = nq;
uint64_t *z2 = nq + (uint32_t)5U;
@@ -706,22 +731,22 @@ point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tmp2)
uint64_t *c = tmp1 + (uint32_t)15U;
uint64_t *ab = tmp1;
uint64_t *dc = tmp1 + (uint32_t)10U;
- fadd0(a, x2, z2);
- fsub0(b, x2, z2);
- fsqr20(dc, ab);
+ Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
+ Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
+ Hacl_Impl_Curve25519_Field51_fsqr2(dc, ab, tmp2);
a[0U] = c[0U];
a[1U] = c[1U];
a[2U] = c[2U];
a[3U] = c[3U];
a[4U] = c[4U];
- fsub0(c, d, c);
- fmul1(b, c, (uint64_t)121665U);
- fadd0(b, b, d);
- fmul20(nq, dc, ab);
+ Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
+ Hacl_Impl_Curve25519_Field51_fmul1(b, c, (uint64_t)121665U);
+ Hacl_Impl_Curve25519_Field51_fadd(b, b, d);
+ Hacl_Impl_Curve25519_Field51_fmul2(nq, dc, ab, tmp2);
}
static void
-montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
+Hacl_Curve25519_51_montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
{
FStar_UInt128_uint128 tmp2[10U];
for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
@@ -731,7 +756,7 @@ montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
uint64_t *p01 = p01_tmp1_swap;
uint64_t *p03 = p01;
uint64_t *p11 = p01 + (uint32_t)10U;
- memcpy(p11, init1, (uint32_t)10U * sizeof(init1[0U]));
+ memcpy(p11, init1, (uint32_t)10U * sizeof init1[0U]);
uint64_t *x0 = p03;
uint64_t *z0 = p03 + (uint32_t)5U;
x0[0U] = (uint64_t)1U;
@@ -749,10 +774,10 @@ montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
uint64_t *nq1 = p01_tmp1_swap;
uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)10U;
uint64_t *swap1 = p01_tmp1_swap + (uint32_t)40U;
- cswap20((uint64_t)1U, nq1, nq_p11);
- point_add_and_double(init1, p01_tmp11, tmp2);
+ Hacl_Impl_Curve25519_Field51_cswap2((uint64_t)1U, nq1, nq_p11);
+ Hacl_Curve25519_51_point_add_and_double(init1, p01_tmp11, tmp2);
swap1[0U] = (uint64_t)1U;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i = i + (uint32_t)1U) {
uint64_t *p01_tmp12 = p01_tmp1_swap;
uint64_t *swap2 = p01_tmp1_swap + (uint32_t)40U;
uint64_t *nq2 = p01_tmp12;
@@ -761,31 +786,35 @@ montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init1)
bit =
(uint64_t)(key[((uint32_t)253U - i) / (uint32_t)8U] >> ((uint32_t)253U - i) % (uint32_t)8U & (uint8_t)1U);
uint64_t sw = swap2[0U] ^ bit;
- cswap20(sw, nq2, nq_p12);
- point_add_and_double(init1, p01_tmp12, tmp2);
+ Hacl_Impl_Curve25519_Field51_cswap2(sw, nq2, nq_p12);
+ Hacl_Curve25519_51_point_add_and_double(init1, p01_tmp12, tmp2);
swap2[0U] = bit;
}
uint64_t sw = swap1[0U];
- cswap20(sw, nq1, nq_p11);
+ Hacl_Impl_Curve25519_Field51_cswap2(sw, nq1, nq_p11);
uint64_t *nq10 = p01_tmp1;
uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
- point_double(nq10, tmp1, tmp2);
- point_double(nq10, tmp1, tmp2);
- point_double(nq10, tmp1, tmp2);
- memcpy(out, p0, (uint32_t)10U * sizeof(p0[0U]));
+ Hacl_Curve25519_51_point_double(nq10, tmp1, tmp2);
+ Hacl_Curve25519_51_point_double(nq10, tmp1, tmp2);
+ Hacl_Curve25519_51_point_double(nq10, tmp1, tmp2);
+ memcpy(out, p0, (uint32_t)10U * sizeof p0[0U]);
}
static void
-fsquare_times(uint64_t *o, uint64_t *inp, FStar_UInt128_uint128 *tmp, uint32_t n1)
+Hacl_Curve25519_51_fsquare_times(
+ uint64_t *o,
+ uint64_t *inp,
+ FStar_UInt128_uint128 *tmp,
+ uint32_t n1)
{
- fsqr0(o, inp);
- for (uint32_t i = (uint32_t)0U; i < n1 - (uint32_t)1U; i++) {
- fsqr0(o, o);
+ Hacl_Impl_Curve25519_Field51_fsqr(o, inp, tmp);
+ for (uint32_t i = (uint32_t)0U; i < n1 - (uint32_t)1U; i = i + (uint32_t)1U) {
+ Hacl_Impl_Curve25519_Field51_fsqr(o, o, tmp);
}
}
static void
-finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tmp)
+Hacl_Curve25519_51_finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tmp)
{
uint64_t t1[20U] = { 0U };
uint64_t *a = t1;
@@ -793,34 +822,34 @@ finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tmp)
uint64_t *c = t1 + (uint32_t)10U;
uint64_t *t00 = t1 + (uint32_t)15U;
FStar_UInt128_uint128 *tmp1 = tmp;
- fsquare_times(a, i, tmp1, (uint32_t)1U);
- fsquare_times(t00, a, tmp1, (uint32_t)2U);
- fmul0(b, t00, i);
- fmul0(a, b, a);
- fsquare_times(t00, a, tmp1, (uint32_t)1U);
- fmul0(b, t00, b);
- fsquare_times(t00, b, tmp1, (uint32_t)5U);
- fmul0(b, t00, b);
- fsquare_times(t00, b, tmp1, (uint32_t)10U);
- fmul0(c, t00, b);
- fsquare_times(t00, c, tmp1, (uint32_t)20U);
- fmul0(t00, t00, c);
- fsquare_times(t00, t00, tmp1, (uint32_t)10U);
- fmul0(b, t00, b);
- fsquare_times(t00, b, tmp1, (uint32_t)50U);
- fmul0(c, t00, b);
- fsquare_times(t00, c, tmp1, (uint32_t)100U);
- fmul0(t00, t00, c);
- fsquare_times(t00, t00, tmp1, (uint32_t)50U);
- fmul0(t00, t00, b);
- fsquare_times(t00, t00, tmp1, (uint32_t)5U);
+ Hacl_Curve25519_51_fsquare_times(a, i, tmp1, (uint32_t)1U);
+ Hacl_Curve25519_51_fsquare_times(t00, a, tmp1, (uint32_t)2U);
+ Hacl_Impl_Curve25519_Field51_fmul(b, t00, i, tmp);
+ Hacl_Impl_Curve25519_Field51_fmul(a, b, a, tmp);
+ Hacl_Curve25519_51_fsquare_times(t00, a, tmp1, (uint32_t)1U);
+ Hacl_Impl_Curve25519_Field51_fmul(b, t00, b, tmp);
+ Hacl_Curve25519_51_fsquare_times(t00, b, tmp1, (uint32_t)5U);
+ Hacl_Impl_Curve25519_Field51_fmul(b, t00, b, tmp);
+ Hacl_Curve25519_51_fsquare_times(t00, b, tmp1, (uint32_t)10U);
+ Hacl_Impl_Curve25519_Field51_fmul(c, t00, b, tmp);
+ Hacl_Curve25519_51_fsquare_times(t00, c, tmp1, (uint32_t)20U);
+ Hacl_Impl_Curve25519_Field51_fmul(t00, t00, c, tmp);
+ Hacl_Curve25519_51_fsquare_times(t00, t00, tmp1, (uint32_t)10U);
+ Hacl_Impl_Curve25519_Field51_fmul(b, t00, b, tmp);
+ Hacl_Curve25519_51_fsquare_times(t00, b, tmp1, (uint32_t)50U);
+ Hacl_Impl_Curve25519_Field51_fmul(c, t00, b, tmp);
+ Hacl_Curve25519_51_fsquare_times(t00, c, tmp1, (uint32_t)100U);
+ Hacl_Impl_Curve25519_Field51_fmul(t00, t00, c, tmp);
+ Hacl_Curve25519_51_fsquare_times(t00, t00, tmp1, (uint32_t)50U);
+ Hacl_Impl_Curve25519_Field51_fmul(t00, t00, b, tmp);
+ Hacl_Curve25519_51_fsquare_times(t00, t00, tmp1, (uint32_t)5U);
uint64_t *a0 = t1;
uint64_t *t0 = t1 + (uint32_t)15U;
- fmul0(o, t0, a0);
+ Hacl_Impl_Curve25519_Field51_fmul(o, t0, a0, tmp);
}
static void
-encode_point(uint8_t *o, uint64_t *i)
+Hacl_Curve25519_51_encode_point(uint8_t *o, uint64_t *i)
{
uint64_t *x = i;
uint64_t *z = i + (uint32_t)5U;
@@ -829,10 +858,10 @@ encode_point(uint8_t *o, uint64_t *i)
FStar_UInt128_uint128 tmp_w[10U];
for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
tmp_w[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
- finv(tmp, z, tmp_w);
- fmul0(tmp, tmp, x);
- store_felem(u64s, tmp);
- for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)4U; i0++) {
+ Hacl_Curve25519_51_finv(tmp, z, tmp_w);
+ Hacl_Impl_Curve25519_Field51_fmul(tmp, tmp, x, tmp_w);
+ Hacl_Impl_Curve25519_Field51_store_felem(u64s, tmp);
+ for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)4U; i0 = i0 + (uint32_t)1U) {
store64_le(o + i0 * (uint32_t)8U, u64s[i0]);
}
}
@@ -842,7 +871,7 @@ Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
{
uint64_t init1[10U] = { 0U };
uint64_t tmp[4U] = { 0U };
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)4U; i = i + (uint32_t)1U) {
uint64_t *os = tmp;
uint8_t *bj = pub + i * (uint32_t)8U;
uint64_t u = load64_le(bj);
@@ -872,17 +901,17 @@ Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
x[2U] = f1h | f2l;
x[3U] = f2h | f3l;
x[4U] = f3h;
- montgomery_ladder(init1, priv, init1);
- encode_point(out, init1);
+ Hacl_Curve25519_51_montgomery_ladder(init1, priv, init1);
+ Hacl_Curve25519_51_encode_point(out, init1);
}
void
Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv)
{
uint8_t basepoint[32U] = { 0U };
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i = i + (uint32_t)1U) {
uint8_t *os = basepoint;
- uint8_t x = g25519[i];
+ uint8_t x = Hacl_Curve25519_51_g25519[i];
os[i] = x;
}
Hacl_Curve25519_51_scalarmult(pub, priv, basepoint);
@@ -894,7 +923,7 @@ Hacl_Curve25519_51_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
uint8_t zeros1[32U] = { 0U };
Hacl_Curve25519_51_scalarmult(out, priv, pub);
uint8_t res = (uint8_t)255U;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++) {
+ for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i = i + (uint32_t)1U) {
uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros1[i]);
res = uu____0 & res;
}
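Under either name on the two sides of this hunk, the cswap2 routine is the standard branch-free conditional swap used by the Montgomery ladder: a mask of all ones or all zeros is derived from the swap bit, so the limb arrays are exchanged (or left alone) without any data-dependent branch. A small self-contained sketch of the same pattern, with the names cswap_sketch and the demo main being illustrative only:

#include <stdint.h>
#include <stdio.h>

static void
cswap_sketch(uint64_t bit, uint64_t *p1, uint64_t *p2, uint32_t len)
{
    uint64_t mask = (uint64_t)0U - bit; /* bit = 1 -> all ones, bit = 0 -> zero */
    for (uint32_t i = 0U; i < len; i++) {
        uint64_t dummy = mask & (p1[i] ^ p2[i]);
        p1[i] = p1[i] ^ dummy;
        p2[i] = p2[i] ^ dummy;
    }
}

int
main(void)
{
    uint64_t a[2] = { 1U, 2U };
    uint64_t b[2] = { 3U, 4U };
    cswap_sketch((uint64_t)1U, a, b, (uint32_t)2U);
    printf("%llu %llu %llu %llu\n",
           (unsigned long long)a[0], (unsigned long long)a[1],
           (unsigned long long)b[0], (unsigned long long)b[1]); /* prints 3 4 1 2 */
    return 0;
}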
diff --git a/lib/freebl/verified/Hacl_Kremlib.h b/lib/freebl/verified/Hacl_Kremlib.h
index a2116220f..675fe4417 100644
--- a/lib/freebl/verified/Hacl_Kremlib.h
+++ b/lib/freebl/verified/Hacl_Kremlib.h
@@ -29,23 +29,23 @@
#ifndef __Hacl_Kremlib_H
#define __Hacl_Kremlib_H
-static inline uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b);
+inline static uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b);
-static inline uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b);
+inline static uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b);
-static inline uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b);
+inline static uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s);
-static inline FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a);
+inline static FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a);
-static inline uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);
+inline static uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);
-static inline FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
+inline static FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
#define __Hacl_Kremlib_H_DEFINED
#endif
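The Hacl_Kremlib.h hunks above only swap the order of the static and inline specifiers; C permits declaration specifiers in either order, so the two forms declare the same function, as the pair below illustrates (the function names are illustrative only):

/* Equivalent declarations: specifier order does not change the meaning. */
static inline uint32_t add_one_a(uint32_t x) { return x + (uint32_t)1U; }
inline static uint32_t add_one_b(uint32_t x) { return x + (uint32_t)1U; }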
diff --git a/lib/freebl/verified/Hacl_Poly1305_128.c b/lib/freebl/verified/Hacl_Poly1305_128.c
index e6293ba3e..2d1af2376 100644
--- a/lib/freebl/verified/Hacl_Poly1305_128.c
+++ b/lib/freebl/verified/Hacl_Poly1305_128.c
@@ -23,7 +23,7 @@
#include "Hacl_Poly1305_128.h"
-inline void
+void
Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc, uint8_t *b)
{
Lib_IntVector_Intrinsics_vec128 e[5U];
@@ -108,7 +108,7 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
acc[4U] = acc41;
}
-inline void
+void
Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
Lib_IntVector_Intrinsics_vec128 *out,
Lib_IntVector_Intrinsics_vec128 *p)
@@ -239,47 +239,61 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
Lib_IntVector_Intrinsics_vec128 t3 = a35;
Lib_IntVector_Intrinsics_vec128 t4 = a45;
Lib_IntVector_Intrinsics_vec128
- mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+ l0 = Lib_IntVector_Intrinsics_vec128_add64(t0, Lib_IntVector_Intrinsics_vec128_zero);
Lib_IntVector_Intrinsics_vec128
- z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
+ tmp00 =
+ Lib_IntVector_Intrinsics_vec128_and(l0,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
- Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
- Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
- Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+ c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t1, c00);
Lib_IntVector_Intrinsics_vec128
- z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+ tmp10 =
+ Lib_IntVector_Intrinsics_vec128_and(l1,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+ c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t2, c10);
Lib_IntVector_Intrinsics_vec128
- t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
- Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
- Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
- Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
- Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
- Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+ tmp20 =
+ Lib_IntVector_Intrinsics_vec128_and(l2,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+ c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t3, c20);
+ Lib_IntVector_Intrinsics_vec128
+ tmp30 =
+ Lib_IntVector_Intrinsics_vec128_and(l3,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l4 = Lib_IntVector_Intrinsics_vec128_add64(t4, c30);
Lib_IntVector_Intrinsics_vec128
- z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
- Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
- Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
- Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+ tmp40 =
+ Lib_IntVector_Intrinsics_vec128_and(l4,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
- Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
- Lib_IntVector_Intrinsics_vec128 o0 = x02;
- Lib_IntVector_Intrinsics_vec128 o10 = x12;
- Lib_IntVector_Intrinsics_vec128 o20 = x21;
- Lib_IntVector_Intrinsics_vec128 o30 = x32;
- Lib_IntVector_Intrinsics_vec128 o40 = x42;
+ c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128
+ l5 =
+ Lib_IntVector_Intrinsics_vec128_add64(tmp00,
+ Lib_IntVector_Intrinsics_vec128_smul64(c40, (uint64_t)5U));
+ Lib_IntVector_Intrinsics_vec128
+ tmp01 =
+ Lib_IntVector_Intrinsics_vec128_and(l5,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c50 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp10, c50);
+ Lib_IntVector_Intrinsics_vec128 o00 = tmp01;
+ Lib_IntVector_Intrinsics_vec128 o10 = tmp11;
+ Lib_IntVector_Intrinsics_vec128 o20 = tmp20;
+ Lib_IntVector_Intrinsics_vec128 o30 = tmp30;
+ Lib_IntVector_Intrinsics_vec128 o40 = tmp40;
Lib_IntVector_Intrinsics_vec128
o01 =
- Lib_IntVector_Intrinsics_vec128_add64(o0,
- Lib_IntVector_Intrinsics_vec128_interleave_high64(o0, o0));
+ Lib_IntVector_Intrinsics_vec128_add64(o00,
+ Lib_IntVector_Intrinsics_vec128_interleave_high64(o00, o00));
Lib_IntVector_Intrinsics_vec128
o11 =
Lib_IntVector_Intrinsics_vec128_add64(o10,
@@ -304,43 +318,50 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(o11, c0);
+ Lib_IntVector_Intrinsics_vec128 l6 = Lib_IntVector_Intrinsics_vec128_add64(o11, c0);
Lib_IntVector_Intrinsics_vec128
tmp1 =
- Lib_IntVector_Intrinsics_vec128_and(l0,
+ Lib_IntVector_Intrinsics_vec128_and(l6,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(o21, c1);
+ c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l7 = Lib_IntVector_Intrinsics_vec128_add64(o21, c1);
Lib_IntVector_Intrinsics_vec128
tmp2 =
- Lib_IntVector_Intrinsics_vec128_and(l1,
+ Lib_IntVector_Intrinsics_vec128_and(l7,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(o31, c2);
+ c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l8 = Lib_IntVector_Intrinsics_vec128_add64(o31, c2);
Lib_IntVector_Intrinsics_vec128
tmp3 =
- Lib_IntVector_Intrinsics_vec128_and(l2,
+ Lib_IntVector_Intrinsics_vec128_and(l8,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(o41, c3);
+ c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l8, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l9 = Lib_IntVector_Intrinsics_vec128_add64(o41, c3);
Lib_IntVector_Intrinsics_vec128
tmp4 =
- Lib_IntVector_Intrinsics_vec128_and(l3,
+ Lib_IntVector_Intrinsics_vec128_and(l9,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+ c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l9, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
- o00 =
+ l10 =
Lib_IntVector_Intrinsics_vec128_add64(tmp0,
Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
- Lib_IntVector_Intrinsics_vec128 o1 = tmp1;
+ Lib_IntVector_Intrinsics_vec128
+ tmp0_ =
+ Lib_IntVector_Intrinsics_vec128_and(l10,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l10, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 o0 = tmp0_;
+ Lib_IntVector_Intrinsics_vec128 o1 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
- out[0U] = o00;
+ out[0U] = o0;
out[1U] = o1;
out[2U] = o2;
out[3U] = o3;
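The large Hacl_Poly1305_128.c hunks replace one naming of the carry/normalization chain (mask261, z*, x*) with another (l*, c*, tmp*); both propagate carries across five 26-bit limbs and fold the top carry back into limb 0 multiplied by 5, since 2^130 is congruent to 5 modulo 2^130 - 5. A scalar sketch of that pass on plain 64-bit limbs follows; carry_pass_sketch is an illustrative name, and the real code operates on vec128 lanes rather than scalars.

#include <stdint.h>

/* One full carry pass over five 26-bit limbs of a Poly1305 accumulator,
   followed by the wraparound carry (times 5) and one extra limb-0 -> limb-1
   carry, mirroring the tmp01 / c5 / tmp11 tail of the hunks above. */
static void
carry_pass_sketch(uint64_t f[5])
{
    const uint64_t mask26 = (uint64_t)0x3ffffffU;
    uint64_t c = (uint64_t)0U;
    for (int i = 0; i < 5; i++) {
        uint64_t l = f[i] + c;
        f[i] = l & mask26;
        c = l >> 26U;
    }
    uint64_t l0 = f[0] + c * (uint64_t)5U; /* 2^130 = 5 (mod 2^130 - 5) */
    f[0] = l0 & mask26;
    f[1] = f[1] + (l0 >> 26U);
}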
@@ -514,43 +535,57 @@ Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k
Lib_IntVector_Intrinsics_vec128 t3 = a34;
Lib_IntVector_Intrinsics_vec128 t4 = a44;
Lib_IntVector_Intrinsics_vec128
- mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+ l = Lib_IntVector_Intrinsics_vec128_add64(t0, Lib_IntVector_Intrinsics_vec128_zero);
Lib_IntVector_Intrinsics_vec128
- z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
+ tmp0 =
+ Lib_IntVector_Intrinsics_vec128_and(l,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
- Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
- Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
- Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+ c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t1, c0);
Lib_IntVector_Intrinsics_vec128
- z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+ tmp1 =
+ Lib_IntVector_Intrinsics_vec128_and(l0,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c1);
+ Lib_IntVector_Intrinsics_vec128
+ tmp2 =
+ Lib_IntVector_Intrinsics_vec128_and(l1,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c2);
+ Lib_IntVector_Intrinsics_vec128
+ tmp3 =
+ Lib_IntVector_Intrinsics_vec128_and(l2,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+ c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c3);
+ Lib_IntVector_Intrinsics_vec128
+ tmp4 =
+ Lib_IntVector_Intrinsics_vec128_and(l3,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
- Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
- Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
- Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
- Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
- Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+ c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
- z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+ l4 =
+ Lib_IntVector_Intrinsics_vec128_add64(tmp0,
+ Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
- z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
- Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
- Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
- Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+ tmp01 =
+ Lib_IntVector_Intrinsics_vec128_and(l4,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
- Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
- Lib_IntVector_Intrinsics_vec128 o0 = x02;
- Lib_IntVector_Intrinsics_vec128 o1 = x12;
- Lib_IntVector_Intrinsics_vec128 o2 = x21;
- Lib_IntVector_Intrinsics_vec128 o3 = x32;
- Lib_IntVector_Intrinsics_vec128 o4 = x42;
+ c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
+ Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
+ Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
+ Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
+ Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
+ Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
rn[0U] = o0;
rn[1U] = o1;
rn[2U] = o2;
@@ -736,43 +771,57 @@ Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
- mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+ l = Lib_IntVector_Intrinsics_vec128_add64(t0, Lib_IntVector_Intrinsics_vec128_zero);
+ Lib_IntVector_Intrinsics_vec128
+ tmp0 =
+ Lib_IntVector_Intrinsics_vec128_and(l,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
+ c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t1, c0);
Lib_IntVector_Intrinsics_vec128
- z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask261);
- Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
- Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
- Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+ tmp1 =
+ Lib_IntVector_Intrinsics_vec128_and(l0,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+ c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c1);
Lib_IntVector_Intrinsics_vec128
- z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+ tmp2 =
+ Lib_IntVector_Intrinsics_vec128_and(l1,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
- Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
- Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
- Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
- Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
- Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+ c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c2);
Lib_IntVector_Intrinsics_vec128
- z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+ tmp3 =
+ Lib_IntVector_Intrinsics_vec128_and(l2,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
- Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
- Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
- Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+ c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c3);
Lib_IntVector_Intrinsics_vec128
- z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
- Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
- Lib_IntVector_Intrinsics_vec128 o0 = x02;
- Lib_IntVector_Intrinsics_vec128 o1 = x12;
- Lib_IntVector_Intrinsics_vec128 o2 = x21;
- Lib_IntVector_Intrinsics_vec128 o3 = x32;
- Lib_IntVector_Intrinsics_vec128 o4 = x42;
+ tmp4 =
+ Lib_IntVector_Intrinsics_vec128_and(l3,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128
+ l4 =
+ Lib_IntVector_Intrinsics_vec128_add64(tmp0,
+ Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
+ Lib_IntVector_Intrinsics_vec128
+ tmp01 =
+ Lib_IntVector_Intrinsics_vec128_and(l4,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
+ Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
+ Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
+ Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
+ Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
+ Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
@@ -798,7 +847,7 @@ Hacl_Poly1305_128_poly1305_update(
uint32_t len1 = len0 - bs;
uint8_t *text1 = t0 + bs;
uint32_t nb = len1 / bs;
- for (uint32_t i = (uint32_t)0U; i < nb; i++) {
+ for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
uint8_t *block = text1 + i * bs;
Lib_IntVector_Intrinsics_vec128 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
@@ -953,43 +1002,57 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128 t3 = a34;
Lib_IntVector_Intrinsics_vec128 t4 = a44;
Lib_IntVector_Intrinsics_vec128
- mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+ l = Lib_IntVector_Intrinsics_vec128_add64(t01, Lib_IntVector_Intrinsics_vec128_zero);
Lib_IntVector_Intrinsics_vec128
- z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
+ tmp0 =
+ Lib_IntVector_Intrinsics_vec128_and(l,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t1, c0);
+ Lib_IntVector_Intrinsics_vec128
+ tmp1 =
+ Lib_IntVector_Intrinsics_vec128_and(l0,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c1);
+ Lib_IntVector_Intrinsics_vec128
+ tmp2 =
+ Lib_IntVector_Intrinsics_vec128_and(l1,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c2);
Lib_IntVector_Intrinsics_vec128
- z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
- Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
- Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
- Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+ tmp3 =
+ Lib_IntVector_Intrinsics_vec128_and(l2,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+ c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c3);
Lib_IntVector_Intrinsics_vec128
- z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+ tmp4 =
+ Lib_IntVector_Intrinsics_vec128_and(l3,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
- Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
- Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
- Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
- Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
- Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+ c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
- z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+ l4 =
+ Lib_IntVector_Intrinsics_vec128_add64(tmp0,
+ Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
- z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
- Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
- Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
- Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+ tmp01 =
+ Lib_IntVector_Intrinsics_vec128_and(l4,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
- Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
- Lib_IntVector_Intrinsics_vec128 o00 = x02;
- Lib_IntVector_Intrinsics_vec128 o10 = x12;
- Lib_IntVector_Intrinsics_vec128 o20 = x21;
- Lib_IntVector_Intrinsics_vec128 o30 = x32;
- Lib_IntVector_Intrinsics_vec128 o40 = x42;
+ c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
+ Lib_IntVector_Intrinsics_vec128 o00 = tmp01;
+ Lib_IntVector_Intrinsics_vec128 o10 = tmp11;
+ Lib_IntVector_Intrinsics_vec128 o20 = tmp2;
+ Lib_IntVector_Intrinsics_vec128 o30 = tmp3;
+ Lib_IntVector_Intrinsics_vec128 o40 = tmp4;
acc[0U] = o00;
acc[1U] = o10;
acc[2U] = o20;
@@ -1022,7 +1085,7 @@ Hacl_Poly1305_128_poly1305_update(
uint8_t *t1 = text + len0;
uint32_t nb = len1 / (uint32_t)16U;
uint32_t rem1 = len1 % (uint32_t)16U;
- for (uint32_t i = (uint32_t)0U; i < nb; i++) {
+ for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
uint8_t *block = t1 + i * (uint32_t)16U;
Lib_IntVector_Intrinsics_vec128 e[5U];
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
@@ -1187,43 +1250,57 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
- mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+ l = Lib_IntVector_Intrinsics_vec128_add64(t01, Lib_IntVector_Intrinsics_vec128_zero);
+ Lib_IntVector_Intrinsics_vec128
+ tmp0 =
+ Lib_IntVector_Intrinsics_vec128_and(l,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t11, c0);
+ Lib_IntVector_Intrinsics_vec128
+ tmp1 =
+ Lib_IntVector_Intrinsics_vec128_and(l0,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c1);
+ Lib_IntVector_Intrinsics_vec128
+ tmp2 =
+ Lib_IntVector_Intrinsics_vec128_and(l1,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
+ c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c2);
Lib_IntVector_Intrinsics_vec128
- z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
- Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
- Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
- Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+ tmp3 =
+ Lib_IntVector_Intrinsics_vec128_and(l2,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+ c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c3);
Lib_IntVector_Intrinsics_vec128
- z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+ tmp4 =
+ Lib_IntVector_Intrinsics_vec128_and(l3,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
- Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
- Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
- Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
- Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
- Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+ c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
- z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+ l4 =
+ Lib_IntVector_Intrinsics_vec128_add64(tmp0,
+ Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
- z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
- Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
- Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
- Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+ tmp01 =
+ Lib_IntVector_Intrinsics_vec128_and(l4,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
- Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
- Lib_IntVector_Intrinsics_vec128 o0 = x02;
- Lib_IntVector_Intrinsics_vec128 o1 = x12;
- Lib_IntVector_Intrinsics_vec128 o2 = x21;
- Lib_IntVector_Intrinsics_vec128 o3 = x32;
- Lib_IntVector_Intrinsics_vec128 o4 = x42;
+ c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
+ Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
+ Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
+ Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
+ Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
+ Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
@@ -1236,7 +1313,7 @@ Hacl_Poly1305_128_poly1305_update(
for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
e[_i] = Lib_IntVector_Intrinsics_vec128_zero;
uint8_t tmp[16U] = { 0U };
- memcpy(tmp, last1, rem1 * sizeof(last1[0U]));
+ memcpy(tmp, last1, rem1 * sizeof last1[0U]);
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
@@ -1397,43 +1474,57 @@ Hacl_Poly1305_128_poly1305_update(
Lib_IntVector_Intrinsics_vec128 t3 = a36;
Lib_IntVector_Intrinsics_vec128 t4 = a46;
Lib_IntVector_Intrinsics_vec128
- mask261 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
+ l = Lib_IntVector_Intrinsics_vec128_add64(t01, Lib_IntVector_Intrinsics_vec128_zero);
+ Lib_IntVector_Intrinsics_vec128
+ tmp0 =
+ Lib_IntVector_Intrinsics_vec128_and(l,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(t11, c0);
+ Lib_IntVector_Intrinsics_vec128
+ tmp1 =
+ Lib_IntVector_Intrinsics_vec128_and(l0,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(t2, c1);
+ Lib_IntVector_Intrinsics_vec128
+ tmp2 =
+ Lib_IntVector_Intrinsics_vec128_and(l1,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
+ c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(t3, c2);
Lib_IntVector_Intrinsics_vec128
- z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask261);
- Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask261);
- Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
- Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
+ tmp3 =
+ Lib_IntVector_Intrinsics_vec128_and(l2,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
+ c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(t4, c3);
Lib_IntVector_Intrinsics_vec128
- z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
+ tmp4 =
+ Lib_IntVector_Intrinsics_vec128_and(l3,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
- Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
- Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask261);
- Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask261);
- Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
- Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
+ c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
- z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
+ l4 =
+ Lib_IntVector_Intrinsics_vec128_add64(tmp0,
+ Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
Lib_IntVector_Intrinsics_vec128
- z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask261);
- Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask261);
- Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
- Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
+ tmp01 =
+ Lib_IntVector_Intrinsics_vec128_and(l4,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask261);
- Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
- Lib_IntVector_Intrinsics_vec128 o0 = x02;
- Lib_IntVector_Intrinsics_vec128 o1 = x12;
- Lib_IntVector_Intrinsics_vec128 o2 = x21;
- Lib_IntVector_Intrinsics_vec128 o3 = x32;
- Lib_IntVector_Intrinsics_vec128 o4 = x42;
+ c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 tmp11 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
+ Lib_IntVector_Intrinsics_vec128 o0 = tmp01;
+ Lib_IntVector_Intrinsics_vec128 o1 = tmp11;
+ Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
+ Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
+ Lib_IntVector_Intrinsics_vec128 o4 = tmp4;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
@@ -1452,126 +1543,89 @@ Hacl_Poly1305_128_poly1305_finish(
Lib_IntVector_Intrinsics_vec128 *acc = ctx;
uint8_t *ks = key + (uint32_t)16U;
Lib_IntVector_Intrinsics_vec128 f0 = acc[0U];
- Lib_IntVector_Intrinsics_vec128 f13 = acc[1U];
- Lib_IntVector_Intrinsics_vec128 f23 = acc[2U];
- Lib_IntVector_Intrinsics_vec128 f33 = acc[3U];
+ Lib_IntVector_Intrinsics_vec128 f12 = acc[1U];
+ Lib_IntVector_Intrinsics_vec128 f22 = acc[2U];
+ Lib_IntVector_Intrinsics_vec128 f32 = acc[3U];
Lib_IntVector_Intrinsics_vec128 f40 = acc[4U];
Lib_IntVector_Intrinsics_vec128
- l0 = Lib_IntVector_Intrinsics_vec128_add64(f0, Lib_IntVector_Intrinsics_vec128_zero);
- Lib_IntVector_Intrinsics_vec128
- tmp00 =
- Lib_IntVector_Intrinsics_vec128_and(l0,
- Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
- Lib_IntVector_Intrinsics_vec128
- c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(f13, c00);
- Lib_IntVector_Intrinsics_vec128
- tmp10 =
- Lib_IntVector_Intrinsics_vec128_and(l1,
- Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
- Lib_IntVector_Intrinsics_vec128
- c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(f23, c10);
- Lib_IntVector_Intrinsics_vec128
- tmp20 =
- Lib_IntVector_Intrinsics_vec128_and(l2,
- Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
- Lib_IntVector_Intrinsics_vec128
- c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(f33, c20);
- Lib_IntVector_Intrinsics_vec128
- tmp30 =
- Lib_IntVector_Intrinsics_vec128_and(l3,
- Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
- Lib_IntVector_Intrinsics_vec128
- c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 l4 = Lib_IntVector_Intrinsics_vec128_add64(f40, c30);
- Lib_IntVector_Intrinsics_vec128
- tmp40 =
- Lib_IntVector_Intrinsics_vec128_and(l4,
- Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
- Lib_IntVector_Intrinsics_vec128
- c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128
- f010 =
- Lib_IntVector_Intrinsics_vec128_add64(tmp00,
- Lib_IntVector_Intrinsics_vec128_smul64(c40, (uint64_t)5U));
- Lib_IntVector_Intrinsics_vec128 f110 = tmp10;
- Lib_IntVector_Intrinsics_vec128 f210 = tmp20;
- Lib_IntVector_Intrinsics_vec128 f310 = tmp30;
- Lib_IntVector_Intrinsics_vec128 f410 = tmp40;
- Lib_IntVector_Intrinsics_vec128
- l = Lib_IntVector_Intrinsics_vec128_add64(f010, Lib_IntVector_Intrinsics_vec128_zero);
+ l = Lib_IntVector_Intrinsics_vec128_add64(f0, Lib_IntVector_Intrinsics_vec128_zero);
Lib_IntVector_Intrinsics_vec128
tmp0 =
Lib_IntVector_Intrinsics_vec128_and(l,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 l5 = Lib_IntVector_Intrinsics_vec128_add64(f110, c0);
+ Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(f12, c0);
Lib_IntVector_Intrinsics_vec128
tmp1 =
- Lib_IntVector_Intrinsics_vec128_and(l5,
+ Lib_IntVector_Intrinsics_vec128_and(l0,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 l6 = Lib_IntVector_Intrinsics_vec128_add64(f210, c1);
+ c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(f22, c1);
Lib_IntVector_Intrinsics_vec128
tmp2 =
- Lib_IntVector_Intrinsics_vec128_and(l6,
+ Lib_IntVector_Intrinsics_vec128_and(l1,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 l7 = Lib_IntVector_Intrinsics_vec128_add64(f310, c2);
+ c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(f32, c2);
Lib_IntVector_Intrinsics_vec128
tmp3 =
- Lib_IntVector_Intrinsics_vec128_and(l7,
+ Lib_IntVector_Intrinsics_vec128_and(l2,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, (uint32_t)26U);
- Lib_IntVector_Intrinsics_vec128 l8 = Lib_IntVector_Intrinsics_vec128_add64(f410, c3);
+ c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(f40, c3);
Lib_IntVector_Intrinsics_vec128
tmp4 =
- Lib_IntVector_Intrinsics_vec128_and(l8,
+ Lib_IntVector_Intrinsics_vec128_and(l3,
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
Lib_IntVector_Intrinsics_vec128
- c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l8, (uint32_t)26U);
+ c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
Lib_IntVector_Intrinsics_vec128
- f02 =
+ l4 =
Lib_IntVector_Intrinsics_vec128_add64(tmp0,
Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
- Lib_IntVector_Intrinsics_vec128 f12 = tmp1;
- Lib_IntVector_Intrinsics_vec128 f22 = tmp2;
- Lib_IntVector_Intrinsics_vec128 f32 = tmp3;
- Lib_IntVector_Intrinsics_vec128 f42 = tmp4;
+ Lib_IntVector_Intrinsics_vec128
+ tmp0_ =
+ Lib_IntVector_Intrinsics_vec128_and(l4,
+ Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+ Lib_IntVector_Intrinsics_vec128
+ c5 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+ Lib_IntVector_Intrinsics_vec128 f010 = tmp0_;
+ Lib_IntVector_Intrinsics_vec128 f110 = Lib_IntVector_Intrinsics_vec128_add64(tmp1, c5);
+ Lib_IntVector_Intrinsics_vec128 f210 = tmp2;
+ Lib_IntVector_Intrinsics_vec128 f310 = tmp3;
+ Lib_IntVector_Intrinsics_vec128 f410 = tmp4;
Lib_IntVector_Intrinsics_vec128
mh = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
Lib_IntVector_Intrinsics_vec128
ml = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffffbU);
- Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_eq64(f42, mh);
+ Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_eq64(f410, mh);
Lib_IntVector_Intrinsics_vec128
mask1 =
Lib_IntVector_Intrinsics_vec128_and(mask,
- Lib_IntVector_Intrinsics_vec128_eq64(f32, mh));
+ Lib_IntVector_Intrinsics_vec128_eq64(f310, mh));
Lib_IntVector_Intrinsics_vec128
mask2 =
Lib_IntVector_Intrinsics_vec128_and(mask1,
- Lib_IntVector_Intrinsics_vec128_eq64(f22, mh));
+ Lib_IntVector_Intrinsics_vec128_eq64(f210, mh));
Lib_IntVector_Intrinsics_vec128
mask3 =
Lib_IntVector_Intrinsics_vec128_and(mask2,
- Lib_IntVector_Intrinsics_vec128_eq64(f12, mh));
+ Lib_IntVector_Intrinsics_vec128_eq64(f110, mh));
Lib_IntVector_Intrinsics_vec128
mask4 =
Lib_IntVector_Intrinsics_vec128_and(mask3,
- Lib_IntVector_Intrinsics_vec128_lognot(Lib_IntVector_Intrinsics_vec128_gt64(ml, f02)));
+ Lib_IntVector_Intrinsics_vec128_lognot(Lib_IntVector_Intrinsics_vec128_gt64(ml, f010)));
Lib_IntVector_Intrinsics_vec128 ph = Lib_IntVector_Intrinsics_vec128_and(mask4, mh);
Lib_IntVector_Intrinsics_vec128 pl = Lib_IntVector_Intrinsics_vec128_and(mask4, ml);
- Lib_IntVector_Intrinsics_vec128 o0 = Lib_IntVector_Intrinsics_vec128_sub64(f02, pl);
- Lib_IntVector_Intrinsics_vec128 o1 = Lib_IntVector_Intrinsics_vec128_sub64(f12, ph);
- Lib_IntVector_Intrinsics_vec128 o2 = Lib_IntVector_Intrinsics_vec128_sub64(f22, ph);
- Lib_IntVector_Intrinsics_vec128 o3 = Lib_IntVector_Intrinsics_vec128_sub64(f32, ph);
- Lib_IntVector_Intrinsics_vec128 o4 = Lib_IntVector_Intrinsics_vec128_sub64(f42, ph);
+ Lib_IntVector_Intrinsics_vec128 o0 = Lib_IntVector_Intrinsics_vec128_sub64(f010, pl);
+ Lib_IntVector_Intrinsics_vec128 o1 = Lib_IntVector_Intrinsics_vec128_sub64(f110, ph);
+ Lib_IntVector_Intrinsics_vec128 o2 = Lib_IntVector_Intrinsics_vec128_sub64(f210, ph);
+ Lib_IntVector_Intrinsics_vec128 o3 = Lib_IntVector_Intrinsics_vec128_sub64(f310, ph);
+ Lib_IntVector_Intrinsics_vec128 o4 = Lib_IntVector_Intrinsics_vec128_sub64(f410, ph);
Lib_IntVector_Intrinsics_vec128 f011 = o0;
Lib_IntVector_Intrinsics_vec128 f111 = o1;
Lib_IntVector_Intrinsics_vec128 f211 = o2;
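Hacl_Poly1305_128_poly1305_finish above ends with a constant-time conditional subtraction: the eq64/gt64 masks test whether the accumulator has reached p = 2^130 - 5, whose 26-bit limbs are (0x3fffffb, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), and subtract p without branching when it has. The same pattern appears in scalar form in Hacl_Poly1305_32.c below; as a compact sketch (helper name hypothetical, masks taken from FStar_UInt_8_16_32_64.h further down in this patch):

#include <stdint.h>
/* Sketch of the branch-free final reduction (name hypothetical).
   FStar_UInt64_eq_mask / FStar_UInt64_gte_mask return all-ones or all-zero;
   they are declared in FStar_UInt_8_16_32_64.h. */
static inline void
poly1305_freeze_sketch(uint64_t f[5])
{
    uint64_t mh = (uint64_t)0x3ffffffU; /* high limbs of p */
    uint64_t ml = (uint64_t)0x3fffffbU; /* low limb of p */
    uint64_t m = FStar_UInt64_eq_mask(f[4], mh) &
                 FStar_UInt64_eq_mask(f[3], mh) &
                 FStar_UInt64_eq_mask(f[2], mh) &
                 FStar_UInt64_eq_mask(f[1], mh) &
                 FStar_UInt64_gte_mask(f[0], ml); /* all-ones iff acc >= p */
    f[0] = f[0] - (m & ml);
    f[1] = f[1] - (m & mh);
    f[2] = f[2] - (m & mh);
    f[3] = f[3] - (m & mh);
    f[4] = f[4] - (m & mh);
}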
diff --git a/lib/freebl/verified/Hacl_Poly1305_32.c b/lib/freebl/verified/Hacl_Poly1305_32.c
index 0ed0174ea..a447d8307 100644
--- a/lib/freebl/verified/Hacl_Poly1305_32.c
+++ b/lib/freebl/verified/Hacl_Poly1305_32.c
@@ -174,35 +174,30 @@ Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
- uint64_t mask261 = (uint64_t)0x3ffffffU;
- uint64_t z0 = t0 >> (uint32_t)26U;
- uint64_t z1 = t3 >> (uint32_t)26U;
- uint64_t x0 = t0 & mask261;
- uint64_t x3 = t3 & mask261;
- uint64_t x1 = t1 + z0;
- uint64_t x4 = t4 + z1;
- uint64_t z01 = x1 >> (uint32_t)26U;
- uint64_t z11 = x4 >> (uint32_t)26U;
- uint64_t t = z11 << (uint32_t)2U;
- uint64_t z12 = z11 + t;
- uint64_t x11 = x1 & mask261;
- uint64_t x41 = x4 & mask261;
- uint64_t x2 = t2 + z01;
- uint64_t x01 = x0 + z12;
- uint64_t z02 = x2 >> (uint32_t)26U;
- uint64_t z13 = x01 >> (uint32_t)26U;
- uint64_t x21 = x2 & mask261;
- uint64_t x02 = x01 & mask261;
- uint64_t x31 = x3 + z02;
- uint64_t x12 = x11 + z13;
- uint64_t z03 = x31 >> (uint32_t)26U;
- uint64_t x32 = x31 & mask261;
- uint64_t x42 = x41 + z03;
- uint64_t o0 = x02;
- uint64_t o1 = x12;
- uint64_t o2 = x21;
- uint64_t o3 = x32;
- uint64_t o4 = x42;
+ uint64_t l = t0 + (uint64_t)0U;
+ uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
+ uint64_t c0 = l >> (uint32_t)26U;
+ uint64_t l0 = t1 + c0;
+ uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
+ uint64_t c1 = l0 >> (uint32_t)26U;
+ uint64_t l1 = t2 + c1;
+ uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
+ uint64_t c2 = l1 >> (uint32_t)26U;
+ uint64_t l2 = t3 + c2;
+ uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
+ uint64_t c3 = l2 >> (uint32_t)26U;
+ uint64_t l3 = t4 + c3;
+ uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
+ uint64_t c4 = l3 >> (uint32_t)26U;
+ uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
+ uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
+ uint64_t c5 = l4 >> (uint32_t)26U;
+ uint64_t tmp11 = tmp1 + c5;
+ uint64_t o0 = tmp01;
+ uint64_t o1 = tmp11;
+ uint64_t o2 = tmp2;
+ uint64_t o3 = tmp3;
+ uint64_t o4 = tmp4;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
@@ -217,7 +212,7 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t *acc = ctx;
uint32_t nb = len / (uint32_t)16U;
uint32_t rem1 = len % (uint32_t)16U;
- for (uint32_t i = (uint32_t)0U; i < nb; i++) {
+ for (uint32_t i = (uint32_t)0U; i < nb; i = i + (uint32_t)1U) {
uint8_t *block = text + i * (uint32_t)16U;
uint64_t e[5U] = { 0U };
uint64_t u0 = load64_le(block);
@@ -301,35 +296,30 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
- uint64_t mask261 = (uint64_t)0x3ffffffU;
- uint64_t z0 = t0 >> (uint32_t)26U;
- uint64_t z1 = t3 >> (uint32_t)26U;
- uint64_t x0 = t0 & mask261;
- uint64_t x3 = t3 & mask261;
- uint64_t x1 = t1 + z0;
- uint64_t x4 = t4 + z1;
- uint64_t z01 = x1 >> (uint32_t)26U;
- uint64_t z11 = x4 >> (uint32_t)26U;
- uint64_t t = z11 << (uint32_t)2U;
- uint64_t z12 = z11 + t;
- uint64_t x11 = x1 & mask261;
- uint64_t x41 = x4 & mask261;
- uint64_t x2 = t2 + z01;
- uint64_t x01 = x0 + z12;
- uint64_t z02 = x2 >> (uint32_t)26U;
- uint64_t z13 = x01 >> (uint32_t)26U;
- uint64_t x21 = x2 & mask261;
- uint64_t x02 = x01 & mask261;
- uint64_t x31 = x3 + z02;
- uint64_t x12 = x11 + z13;
- uint64_t z03 = x31 >> (uint32_t)26U;
- uint64_t x32 = x31 & mask261;
- uint64_t x42 = x41 + z03;
- uint64_t o0 = x02;
- uint64_t o1 = x12;
- uint64_t o2 = x21;
- uint64_t o3 = x32;
- uint64_t o4 = x42;
+ uint64_t l = t0 + (uint64_t)0U;
+ uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
+ uint64_t c0 = l >> (uint32_t)26U;
+ uint64_t l0 = t1 + c0;
+ uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
+ uint64_t c1 = l0 >> (uint32_t)26U;
+ uint64_t l1 = t2 + c1;
+ uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
+ uint64_t c2 = l1 >> (uint32_t)26U;
+ uint64_t l2 = t3 + c2;
+ uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
+ uint64_t c3 = l2 >> (uint32_t)26U;
+ uint64_t l3 = t4 + c3;
+ uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
+ uint64_t c4 = l3 >> (uint32_t)26U;
+ uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
+ uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
+ uint64_t c5 = l4 >> (uint32_t)26U;
+ uint64_t tmp11 = tmp1 + c5;
+ uint64_t o0 = tmp01;
+ uint64_t o1 = tmp11;
+ uint64_t o2 = tmp2;
+ uint64_t o3 = tmp3;
+ uint64_t o4 = tmp4;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
@@ -340,7 +330,7 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
uint8_t *last1 = text + nb * (uint32_t)16U;
uint64_t e[5U] = { 0U };
uint8_t tmp[16U] = { 0U };
- memcpy(tmp, last1, rem1 * sizeof(last1[0U]));
+ memcpy(tmp, last1, rem1 * sizeof last1[0U]);
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
@@ -422,35 +412,30 @@ Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
uint64_t t2 = a26;
uint64_t t3 = a36;
uint64_t t4 = a46;
- uint64_t mask261 = (uint64_t)0x3ffffffU;
- uint64_t z0 = t0 >> (uint32_t)26U;
- uint64_t z1 = t3 >> (uint32_t)26U;
- uint64_t x0 = t0 & mask261;
- uint64_t x3 = t3 & mask261;
- uint64_t x1 = t1 + z0;
- uint64_t x4 = t4 + z1;
- uint64_t z01 = x1 >> (uint32_t)26U;
- uint64_t z11 = x4 >> (uint32_t)26U;
- uint64_t t = z11 << (uint32_t)2U;
- uint64_t z12 = z11 + t;
- uint64_t x11 = x1 & mask261;
- uint64_t x41 = x4 & mask261;
- uint64_t x2 = t2 + z01;
- uint64_t x01 = x0 + z12;
- uint64_t z02 = x2 >> (uint32_t)26U;
- uint64_t z13 = x01 >> (uint32_t)26U;
- uint64_t x21 = x2 & mask261;
- uint64_t x02 = x01 & mask261;
- uint64_t x31 = x3 + z02;
- uint64_t x12 = x11 + z13;
- uint64_t z03 = x31 >> (uint32_t)26U;
- uint64_t x32 = x31 & mask261;
- uint64_t x42 = x41 + z03;
- uint64_t o0 = x02;
- uint64_t o1 = x12;
- uint64_t o2 = x21;
- uint64_t o3 = x32;
- uint64_t o4 = x42;
+ uint64_t l = t0 + (uint64_t)0U;
+ uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
+ uint64_t c0 = l >> (uint32_t)26U;
+ uint64_t l0 = t1 + c0;
+ uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
+ uint64_t c1 = l0 >> (uint32_t)26U;
+ uint64_t l1 = t2 + c1;
+ uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
+ uint64_t c2 = l1 >> (uint32_t)26U;
+ uint64_t l2 = t3 + c2;
+ uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
+ uint64_t c3 = l2 >> (uint32_t)26U;
+ uint64_t l3 = t4 + c3;
+ uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
+ uint64_t c4 = l3 >> (uint32_t)26U;
+ uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
+ uint64_t tmp01 = l4 & (uint64_t)0x3ffffffU;
+ uint64_t c5 = l4 >> (uint32_t)26U;
+ uint64_t tmp11 = tmp1 + c5;
+ uint64_t o0 = tmp01;
+ uint64_t o1 = tmp11;
+ uint64_t o2 = tmp2;
+ uint64_t o3 = tmp3;
+ uint64_t o4 = tmp4;
acc[0U] = o0;
acc[1U] = o1;
acc[2U] = o2;
@@ -466,64 +451,47 @@ Hacl_Poly1305_32_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx)
uint64_t *acc = ctx;
uint8_t *ks = key + (uint32_t)16U;
uint64_t f0 = acc[0U];
- uint64_t f13 = acc[1U];
- uint64_t f23 = acc[2U];
- uint64_t f33 = acc[3U];
+ uint64_t f12 = acc[1U];
+ uint64_t f22 = acc[2U];
+ uint64_t f32 = acc[3U];
uint64_t f40 = acc[4U];
- uint64_t l0 = f0 + (uint64_t)0U;
- uint64_t tmp00 = l0 & (uint64_t)0x3ffffffU;
- uint64_t c00 = l0 >> (uint32_t)26U;
- uint64_t l1 = f13 + c00;
- uint64_t tmp10 = l1 & (uint64_t)0x3ffffffU;
- uint64_t c10 = l1 >> (uint32_t)26U;
- uint64_t l2 = f23 + c10;
- uint64_t tmp20 = l2 & (uint64_t)0x3ffffffU;
- uint64_t c20 = l2 >> (uint32_t)26U;
- uint64_t l3 = f33 + c20;
- uint64_t tmp30 = l3 & (uint64_t)0x3ffffffU;
- uint64_t c30 = l3 >> (uint32_t)26U;
- uint64_t l4 = f40 + c30;
- uint64_t tmp40 = l4 & (uint64_t)0x3ffffffU;
- uint64_t c40 = l4 >> (uint32_t)26U;
- uint64_t f010 = tmp00 + c40 * (uint64_t)5U;
- uint64_t f110 = tmp10;
- uint64_t f210 = tmp20;
- uint64_t f310 = tmp30;
- uint64_t f410 = tmp40;
- uint64_t l = f010 + (uint64_t)0U;
+ uint64_t l = f0 + (uint64_t)0U;
uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
uint64_t c0 = l >> (uint32_t)26U;
- uint64_t l5 = f110 + c0;
- uint64_t tmp1 = l5 & (uint64_t)0x3ffffffU;
- uint64_t c1 = l5 >> (uint32_t)26U;
- uint64_t l6 = f210 + c1;
- uint64_t tmp2 = l6 & (uint64_t)0x3ffffffU;
- uint64_t c2 = l6 >> (uint32_t)26U;
- uint64_t l7 = f310 + c2;
- uint64_t tmp3 = l7 & (uint64_t)0x3ffffffU;
- uint64_t c3 = l7 >> (uint32_t)26U;
- uint64_t l8 = f410 + c3;
- uint64_t tmp4 = l8 & (uint64_t)0x3ffffffU;
- uint64_t c4 = l8 >> (uint32_t)26U;
- uint64_t f02 = tmp0 + c4 * (uint64_t)5U;
- uint64_t f12 = tmp1;
- uint64_t f22 = tmp2;
- uint64_t f32 = tmp3;
- uint64_t f42 = tmp4;
+ uint64_t l0 = f12 + c0;
+ uint64_t tmp1 = l0 & (uint64_t)0x3ffffffU;
+ uint64_t c1 = l0 >> (uint32_t)26U;
+ uint64_t l1 = f22 + c1;
+ uint64_t tmp2 = l1 & (uint64_t)0x3ffffffU;
+ uint64_t c2 = l1 >> (uint32_t)26U;
+ uint64_t l2 = f32 + c2;
+ uint64_t tmp3 = l2 & (uint64_t)0x3ffffffU;
+ uint64_t c3 = l2 >> (uint32_t)26U;
+ uint64_t l3 = f40 + c3;
+ uint64_t tmp4 = l3 & (uint64_t)0x3ffffffU;
+ uint64_t c4 = l3 >> (uint32_t)26U;
+ uint64_t l4 = tmp0 + c4 * (uint64_t)5U;
+ uint64_t tmp0_ = l4 & (uint64_t)0x3ffffffU;
+ uint64_t c5 = l4 >> (uint32_t)26U;
+ uint64_t f010 = tmp0_;
+ uint64_t f110 = tmp1 + c5;
+ uint64_t f210 = tmp2;
+ uint64_t f310 = tmp3;
+ uint64_t f410 = tmp4;
uint64_t mh = (uint64_t)0x3ffffffU;
uint64_t ml = (uint64_t)0x3fffffbU;
- uint64_t mask = FStar_UInt64_eq_mask(f42, mh);
- uint64_t mask1 = mask & FStar_UInt64_eq_mask(f32, mh);
- uint64_t mask2 = mask1 & FStar_UInt64_eq_mask(f22, mh);
- uint64_t mask3 = mask2 & FStar_UInt64_eq_mask(f12, mh);
- uint64_t mask4 = mask3 & ~~FStar_UInt64_gte_mask(f02, ml);
+ uint64_t mask = FStar_UInt64_eq_mask(f410, mh);
+ uint64_t mask1 = mask & FStar_UInt64_eq_mask(f310, mh);
+ uint64_t mask2 = mask1 & FStar_UInt64_eq_mask(f210, mh);
+ uint64_t mask3 = mask2 & FStar_UInt64_eq_mask(f110, mh);
+ uint64_t mask4 = mask3 & ~~FStar_UInt64_gte_mask(f010, ml);
uint64_t ph = mask4 & mh;
uint64_t pl = mask4 & ml;
- uint64_t o0 = f02 - pl;
- uint64_t o1 = f12 - ph;
- uint64_t o2 = f22 - ph;
- uint64_t o3 = f32 - ph;
- uint64_t o4 = f42 - ph;
+ uint64_t o0 = f010 - pl;
+ uint64_t o1 = f110 - ph;
+ uint64_t o2 = f210 - ph;
+ uint64_t o3 = f310 - ph;
+ uint64_t o4 = f410 - ph;
uint64_t f011 = o0;
uint64_t f111 = o1;
uint64_t f211 = o2;
diff --git a/lib/freebl/verified/kremlin/include/kremlin/internal/types.h b/lib/freebl/verified/kremlin/include/kremlin/internal/types.h
index f65e40cba..67d5af5f0 100644
--- a/lib/freebl/verified/kremlin/include/kremlin/internal/types.h
+++ b/lib/freebl/verified/kremlin/include/kremlin/internal/types.h
@@ -52,16 +52,12 @@ typedef const char *Prims_string;
/* The uint128 type is a special case since we offer several implementations of
* it, depending on the compiler and whether the user wants the verified
* implementation or not. */
-#if !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && defined(_M_X64) && \
- !defined(__clang__)
+#if !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && defined(_M_X64)
#include <emmintrin.h>
typedef __m128i FStar_UInt128_uint128;
-#elif !defined(KRML_VERIFIED_UINT128) && !defined(_MSC_VER) && \
- (defined(__x86_64__) || defined(__x86_64) || defined(__aarch64__) || \
- (defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)))
+#elif !defined(KRML_VERIFIED_UINT128) && !defined(_MSC_VER) && \
+ (defined(__x86_64__) || defined(__x86_64) || defined(__aarch64__))
typedef unsigned __int128 FStar_UInt128_uint128;
-#elif !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && defined(__clang__)
-typedef __uint128_t FStar_UInt128_uint128;
#else
typedef struct FStar_UInt128_uint128_s {
uint64_t low;
@@ -76,11 +72,11 @@ typedef FStar_UInt128_uint128 FStar_UInt128_t, uint128_t;
#include "kremlin/lowstar_endianness.h"
/* This one is always included, because it defines C.Endianness functions too. */
-#if !defined(_MSC_VER) || defined(__clang__)
+#if !defined(_MSC_VER)
#include "fstar_uint128_gcc64.h"
#endif
-#if !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER) && !defined(__clang__)
+#if !defined(KRML_VERIFIED_UINT128) && defined(_MSC_VER)
#include "fstar_uint128_msvc.h"
#elif defined(KRML_VERIFIED_UINT128)
#include "FStar_UInt128_Verified.h"
diff --git a/lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt128.h b/lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt128.h
index 791d3460b..824728b9e 100644
--- a/lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt128.h
+++ b/lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt128.h
@@ -1,6 +1,11 @@
/*
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
Licensed under the Apache 2.0 License.
+
+ This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
+ KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -skip-compilation -extract-uints -tmpdir dist/minimal -bundle FStar.UInt64+FStar.UInt32+FStar.UInt16+FStar.UInt8=[rename=FStar_UInt_8_16_32_64] -library FStar.UInt128 -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/compat.h" -add-include "kremlin/lowstar_endianness.h" -add-include "kremlin/internal/types.h" -add-include "kremlin/internal/target.h" -bundle LowStar.Endianness= -bundle FStar.UInt128= -bundle *,WindowsWorkaroundSigh fstar_uint128.c -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_UInt8.krml .extract/FStar_Endianness.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_SyntaxHelpers.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/C_String.krml .extract/FStar_Bytes.krml .extract/FStar_HyperStack_IO.krml .extract/LowStar_Printf.krml 
.extract/LowStar_Endianness.krml .extract/C_Failure.krml .extract/TestLib.krml .extract/FStar_Int_Cast_Full.krml
+ F* version: b0467796
+ KreMLin version: ab4c97c6
*/
#include <inttypes.h>
@@ -13,70 +18,70 @@
#ifndef __FStar_UInt128_H
#define __FStar_UInt128_H
-static inline uint64_t
+inline static uint64_t
FStar_UInt128___proj__Mkuint128__item__low(FStar_UInt128_uint128 projectee);
-static inline uint64_t
+inline static uint64_t
FStar_UInt128___proj__Mkuint128__item__high(FStar_UInt128_uint128 projectee);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a);
+inline static FStar_UInt128_uint128 FStar_UInt128_lognot(FStar_UInt128_uint128 a);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s);
-static inline bool FStar_UInt128_eq(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+inline static bool FStar_UInt128_eq(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline bool FStar_UInt128_gt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+inline static bool FStar_UInt128_gt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline bool FStar_UInt128_lt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+inline static bool FStar_UInt128_lt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline bool FStar_UInt128_gte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+inline static bool FStar_UInt128_gte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline bool FStar_UInt128_lte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
+inline static bool FStar_UInt128_lte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b);
-static inline FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a);
+inline static FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a);
-static inline uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);
+inline static uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a);
-static inline FStar_UInt128_uint128 FStar_UInt128_mul32(uint64_t x, uint32_t y);
+inline static FStar_UInt128_uint128 FStar_UInt128_mul32(uint64_t x, uint32_t y);
-static inline FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
+inline static FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y);
#define __FStar_UInt128_H_DEFINED
#endif
diff --git a/lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt128_Verified.h b/lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt128_Verified.h
index b629a954a..1ffe8435c 100644
--- a/lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt128_Verified.h
+++ b/lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt128_Verified.h
@@ -1,6 +1,11 @@
/*
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
Licensed under the Apache 2.0 License.
+
+ This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
+ KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -skip-compilation -extract-uints -tmpdir dist/uint128 -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/types.h" -add-include "kremlin/internal/target.h" -bundle FStar.UInt64[rename=FStar_UInt_8_16_32_64] -bundle FStar.UInt128=[rename=FStar_UInt128_Verified] -fc89 -bundle *,WindowsWorkaroundSigh -static-header FStar.UInt128,FStar.UInt64 -ccopt -DKRML_VERIFIED_UINT128 -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_UInt8.krml .extract/FStar_Endianness.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_SyntaxHelpers.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/C_String.krml .extract/FStar_Bytes.krml .extract/FStar_HyperStack_IO.krml .extract/LowStar_Printf.krml .extract/LowStar_Endianness.krml .extract/C_Failure.krml .extract/TestLib.krml 
.extract/FStar_Int_Cast_Full.krml
+ F* version: b0467796
+ KreMLin version: ab4c97c6
*/
#include <inttypes.h>
@@ -13,31 +18,31 @@
#include "FStar_UInt_8_16_32_64.h"
-static inline uint64_t
+inline static uint64_t
FStar_UInt128___proj__Mkuint128__item__low(FStar_UInt128_uint128 projectee)
{
return projectee.low;
}
-static inline uint64_t
+inline static uint64_t
FStar_UInt128___proj__Mkuint128__item__high(FStar_UInt128_uint128 projectee)
{
return projectee.high;
}
-static inline uint64_t
+inline static uint64_t
FStar_UInt128_constant_time_carry(uint64_t a, uint64_t b)
{
return (a ^ ((a ^ b) | ((a - b) ^ b))) >> (uint32_t)63U;
}
-static inline uint64_t
+inline static uint64_t
FStar_UInt128_carry(uint64_t a, uint64_t b)
{
return FStar_UInt128_constant_time_carry(a, b);
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
FStar_UInt128_uint128 lit;
@@ -46,7 +51,7 @@ FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
FStar_UInt128_uint128 lit;
@@ -55,7 +60,7 @@ FStar_UInt128_add_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
FStar_UInt128_uint128 lit;
@@ -64,7 +69,7 @@ FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
FStar_UInt128_uint128 lit;
@@ -73,7 +78,7 @@ FStar_UInt128_sub(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
FStar_UInt128_uint128 lit;
@@ -82,7 +87,7 @@ FStar_UInt128_sub_underspec(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_sub_mod_impl(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
FStar_UInt128_uint128 lit;
@@ -91,13 +96,13 @@ FStar_UInt128_sub_mod_impl(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return FStar_UInt128_sub_mod_impl(a, b);
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
FStar_UInt128_uint128 lit;
@@ -106,7 +111,7 @@ FStar_UInt128_logand(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
FStar_UInt128_uint128 lit;
@@ -115,7 +120,7 @@ FStar_UInt128_logxor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
FStar_UInt128_uint128 lit;
@@ -124,7 +129,7 @@ FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_lognot(FStar_UInt128_uint128 a)
{
FStar_UInt128_uint128 lit;
@@ -135,19 +140,19 @@ FStar_UInt128_lognot(FStar_UInt128_uint128 a)
static uint32_t FStar_UInt128_u32_64 = (uint32_t)64U;
-static inline uint64_t
+inline static uint64_t
FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s)
{
return (hi << s) + (lo >> (FStar_UInt128_u32_64 - s));
}
-static inline uint64_t
+inline static uint64_t
FStar_UInt128_add_u64_shift_left_respec(uint64_t hi, uint64_t lo, uint32_t s)
{
return FStar_UInt128_add_u64_shift_left(hi, lo, s);
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
{
if (s == (uint32_t)0U) {
@@ -160,7 +165,7 @@ FStar_UInt128_shift_left_small(FStar_UInt128_uint128 a, uint32_t s)
}
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s)
{
FStar_UInt128_uint128 lit;
@@ -169,7 +174,7 @@ FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s)
{
if (s < FStar_UInt128_u32_64) {
@@ -179,19 +184,19 @@ FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s)
}
}
-static inline uint64_t
+inline static uint64_t
FStar_UInt128_add_u64_shift_right(uint64_t hi, uint64_t lo, uint32_t s)
{
return (lo >> s) + (hi << (FStar_UInt128_u32_64 - s));
}
-static inline uint64_t
+inline static uint64_t
FStar_UInt128_add_u64_shift_right_respec(uint64_t hi, uint64_t lo, uint32_t s)
{
return FStar_UInt128_add_u64_shift_right(hi, lo, s);
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
{
if (s == (uint32_t)0U) {
@@ -204,7 +209,7 @@ FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s)
}
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s)
{
FStar_UInt128_uint128 lit;
@@ -213,7 +218,7 @@ FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s)
{
if (s < FStar_UInt128_u32_64) {
@@ -223,37 +228,37 @@ FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s)
}
}
-static inline bool
+inline static bool
FStar_UInt128_eq(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return a.low == b.low && a.high == b.high;
}
-static inline bool
+inline static bool
FStar_UInt128_gt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return a.high > b.high || (a.high == b.high && a.low > b.low);
}
-static inline bool
+inline static bool
FStar_UInt128_lt(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return a.high < b.high || (a.high == b.high && a.low < b.low);
}
-static inline bool
+inline static bool
FStar_UInt128_gte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return a.high > b.high || (a.high == b.high && a.low >= b.low);
}
-static inline bool
+inline static bool
FStar_UInt128_lte(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
return a.high < b.high || (a.high == b.high && a.low <= b.low);
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
FStar_UInt128_uint128 lit;
@@ -262,7 +267,7 @@ FStar_UInt128_eq_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
{
FStar_UInt128_uint128 lit;
@@ -273,7 +278,7 @@ FStar_UInt128_gte_mask(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b)
return lit;
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_uint64_to_uint128(uint64_t a)
{
FStar_UInt128_uint128 lit;
@@ -282,13 +287,13 @@ FStar_UInt128_uint64_to_uint128(uint64_t a)
return lit;
}
-static inline uint64_t
+inline static uint64_t
FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a)
{
return a.low;
}
-static inline uint64_t
+inline static uint64_t
FStar_UInt128_u64_mod_32(uint64_t a)
{
return a & (uint64_t)0xffffffffU;
@@ -296,13 +301,13 @@ FStar_UInt128_u64_mod_32(uint64_t a)
static uint32_t FStar_UInt128_u32_32 = (uint32_t)32U;
-static inline uint64_t
+inline static uint64_t
FStar_UInt128_u32_combine(uint64_t hi, uint64_t lo)
{
return lo + (hi << FStar_UInt128_u32_32);
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_mul32(uint64_t x, uint32_t y)
{
FStar_UInt128_uint128 lit;
@@ -314,13 +319,13 @@ FStar_UInt128_mul32(uint64_t x, uint32_t y)
return lit;
}
-static inline uint64_t
+inline static uint64_t
FStar_UInt128_u32_combine_(uint64_t hi, uint64_t lo)
{
return lo + (hi << FStar_UInt128_u32_32);
}
-static inline FStar_UInt128_uint128
+inline static FStar_UInt128_uint128
FStar_UInt128_mul_wide(uint64_t x, uint64_t y)
{
FStar_UInt128_uint128 lit;
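FStar_UInt128_constant_time_carry above evaluates, with no secret-dependent branch, whether a 64-bit addition wrapped: carry(sum, b) is 1 exactly when sum < b. A minimal standalone sketch of how such a carry drives a two-limb add (names illustrative; the actual add bodies are among the context lines elided from this hunk):

#include <stdint.h>
/* Standalone sketch, illustrative names only (not the library's exact bodies). */
static inline uint64_t
ct_carry_sketch(uint64_t sum, uint64_t b)
{
    /* 1 iff sum < b, i.e. iff the addition that produced sum overflowed */
    return (sum ^ ((sum ^ b) | ((sum - b) ^ b))) >> 63U;
}

static inline void
add128_sketch(uint64_t alo, uint64_t ahi, uint64_t blo, uint64_t bhi,
              uint64_t *rlo, uint64_t *rhi)
{
    *rlo = alo + blo;
    *rhi = ahi + bhi + ct_carry_sketch(*rlo, blo); /* fold the low-limb carry into the high limb */
}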
diff --git a/lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt_8_16_32_64.h b/lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt_8_16_32_64.h
index 6ca83ed01..68cd217a8 100644
--- a/lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt_8_16_32_64.h
+++ b/lib/freebl/verified/kremlin/kremlib/dist/minimal/FStar_UInt_8_16_32_64.h
@@ -1,6 +1,11 @@
/*
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
Licensed under the Apache 2.0 License.
+
+ This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
+ KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -skip-compilation -extract-uints -tmpdir dist/minimal -bundle FStar.UInt64+FStar.UInt32+FStar.UInt16+FStar.UInt8=[rename=FStar_UInt_8_16_32_64] -library FStar.UInt128 -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/compat.h" -add-include "kremlin/lowstar_endianness.h" -add-include "kremlin/internal/types.h" -add-include "kremlin/internal/target.h" -bundle LowStar.Endianness= -bundle FStar.UInt128= -bundle *,WindowsWorkaroundSigh fstar_uint128.c -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_UInt8.krml .extract/FStar_Endianness.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_SyntaxHelpers.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/C_String.krml .extract/FStar_Bytes.krml .extract/FStar_HyperStack_IO.krml .extract/LowStar_Printf.krml 
.extract/LowStar_Endianness.krml .extract/C_Failure.krml .extract/TestLib.krml .extract/FStar_Int_Cast_Full.krml
+ F* version: b0467796
+ KreMLin version: ab4c97c6
*/
#include <inttypes.h>
@@ -23,7 +28,7 @@ extern uint64_t FStar_UInt64_minus(uint64_t a);
extern uint32_t FStar_UInt64_n_minus_one;
-static inline uint64_t
+inline static uint64_t
FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
{
uint64_t x = a ^ b;
@@ -33,7 +38,7 @@ FStar_UInt64_eq_mask(uint64_t a, uint64_t b)
return xnx - (uint64_t)1U;
}
-static inline uint64_t
+inline static uint64_t
FStar_UInt64_gte_mask(uint64_t a, uint64_t b)
{
uint64_t x = a;
@@ -61,7 +66,7 @@ extern uint32_t FStar_UInt32_minus(uint32_t a);
extern uint32_t FStar_UInt32_n_minus_one;
-static inline uint32_t
+inline static uint32_t
FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
{
uint32_t x = a ^ b;
@@ -71,7 +76,7 @@ FStar_UInt32_eq_mask(uint32_t a, uint32_t b)
return xnx - (uint32_t)1U;
}
-static inline uint32_t
+inline static uint32_t
FStar_UInt32_gte_mask(uint32_t a, uint32_t b)
{
uint32_t x = a;
@@ -99,7 +104,7 @@ extern uint16_t FStar_UInt16_minus(uint16_t a);
extern uint32_t FStar_UInt16_n_minus_one;
-static inline uint16_t
+inline static uint16_t
FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
{
uint16_t x = a ^ b;
@@ -109,7 +114,7 @@ FStar_UInt16_eq_mask(uint16_t a, uint16_t b)
return xnx - (uint16_t)1U;
}
-static inline uint16_t
+inline static uint16_t
FStar_UInt16_gte_mask(uint16_t a, uint16_t b)
{
uint16_t x = a;
@@ -137,7 +142,7 @@ extern uint8_t FStar_UInt8_minus(uint8_t a);
extern uint32_t FStar_UInt8_n_minus_one;
-static inline uint8_t
+inline static uint8_t
FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
{
uint8_t x = a ^ b;
@@ -147,7 +152,7 @@ FStar_UInt8_eq_mask(uint8_t a, uint8_t b)
return xnx - (uint8_t)1U;
}
-static inline uint8_t
+inline static uint8_t
FStar_UInt8_gte_mask(uint8_t a, uint8_t b)
{
uint8_t x = a;
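The eq_mask/gte_mask helpers above return all-ones or all-zero, which is what makes branch-free selection possible in the callers. An illustrative use (sketch only, not part of the patch):

#include <stdint.h>
/* Branch-free select: an all-ones mask picks a, an all-zero mask picks b. */
static inline uint64_t
ct_select64_sketch(uint64_t mask, uint64_t a, uint64_t b)
{
    return (mask & a) | (~mask & b);
}

For example, ct_select64_sketch(FStar_UInt64_eq_mask(x, y), a, b) yields a when x == y and b otherwise, without a secret-dependent branch.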
diff --git a/lib/freebl/verified/kremlin/kremlib/dist/minimal/LowStar_Endianness.h b/lib/freebl/verified/kremlin/kremlib/dist/minimal/LowStar_Endianness.h
index a6bff78d9..89106c3d7 100644
--- a/lib/freebl/verified/kremlin/kremlib/dist/minimal/LowStar_Endianness.h
+++ b/lib/freebl/verified/kremlin/kremlib/dist/minimal/LowStar_Endianness.h
@@ -1,6 +1,11 @@
/*
Copyright (c) INRIA and Microsoft Corporation. All rights reserved.
Licensed under the Apache 2.0 License.
+
+ This file was generated by KreMLin <https://github.com/FStarLang/kremlin>
+ KreMLin invocation: ../krml -fparentheses -fcurly-braces -fno-shadow -header copyright-header.txt -minimal -skip-compilation -extract-uints -tmpdir dist/minimal -bundle FStar.UInt64+FStar.UInt32+FStar.UInt16+FStar.UInt8=[rename=FStar_UInt_8_16_32_64] -library FStar.UInt128 -add-include <inttypes.h> -add-include <stdbool.h> -add-include "kremlin/internal/compat.h" -add-include "kremlin/lowstar_endianness.h" -add-include "kremlin/internal/types.h" -add-include "kremlin/internal/target.h" -bundle LowStar.Endianness= -bundle FStar.UInt128= -bundle *,WindowsWorkaroundSigh fstar_uint128.c -o libkremlib.a .extract/prims.krml .extract/FStar_Pervasives_Native.krml .extract/FStar_Pervasives.krml .extract/FStar_Preorder.krml .extract/FStar_Calc.krml .extract/FStar_Squash.krml .extract/FStar_Classical.krml .extract/FStar_StrongExcludedMiddle.krml .extract/FStar_FunctionalExtensionality.krml .extract/FStar_List_Tot_Base.krml .extract/FStar_List_Tot_Properties.krml .extract/FStar_List_Tot.krml .extract/FStar_Seq_Base.krml .extract/FStar_Seq_Properties.krml .extract/FStar_Seq.krml .extract/FStar_Mul.krml .extract/FStar_Math_Lib.krml .extract/FStar_Math_Lemmas.krml .extract/FStar_BitVector.krml .extract/FStar_UInt.krml .extract/FStar_UInt32.krml .extract/FStar_Int.krml .extract/FStar_Int16.krml .extract/FStar_Reflection_Types.krml .extract/FStar_Reflection_Data.krml .extract/FStar_Order.krml .extract/FStar_Reflection_Basic.krml .extract/FStar_Ghost.krml .extract/FStar_ErasedLogic.krml .extract/FStar_UInt64.krml .extract/FStar_UInt8.krml .extract/FStar_Endianness.krml .extract/FStar_Set.krml .extract/FStar_PropositionalExtensionality.krml .extract/FStar_PredicateExtensionality.krml .extract/FStar_TSet.krml .extract/FStar_Monotonic_Heap.krml .extract/FStar_Heap.krml .extract/FStar_Map.krml .extract/FStar_Monotonic_HyperHeap.krml .extract/FStar_Monotonic_HyperStack.krml .extract/FStar_HyperStack.krml .extract/FStar_Monotonic_Witnessed.krml .extract/FStar_HyperStack_ST.krml .extract/FStar_HyperStack_All.krml .extract/FStar_Char.krml .extract/FStar_Exn.krml .extract/FStar_ST.krml .extract/FStar_All.krml .extract/FStar_List.krml .extract/FStar_String.krml .extract/FStar_Reflection_Const.krml .extract/FStar_Reflection_Derived.krml .extract/FStar_Reflection_Derived_Lemmas.krml .extract/FStar_Date.krml .extract/FStar_Universe.krml .extract/FStar_GSet.krml .extract/FStar_ModifiesGen.krml .extract/FStar_Range.krml .extract/FStar_Tactics_Types.krml .extract/FStar_Tactics_Result.krml .extract/FStar_Tactics_Effect.krml .extract/FStar_Tactics_Builtins.krml .extract/FStar_Reflection.krml .extract/FStar_Tactics_SyntaxHelpers.krml .extract/FStar_Tactics_Util.krml .extract/FStar_Reflection_Formula.krml .extract/FStar_Tactics_Derived.krml .extract/FStar_Tactics_Logic.krml .extract/FStar_Tactics.krml .extract/FStar_BigOps.krml .extract/LowStar_Monotonic_Buffer.krml .extract/LowStar_Buffer.krml .extract/Spec_Loops.krml .extract/LowStar_BufferOps.krml .extract/C_Loops.krml .extract/FStar_Kremlin_Endianness.krml .extract/FStar_UInt63.krml .extract/FStar_Dyn.krml .extract/FStar_Int63.krml .extract/FStar_Int64.krml .extract/FStar_Int32.krml .extract/FStar_Int8.krml .extract/FStar_UInt16.krml .extract/FStar_Int_Cast.krml .extract/FStar_UInt128.krml .extract/C_Endianness.krml .extract/WasmSupport.krml .extract/FStar_Float.krml .extract/FStar_IO.krml .extract/C.krml .extract/LowStar_Modifies.krml .extract/C_String.krml .extract/FStar_Bytes.krml .extract/FStar_HyperStack_IO.krml .extract/LowStar_Printf.krml 
.extract/LowStar_Endianness.krml .extract/C_Failure.krml .extract/TestLib.krml .extract/FStar_Int_Cast_Full.krml
+ F* version: b0467796
+ KreMLin version: ab4c97c6
*/
#include <inttypes.h>
@@ -15,13 +20,13 @@
#include "FStar_UInt128.h"
-static inline void store128_le(uint8_t *x0, FStar_UInt128_uint128 x1);
+inline static void store128_le(uint8_t *x0, FStar_UInt128_uint128 x1);
-static inline FStar_UInt128_uint128 load128_le(uint8_t *x0);
+inline static FStar_UInt128_uint128 load128_le(uint8_t *x0);
-static inline void store128_be(uint8_t *x0, FStar_UInt128_uint128 x1);
+inline static void store128_be(uint8_t *x0, FStar_UInt128_uint128 x1);
-static inline FStar_UInt128_uint128 load128_be(uint8_t *x0);
+inline static FStar_UInt128_uint128 load128_be(uint8_t *x0);
#define __LowStar_Endianness_H_DEFINED
#endif
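These 128-bit load/store prototypes mirror the 64-bit load64_le/store64_le helpers called by the Poly1305 code above. A portable sketch of a 64-bit little-endian load (assumes a GCC/Clang-style __builtin_bswap64; the library's real definition lives in kremlin/lowstar_endianness.h, which is not part of this patch):

#include <stdint.h>
#include <string.h>
/* Portable little-endian 64-bit load, sketch only (assumes GCC/Clang builtins). */
static inline uint64_t
load64_le_sketch(const uint8_t *b)
{
    uint64_t x;
    memcpy(&x, b, sizeof x); /* memcpy tolerates unaligned input */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    x = __builtin_bswap64(x); /* byte-swap on big-endian hosts */
#endif
    return x;
}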
diff --git a/lib/freebl/verified/kremlin/kremlib/dist/minimal/fstar_uint128_gcc64.h b/lib/freebl/verified/kremlin/kremlib/dist/minimal/fstar_uint128_gcc64.h
index 149f50f28..ba01362a9 100644
--- a/lib/freebl/verified/kremlin/kremlib/dist/minimal/fstar_uint128_gcc64.h
+++ b/lib/freebl/verified/kremlin/kremlib/dist/minimal/fstar_uint128_gcc64.h
@@ -24,9 +24,8 @@
#include "FStar_UInt_8_16_32_64.h"
#include "LowStar_Endianness.h"
-#if !defined(KRML_VERIFIED_UINT128) && (!defined(_MSC_VER) || defined(__clang__)) && \
- (defined(__x86_64__) || defined(__x86_64) || defined(__aarch64__) || \
- (defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)))
+#if !defined(KRML_VERIFIED_UINT128) && !defined(_MSC_VER) && \
+ (defined(__x86_64__) || defined(__x86_64) || defined(__aarch64__))
/* GCC + using native unsigned __int128 support */
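This path maps the 128-bit type directly onto the compiler's unsigned __int128, so wide arithmetic compiles to single instructions. A sketch of the widening multiply it enables (struct and names illustrative only; in this path the real FStar_UInt128_uint128 is __int128 itself, not a struct):

#include <stdint.h>
/* Sketch of a 64x64 -> 128 widening multiply (illustrative names; assumes
   a GCC/Clang-style compiler providing unsigned __int128). */
typedef struct {
    uint64_t low;
    uint64_t high;
} u128_sketch;

static inline u128_sketch
mul_wide_sketch(uint64_t x, uint64_t y)
{
    unsigned __int128 p = (unsigned __int128)x * y; /* one widening multiply */
    u128_sketch r = { (uint64_t)p, (uint64_t)(p >> 64U) };
    return r;
}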
diff --git a/lib/freebl/verified/libintvector.h b/lib/freebl/verified/libintvector.h
index 35ae7c6f3..f11c9308a 100644
--- a/lib/freebl/verified/libintvector.h
+++ b/lib/freebl/verified/libintvector.h
@@ -2,16 +2,9 @@
#define __Vec_Intrin_H
#include <sys/types.h>
-
-#define Lib_IntVector_Intrinsics_bit_mask64(x) -((x)&1)
-
-#if defined(__x86_64__) || defined(_M_X64)
-
-// The following functions are only available on machines that support Intel AVX
-
-#include <emmintrin.h>
-#include <tmmintrin.h>
+#include <wmmintrin.h>
#include <smmintrin.h>
+#include <immintrin.h>
typedef __m128i Lib_IntVector_Intrinsics_vec128;
@@ -76,13 +69,13 @@ typedef __m128i Lib_IntVector_Intrinsics_vec128;
(_mm_shuffle_epi8(x0, _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2)))
#define Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, x1) \
- ((x1 == 8 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x0) : _mm_xor_si128(_mm_slli_epi32(x0, x1), _mm_srli_epi32(x0, 32 - (x1))))))
+ ((x1 == 8 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x0) : _mm_xor_si128(_mm_slli_epi32(x0, x1), _mm_srli_epi32(x0, 32 - x1)))))
#define Lib_IntVector_Intrinsics_vec128_rotate_right32(x0, x1) \
- (Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, 32 - (x1)))
+ (Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, 32 - x1))
#define Lib_IntVector_Intrinsics_vec128_shuffle32(x0, x1, x2, x3, x4) \
- (_mm_shuffle_epi32(x0, _MM_SHUFFLE(x4, x3, x2, x1)))
+ (_mm_shuffle_epi32(x0, _MM_SHUFFLE(x1, x2, x3, x4)))
#define Lib_IntVector_Intrinsics_vec128_shuffle64(x0, x1, x2) \
(_mm_shuffle_epi32(x0, _MM_SHUFFLE(2 * x1 + 1, 2 * x1, 2 * x2 + 1, 2 * x2)))
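When the rotation amount is not 8 or 16 (those cases use a byte shuffle), vec128_rotate_left32 above builds the rotate from two shifts. Per 32-bit lane this is the usual rotate; the macro xors the two halves, which is equivalent to or-ing them because the shifted bit ranges do not overlap:

#include <stdint.h>
/* Per-lane meaning of the rotate macro above (scalar sketch, 0 < n < 32). */
static inline uint32_t
rotl32_sketch(uint32_t x, uint32_t n)
{
    return (x << n) | (x >> (32U - n));
}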
@@ -132,6 +125,8 @@ typedef __m128i Lib_IntVector_Intrinsics_vec128;
#define Lib_IntVector_Intrinsics_vec128_zero \
(_mm_set1_epi16((uint16_t)0))
+#define Lib_IntVector_Intrinsics_bit_mask64(x) -((x)&1)
+
#define Lib_IntVector_Intrinsics_vec128_add64(x0, x1) \
(_mm_add_epi64(x0, x1))
@@ -162,13 +157,13 @@ typedef __m128i Lib_IntVector_Intrinsics_vec128;
#define Lib_IntVector_Intrinsics_vec128_load64(x) \
(_mm_set1_epi64x(x)) /* hi lo */
-#define Lib_IntVector_Intrinsics_vec128_load64s(x0, x1) \
- (_mm_set_epi64x(x1, x0)) /* hi lo */
+#define Lib_IntVector_Intrinsics_vec128_load64s(x1, x2) \
+ (_mm_set_epi64x(x1, x2)) /* hi lo */
#define Lib_IntVector_Intrinsics_vec128_load32(x) \
(_mm_set1_epi32(x))
-#define Lib_IntVector_Intrinsics_vec128_load32s(x0, x1, x2, x3) \
+#define Lib_IntVector_Intrinsics_vec128_load32s(x3, x2, x1, x0) \
(_mm_set_epi32(x3, x2, x1, x0)) /* hi lo */
#define Lib_IntVector_Intrinsics_vec128_interleave_low32(x1, x2) \
@@ -183,11 +178,6 @@ typedef __m128i Lib_IntVector_Intrinsics_vec128;
#define Lib_IntVector_Intrinsics_vec128_interleave_high64(x1, x2) \
(_mm_unpackhi_epi64(x1, x2))
-// The following functions are only available on machines that support Intel AVX2
-
-#include <immintrin.h>
-#include <wmmintrin.h>
-
typedef __m256i Lib_IntVector_Intrinsics_vec256;
#define Lib_IntVector_Intrinsics_vec256_eq64(x0, x1) \
@@ -239,31 +229,19 @@ typedef __m256i Lib_IntVector_Intrinsics_vec256;
(_mm256_shuffle_epi8(x0, _mm256_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2, 13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2)))
#define Lib_IntVector_Intrinsics_vec256_rotate_left32(x0, x1) \
- ((x1 == 8 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_16(x0) : _mm256_or_si256(_mm256_slli_epi32(x0, x1), _mm256_srli_epi32(x0, 32 - (x1))))))
+ ((x1 == 8 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec256_rotate_left32_16(x0) : _mm256_or_si256(_mm256_slli_epi32(x0, x1), _mm256_srli_epi32(x0, 32 - x1)))))
#define Lib_IntVector_Intrinsics_vec256_rotate_right32(x0, x1) \
- (Lib_IntVector_Intrinsics_vec256_rotate_left32(x0, 32 - (x1)))
-
-#define Lib_IntVector_Intrinsics_vec256_rotate_right64_8(x0) \
- (_mm256_shuffle_epi8(x0, _mm256_set_epi8(8, 15, 14, 13, 12, 11, 10, 9, 0, 7, 6, 5, 4, 3, 2, 1, 8, 15, 14, 13, 12, 11, 10, 9, 0, 7, 6, 5, 4, 3, 2, 1)))
-
-#define Lib_IntVector_Intrinsics_vec256_rotate_right64_16(x0) \
- (_mm256_shuffle_epi8(x0, _mm256_set_epi8(9, 8, 15, 14, 13, 12, 11, 10, 1, 0, 7, 6, 5, 4, 3, 2, 9, 8, 15, 14, 13, 12, 11, 10, 1, 0, 7, 6, 5, 4, 3, 2)))
+ (Lib_IntVector_Intrinsics_vec256_rotate_left32(x0, 32 - x1))
-#define Lib_IntVector_Intrinsics_vec256_rotate_right64_24(x0) \
- (_mm256_shuffle_epi8(x0, _mm256_set_epi8(10, 9, 8, 15, 14, 13, 12, 11, 2, 1, 0, 7, 6, 5, 4, 3, 10, 9, 8, 15, 14, 13, 12, 11, 2, 1, 0, 7, 6, 5, 4, 3)))
-
-#define Lib_IntVector_Intrinsics_vec256_rotate_right64_32(x0) \
- (_mm256_shuffle_epi8(x0, _mm256_set_epi8(11, 10, 9, 8, 15, 14, 13, 12, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12, 3, 2, 1, 0, 7, 6, 5, 4)))
-
-#define Lib_IntVector_Intrinsics_vec256_rotate_right64(x0, x1) \
- ((x1 == 8 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_8(x0) : (x1 == 16 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_16(x0) : (x1 == 24 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_24(x0) : (x1 == 32 ? Lib_IntVector_Intrinsics_vec256_rotate_right64_32(x0) : _mm256_xor_si256(_mm256_srli_epi64((x0), (x1)), _mm256_slli_epi64((x0), (64 - (x1)))))))))
+#define Lib_IntVector_Intrinsics_vec128_shuffle32(x0, x1, x2, x3, x4) \
+ (_mm_shuffle_epi32(x0, _MM_SHUFFLE(x1, x2, x3, x4)))
#define Lib_IntVector_Intrinsics_vec256_shuffle64(x0, x1, x2, x3, x4) \
- (_mm256_permute4x64_epi64(x0, _MM_SHUFFLE(x4, x3, x2, x1)))
+ (_mm256_permute4x64_epi64(x0, _MM_SHUFFLE(x1, x2, x3, x4)))
#define Lib_IntVector_Intrinsics_vec256_shuffle32(x0, x1, x2, x3, x4, x5, x6, x7, x8) \
- (_mm256_permutevar8x32_epi32(x0, _mm256_set_epi32(x8, x7, x6, x5, x4, x3, x2, x1)))
+ (_mm256_permutevar8x32_epi32(x0, _mm256_set_epi32(x1, x2, x3, x4, x5, x6, x7, x8)))
#define Lib_IntVector_Intrinsics_vec256_load_le(x0) \
(_mm256_loadu_si256((__m256i*)(x0)))
@@ -319,19 +297,19 @@ typedef __m256i Lib_IntVector_Intrinsics_vec256;
#define Lib_IntVector_Intrinsics_vec256_load64(x1) \
(_mm256_set1_epi64x(x1)) /* hi lo */
-#define Lib_IntVector_Intrinsics_vec256_load64s(x0, x1, x2, x3) \
- (_mm256_set_epi64x(x3, x2, x1, x0)) /* hi lo */
+#define Lib_IntVector_Intrinsics_vec256_load64s(x1, x2, x3, x4) \
+ (_mm256_set_epi64x(x1, x2, x3, x4)) /* hi lo */
#define Lib_IntVector_Intrinsics_vec256_load32(x) \
(_mm256_set1_epi32(x))
-#define Lib_IntVector_Intrinsics_vec256_load32s(x0, x1, x2, x3, x4, x5, x6, x7) \
+#define Lib_IntVector_Intrinsics_vec256_load32s(x7, x6, x5, x4, x3, x2, x1, x0) \
(_mm256_set_epi32(x7, x6, x5, x4, x3, x2, x1, x0)) /* hi lo */
#define Lib_IntVector_Intrinsics_vec256_load128(x) \
(_mm256_set_m128i((__m128i)x))
-#define Lib_IntVector_Intrinsics_vec256_load128s(x0, x1) \
+#define Lib_IntVector_Intrinsics_vec256_load128s(x1, x0) \
(_mm256_set_m128i((__m128i)x1, (__m128i)x0))
#define Lib_IntVector_Intrinsics_vec256_interleave_low32(x1, x2) \
@@ -352,172 +330,6 @@ typedef __m256i Lib_IntVector_Intrinsics_vec256;
#define Lib_IntVector_Intrinsics_vec256_interleave_high128(x1, x2) \
(_mm256_permute2x128_si256(x1, x2, 0x31))
-#elif defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)
-#include <arm_neon.h>
-
-typedef uint32x4_t Lib_IntVector_Intrinsics_vec128;
-
-#define Lib_IntVector_Intrinsics_vec128_xor(x0, x1) \
- (veorq_u32(x0, x1))
-
-#define Lib_IntVector_Intrinsics_vec128_eq64(x0, x1) \
- (vceqq_u32(x0, x1))
-
-#define Lib_IntVector_Intrinsics_vec128_eq32(x0, x1) \
- (vceqq_u32(x0, x1))
-
-#define Lib_IntVector_Intrinsics_vec128_gt32(x0, x1) \
- (vcgtq_u32(x0, x1))
-
-#define Lib_IntVector_Intrinsics_vec128_or(x0, x1) \
-    (vorrq_u32(x0, x1))
-
-#define Lib_IntVector_Intrinsics_vec128_and(x0, x1) \
- (vandq_u32(x0, x1))
-
-#define Lib_IntVector_Intrinsics_vec128_lognot(x0) \
- (vmvnq_u32(x0))
-
-#define Lib_IntVector_Intrinsics_vec128_shift_left(x0, x1) \
- (vextq_u32(x0, vdupq_n_u8(0), 16 - (x1) / 8))
-
-#define Lib_IntVector_Intrinsics_vec128_shift_right(x0, x1) \
- (vextq_u32(x0, vdupq_n_u8(0), (x1) / 8))
-
-#define Lib_IntVector_Intrinsics_vec128_shift_left64(x0, x1) \
- (vreinterpretq_u32_u64(vshlq_n_u64(vreinterpretq_u64_u32(x0), x1)))
-
-#define Lib_IntVector_Intrinsics_vec128_shift_right64(x0, x1) \
- (vreinterpretq_u32_u64(vshrq_n_u64(vreinterpretq_u64_u32(x0), x1)))
-
-#define Lib_IntVector_Intrinsics_vec128_shift_left32(x0, x1) \
- (vshlq_n_u32(x0, x1))
-
-#define Lib_IntVector_Intrinsics_vec128_shift_right32(x0, x1) \
- (vreinterpretq_u32_u64(vshrq_n_u64(vreinterpretq_u64_u32(x0), x1)))
-
-#define Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, x1) \
- (vsriq_n_u32(vshlq_n_u32((x0), (x1)), (x0), 32 - (x1)))
-
-#define Lib_IntVector_Intrinsics_vec128_rotate_right32(x0, x1) \
- (vsriq_n_u32(vshlq_n_u32((x0), 32 - (x1)), (x0), (x1)))
-
-/*
-#define Lib_IntVector_Intrinsics_vec128_shuffle32(x0, x1, x2, x3, x4) \
- (_mm_shuffle_epi32(x0, _MM_SHUFFLE(x1,x2,x3,x4)))
-
-#define Lib_IntVector_Intrinsics_vec128_shuffle64(x0, x1, x2) \
- (_mm_shuffle_epi32(x0, _MM_SHUFFLE(2*x1+1,2*x1,2*x2+1,2*x2)))
-*/
-
-#define Lib_IntVector_Intrinsics_vec128_load_le(x0) \
- (vld1q_u32((const uint32_t*)(x0)))
-
-#define Lib_IntVector_Intrinsics_vec128_store_le(x0, x1) \
- (vst1q_u32((uint32_t*)(x0), (x1)))
-
-/*
-#define Lib_IntVector_Intrinsics_vec128_load_be(x0) \
- ( Lib_IntVector_Intrinsics_vec128 l = vrev64q_u8(vld1q_u32((uint32_t*)(x0)));
-
-*/
-
-#define Lib_IntVector_Intrinsics_vec128_load32_be(x0) \
- (vrev32q_u8(vld1q_u32((const uint32_t*)(x0))))
-
-#define Lib_IntVector_Intrinsics_vec128_load64_be(x0) \
- (vreinterpretq_u32_u64(vrev64q_u8(vld1q_u32((const uint32_t*)(x0)))))
-
-/*
-#define Lib_IntVector_Intrinsics_vec128_store_be(x0, x1) \
- (_mm_storeu_si128((__m128i*)(x0), _mm_shuffle_epi8(x1, _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))))
-*/
-
-#define Lib_IntVector_Intrinsics_vec128_store32_be(x0, x1) \
- (vst1q_u32((uint32_t*)(x0), (vrev32q_u8(x1))))
-
-#define Lib_IntVector_Intrinsics_vec128_store64_be(x0, x1) \
- (vst1q_u32((uint32_t*)(x0), (vrev64q_u8(x1))))
-
-#define Lib_IntVector_Intrinsics_vec128_insert8(x0, x1, x2) \
- (vsetq_lane_u8(x1, x0, x2))
-
-#define Lib_IntVector_Intrinsics_vec128_insert32(x0, x1, x2) \
- (vsetq_lane_u32(x1, x0, x2))
-
-#define Lib_IntVector_Intrinsics_vec128_insert64(x0, x1, x2) \
- (vreinterpretq_u32_u64(vsetq_lane_u64(x1, vreinterpretq_u64_u32(x0), x2)))
-
-#define Lib_IntVector_Intrinsics_vec128_extract8(x0, x1) \
- (vgetq_lane_u8(x0, x1))
-
-#define Lib_IntVector_Intrinsics_vec128_extract32(x0, x1) \
- (vgetq_lane_u32(x0, x1))
-
-#define Lib_IntVector_Intrinsics_vec128_extract64(x0, x1) \
- (vreinterpretq_u32_u64(vgetq_lane_u64(vreinterpretq_u64_u32(x0), x1)))
-
-#define Lib_IntVector_Intrinsics_vec128_zero \
- (vdup_n_u8(0))
-
-#define Lib_IntVector_Intrinsics_vec128_add64(x0, x1) \
- (vreinterpretq_u32_u64(vaddq_u64(vreinterpretq_u64_u32(x0), vreinterpretq_u64_u32(x1))))
-
-#define Lib_IntVector_Intrinsics_vec128_sub64(x0, x1) \
- (vreinterpretq_u32_u64(vsubq_u64(vreinterpretq_u64_u32(x0), vreinterpretq_u64_u32(x1))))
-
-#define Lib_IntVector_Intrinsics_vec128_mul64(x0, x1) \
- (vmull_u32(vmovn_u64(x0), vmovn_u64(x1)))
-
-#define Lib_IntVector_Intrinsics_vec128_smul64(x0, x1) \
- (vmull_u32(vmovn_u64(x0), vdupq_n_u64(x1)))
-
-#define Lib_IntVector_Intrinsics_vec128_add32(x0, x1) \
- (vaddq_u32(x0, x1))
-
-#define Lib_IntVector_Intrinsics_vec128_sub32(x0, x1) \
- (vsubq_u32(x0, x1))
-
-#define Lib_IntVector_Intrinsics_vec128_mul32(x0, x1) \
- (vmulq_lane_u32(x0, x1))
-
-#define Lib_IntVector_Intrinsics_vec128_smul32(x0, x1) \
- (vmulq_lane_u32(x0, vdupq_n_u32(x1)))
-
-#define Lib_IntVector_Intrinsics_vec128_load128(x) \
- ((uint32x4_t)(x))
-
-#define Lib_IntVector_Intrinsics_vec128_load64(x) \
- (vreinterpretq_u32_u64(vdupq_n_u64(x))) /* hi lo */
-
-#define Lib_IntVector_Intrinsics_vec128_load32(x) \
- (vdupq_n_u32(x)) /* hi lo */
-
-static inline Lib_IntVector_Intrinsics_vec128
-Lib_IntVector_Intrinsics_vec128_load64s(uint64_t x1, uint64_t x2)
-{
- const uint64_t a[2] = { x1, x2 };
- return vreinterpretq_u32_u64(vld1q_u64(a));
-}
-
-static inline Lib_IntVector_Intrinsics_vec128
-Lib_IntVector_Intrinsics_vec128_load32s(uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4)
-{
- const uint32_t a[4] = { x1, x2, x3, x4 };
- return vld1q_u32(a);
-}
-
-#define Lib_IntVector_Intrinsics_vec128_interleave_low32(x1, x2) \
- (vzip1q_u32(x1, x2))
-
-#define Lib_IntVector_Intrinsics_vec128_interleave_high32(x1, x2) \
- (vzip2q_u32(x1, x2))
-
-#define Lib_IntVector_Intrinsics_vec128_interleave_low64(x1, x2) \
- (vreinterpretq_u32_u64(vzip1q_u64(vreinterpretq_u64_u32(x1), vreinterpretq_u64_u32(x2))))
-
-#define Lib_IntVector_Intrinsics_vec128_interleave_high64(x1, x2) \
- (vreinterpretq_u32_u64(vzip2q_u64(vreinterpretq_u64_u32(x1), vreinterpretq_u64_u32(x2))))
+#define Lib_IntVector_Intrinsics_bit_mask64(x) -((x)&1)
#endif
-#endif
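
The libintvector.h hunks above restore the older Lib_IntVector_Intrinsics_* macro set, including Lib_IntVector_Intrinsics_bit_mask64. As a minimal standalone sketch (not part of this changeset; only the macro name and its -((x)&1) expansion are taken from the hunk above, everything else is ordinary C added here for illustration), the expansion can be checked outside of freebl like this:

#include <stdint.h>
#include <stdio.h>

/* Same expansion as the macro restored in libintvector.h. */
#define Lib_IntVector_Intrinsics_bit_mask64(x) -((x)&1)

int
main(void)
{
    /* Low bit set: (x & 1) == 1, and negating an unsigned 1 wraps
       around to all ones, i.e. 0xFFFFFFFFFFFFFFFF. */
    uint64_t all_ones = Lib_IntVector_Intrinsics_bit_mask64((uint64_t)1U);
    /* Low bit clear: (x & 1) == 0, and -0 stays 0. */
    uint64_t zero = Lib_IntVector_Intrinsics_bit_mask64((uint64_t)2U);
    printf("%016llx %016llx\n",
           (unsigned long long)all_ones,
           (unsigned long long)zero);
    return 0;
}

Turning a single selector bit into an all-ones or all-zero 64-bit word is the usual constant-time masking idiom, which is presumably why this macro sits alongside the vector intrinsics instead of being written as a branch.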