summary refs log tree commit diff
path: root/lib/freebl/verified/Hacl_Chacha20Poly1305_256.c
diff options
context:
space:
mode:
Diffstat (limited to 'lib/freebl/verified/Hacl_Chacha20Poly1305_256.c')
-rw-r--r-- lib/freebl/verified/Hacl_Chacha20Poly1305_256.c | 47
1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/lib/freebl/verified/Hacl_Chacha20Poly1305_256.c b/lib/freebl/verified/Hacl_Chacha20Poly1305_256.c
index efa598ae1..a4e54f1e2 100644
--- a/lib/freebl/verified/Hacl_Chacha20Poly1305_256.c
+++ b/lib/freebl/verified/Hacl_Chacha20Poly1305_256.c
@@ -23,6 +23,9 @@
#include "Hacl_Chacha20Poly1305_256.h"
+#include "internal/Hacl_Poly1305_256.h"
+#include "internal/Hacl_Krmllib.h"
+#include "libintvector.h"
static inline void
poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t *text)
{
@@ -44,9 +47,8 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
uint32_t nb = len1 / bs;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *block = text1 + i * bs;
- Lib_IntVector_Intrinsics_vec256 e[5U];
- for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
- e[_i] = Lib_IntVector_Intrinsics_vec256_zero;
+ KRML_PRE_ALIGN(32)
+ Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block);
Lib_IntVector_Intrinsics_vec256
hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U);
@@ -270,9 +272,8 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
uint32_t rem1 = len1 % (uint32_t)16U;
for (uint32_t i = (uint32_t)0U; i < nb; i++) {
uint8_t *block = t10 + i * (uint32_t)16U;
- Lib_IntVector_Intrinsics_vec256 e[5U];
- for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
- e[_i] = Lib_IntVector_Intrinsics_vec256_zero;
+ KRML_PRE_ALIGN(32)
+ Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
uint64_t u0 = load64_le(block);
uint64_t lo = u0;
uint64_t u = load64_le(block + (uint32_t)8U);
@@ -478,9 +479,8 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
}
if (rem1 > (uint32_t)0U) {
uint8_t *last = t10 + nb * (uint32_t)16U;
- Lib_IntVector_Intrinsics_vec256 e[5U];
- for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
- e[_i] = Lib_IntVector_Intrinsics_vec256_zero;
+ KRML_PRE_ALIGN(32)
+ Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
uint8_t tmp[16U] = { 0U };
memcpy(tmp, last, rem1 * sizeof(uint8_t));
uint64_t u0 = load64_le(tmp);
@@ -691,9 +691,8 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
if (r > (uint32_t)0U) {
Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec256 *acc = ctx;
- Lib_IntVector_Intrinsics_vec256 e[5U];
- for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
- e[_i] = Lib_IntVector_Intrinsics_vec256_zero;
+ KRML_PRE_ALIGN(32)
+ Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
uint64_t u0 = load64_le(tmp);
uint64_t lo = u0;
uint64_t u = load64_le(tmp + (uint32_t)8U);
@@ -909,22 +908,22 @@ poly1305_do_256(
uint8_t *m,
uint8_t *out)
{
- Lib_IntVector_Intrinsics_vec256 ctx[25U];
- for (uint32_t _i = 0U; _i < (uint32_t)25U; ++_i)
- ctx[_i] = Lib_IntVector_Intrinsics_vec256_zero;
+ KRML_PRE_ALIGN(32)
+ Lib_IntVector_Intrinsics_vec256 ctx[25U] KRML_POST_ALIGN(32) = { 0U };
uint8_t block[16U] = { 0U };
Hacl_Poly1305_256_poly1305_init(ctx, k);
if (aadlen != (uint32_t)0U) {
poly1305_padded_256(ctx, aadlen, aad);
}
- poly1305_padded_256(ctx, mlen, m);
+ if (mlen != (uint32_t)0U) {
+ poly1305_padded_256(ctx, mlen, m);
+ }
store64_le(block, (uint64_t)aadlen);
store64_le(block + (uint32_t)8U, (uint64_t)mlen);
Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
Lib_IntVector_Intrinsics_vec256 *acc = ctx;
- Lib_IntVector_Intrinsics_vec256 e[5U];
- for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
- e[_i] = Lib_IntVector_Intrinsics_vec256_zero;
+ KRML_PRE_ALIGN(32)
+ Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
uint64_t u0 = load64_le(block);
uint64_t lo = u0;
uint64_t u = load64_le(block + (uint32_t)8U);
@@ -1165,10 +1164,12 @@ Hacl_Chacha20Poly1305_256_aead_decrypt(
uint8_t *key = tmp;
poly1305_do_256(key, aadlen, aad, mlen, cipher, computed_mac);
uint8_t res = (uint8_t)255U;
- for (uint32_t i = (uint32_t)0U; i < (uint32_t)16U; i++) {
- uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
- res = uu____0 & res;
- }
+ KRML_MAYBE_FOR16(i,
+ (uint32_t)0U,
+ (uint32_t)16U,
+ (uint32_t)1U,
+ uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
+ res = uu____0 & res;);
uint8_t z = res;
if (z == (uint8_t)255U) {
Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, m, cipher, k, n, (uint32_t)1U);