Diffstat (limited to 'FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c')
-rw-r--r--  FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c | 2954
1 file changed, 1416 insertions(+), 1538 deletions(-)
diff --git a/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c b/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c
index f9f02b003..eb0911b01 100644
--- a/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c
+++ b/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c
@@ -1,8 +1,8 @@
/* sha256.c
*
- * Copyright (C) 2006-2015 wolfSSL Inc.
+ * Copyright (C) 2006-2020 wolfSSL Inc.
*
- * This file is part of wolfSSL. (formerly known as CyaSSL)
+ * This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -16,299 +16,604 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
-/* code submitted by raphael.huck@efixo.com */
-
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <wolfssl/wolfcrypt/settings.h>
-#include <wolfssl/wolfcrypt/sha256.h>
-#if !defined(NO_SHA256)
-#ifdef HAVE_FIPS
+/*
+ * SHA256 Build Options:
+ * USE_SLOW_SHA256: Reduces code size by not partially unrolling
+ (~2KB smaller and ~25% slower) (default OFF)
+ * WOLFSSL_SHA256_BY_SPEC: Uses the Ch/Maj based on SHA256 specification
+ (default ON)
+ * WOLFSSL_SHA256_ALT_CH_MAJ: Alternate Ch/Maj that is easier for compilers to
+ optimize and recognize as SHA256 (default OFF)
+ * SHA256_MANY_REGISTERS: A SHA256 version that keeps all data in registers
+ and partially unrolled (default OFF)
+ */
-int wc_InitSha256(Sha256* sha)
-{
- return InitSha256_fips(sha);
-}
+/* Default SHA256 to use Ch/Maj based on specification */
+#if !defined(WOLFSSL_SHA256_BY_SPEC) && !defined(WOLFSSL_SHA256_ALT_CH_MAJ)
+ #define WOLFSSL_SHA256_BY_SPEC
+#endif
-int wc_Sha256Update(Sha256* sha, const byte* data, word32 len)
-{
- return Sha256Update_fips(sha, data, len);
-}
+#if !defined(NO_SHA256) && !defined(WOLFSSL_ARMASM)
+#if defined(HAVE_FIPS) && \
+ defined(HAVE_FIPS_VERSION) && (HAVE_FIPS_VERSION >= 2)
-int wc_Sha256Final(Sha256* sha, byte* out)
-{
- return Sha256Final_fips(sha, out);
-}
+ /* set NO_WRAPPERS before headers, use direct internal f()s not wrappers */
+ #define FIPS_NO_WRAPPERS
+ #ifdef USE_WINDOWS_API
+ #pragma code_seg(".fipsA$d")
+ #pragma const_seg(".fipsB$d")
+ #endif
+#endif
-int wc_Sha256Hash(const byte* data, word32 len, byte* out)
-{
- return Sha256Hash(data, len, out);
-}
+#include <wolfssl/wolfcrypt/sha256.h>
+#include <wolfssl/wolfcrypt/error-crypt.h>
+#include <wolfssl/wolfcrypt/cpuid.h>
+#include <wolfssl/wolfcrypt/hash.h>
+
+#ifdef WOLF_CRYPTO_CB
+ #include <wolfssl/wolfcrypt/cryptocb.h>
+#endif
+
+/* fips wrapper calls, user can call direct */
+#if defined(HAVE_FIPS) && \
+ (!defined(HAVE_FIPS_VERSION) || (HAVE_FIPS_VERSION < 2))
+
+ int wc_InitSha256(wc_Sha256* sha)
+ {
+ if (sha == NULL) {
+ return BAD_FUNC_ARG;
+ }
+ return InitSha256_fips(sha);
+ }
+ int wc_InitSha256_ex(wc_Sha256* sha, void* heap, int devId)
+ {
+ (void)heap;
+ (void)devId;
+ if (sha == NULL) {
+ return BAD_FUNC_ARG;
+ }
+ return InitSha256_fips(sha);
+ }
+ int wc_Sha256Update(wc_Sha256* sha, const byte* data, word32 len)
+ {
+ if (sha == NULL || (data == NULL && len > 0)) {
+ return BAD_FUNC_ARG;
+ }
+
+ if (data == NULL && len == 0) {
+ /* valid, but do nothing */
+ return 0;
+ }
+
+ return Sha256Update_fips(sha, data, len);
+ }
+ int wc_Sha256Final(wc_Sha256* sha, byte* out)
+ {
+ if (sha == NULL || out == NULL) {
+ return BAD_FUNC_ARG;
+ }
+ return Sha256Final_fips(sha, out);
+ }
+ void wc_Sha256Free(wc_Sha256* sha)
+ {
+ (void)sha;
+ /* Not supported in FIPS */
+ }
-#else /* else build without fips */
+#else /* else build without fips, or for FIPS v2 */
-#if !defined(NO_SHA256) && defined(WOLFSSL_TI_HASH)
+
+#if defined(WOLFSSL_TI_HASH)
/* #include <wolfcrypt/src/port/ti/ti-hash.c> included by wc_port.c */
+#elif defined(WOLFSSL_CRYPTOCELL)
+ /* wc_port.c includes wolfcrypt/src/port/arm/cryptoCellHash.c */
#else
-#if !defined (ALIGN32)
- #if defined (__GNUC__)
- #define ALIGN32 __attribute__ ( (aligned (32)))
- #elif defined(_MSC_VER)
- /* disable align warning, we want alignment ! */
- #pragma warning(disable: 4324)
- #define ALIGN32 __declspec (align (32))
- #else
- #define ALIGN32
- #endif
-#endif
+#include <wolfssl/wolfcrypt/logging.h>
-#ifdef WOLFSSL_PIC32MZ_HASH
-#define wc_InitSha256 wc_InitSha256_sw
-#define wc_Sha256Update wc_Sha256Update_sw
-#define wc_Sha256Final wc_Sha256Final_sw
+#ifdef NO_INLINE
+ #include <wolfssl/wolfcrypt/misc.h>
+#else
+ #define WOLFSSL_MISC_INCLUDED
+ #include <wolfcrypt/src/misc.c>
#endif
-#ifdef HAVE_FIPS
- /* set NO_WRAPPERS before headers, use direct internal f()s not wrappers */
- #define FIPS_NO_WRAPPERS
+#ifdef WOLFSSL_DEVCRYPTO_HASH
+ #include <wolfssl/wolfcrypt/port/devcrypto/wc_devcrypto.h>
#endif
+
+
#if defined(USE_INTEL_SPEEDUP)
-#define HAVE_INTEL_AVX1
-#define HAVE_INTEL_AVX2
+ #if defined(__GNUC__) && ((__GNUC__ < 4) || \
+ (__GNUC__ == 4 && __GNUC_MINOR__ <= 8))
+ #undef NO_AVX2_SUPPORT
+ #define NO_AVX2_SUPPORT
+ #endif
+ #if defined(__clang__) && ((__clang_major__ < 3) || \
+ (__clang_major__ == 3 && __clang_minor__ <= 5))
+ #define NO_AVX2_SUPPORT
+ #elif defined(__clang__) && defined(NO_AVX2_SUPPORT)
+ #undef NO_AVX2_SUPPORT
+ #endif
-#if defined(DEBUG_XMM)
-#include "stdio.h"
+ #define HAVE_INTEL_AVX1
+ #ifndef NO_AVX2_SUPPORT
+ #define HAVE_INTEL_AVX2
+ #endif
+#endif /* USE_INTEL_SPEEDUP */
+
+#if defined(HAVE_INTEL_AVX2)
+ #define HAVE_INTEL_RORX
#endif
+
+#if !defined(WOLFSSL_PIC32MZ_HASH) && !defined(STM32_HASH_SHA2) && \
+ (!defined(WOLFSSL_IMX6_CAAM) || defined(NO_IMX6_CAAM_HASH)) && \
+ !defined(WOLFSSL_AFALG_HASH) && !defined(WOLFSSL_DEVCRYPTO_HASH) && \
+ (!defined(WOLFSSL_ESP32WROOM32_CRYPT) || defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)) && \
+ (!defined(WOLFSSL_RENESAS_TSIP_CRYPT) || defined(NO_WOLFSSL_RENESAS_TSIP_HASH))
+
+static int InitSha256(wc_Sha256* sha256)
+{
+ int ret = 0;
+
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
+
+ XMEMSET(sha256->digest, 0, sizeof(sha256->digest));
+ sha256->digest[0] = 0x6A09E667L;
+ sha256->digest[1] = 0xBB67AE85L;
+ sha256->digest[2] = 0x3C6EF372L;
+ sha256->digest[3] = 0xA54FF53AL;
+ sha256->digest[4] = 0x510E527FL;
+ sha256->digest[5] = 0x9B05688CL;
+ sha256->digest[6] = 0x1F83D9ABL;
+ sha256->digest[7] = 0x5BE0CD19L;
+
+ sha256->buffLen = 0;
+ sha256->loLen = 0;
+ sha256->hiLen = 0;
+#if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
+ sha256->flags = 0;
#endif
-#if defined(HAVE_INTEL_AVX2)
-#define HAVE_INTEL_RORX
+ return ret;
+}
#endif
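
For reference, the eight words loaded by InitSha256() above are the first 32 bits
of the fractional parts of the square roots of the first eight primes (FIPS 180-4).
A minimal standalone check, not part of wolfSSL (double precision is enough to
recover the top 32 bits here):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        static const int primes[8] = { 2, 3, 5, 7, 11, 13, 17, 19 };
        int i;
        for (i = 0; i < 8; i++) {
            double r = sqrt((double)primes[i]);
            /* first 32 bits of the fractional part: frac(r) * 2^32 */
            uint32_t h = (uint32_t)((r - floor(r)) * 4294967296.0);
            printf("digest[%d] = 0x%08X\n", i, h); /* 0x6A09E667, 0xBB67AE85, ... */
        }
        return 0;
    }
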
-
-/*****
-Intel AVX1/AVX2 Macro Control Structure
-#define HAVE_INTEL_AVX1
-#define HAVE_INTEL_AVX2
+/* Hardware Acceleration */
+#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
-#define HAVE_INTEL_RORX
+ /* in case Intel instructions aren't available, plus we need the K[] global */
+ #define NEED_SOFT_SHA256
+ /*****
+ Intel AVX1/AVX2 Macro Control Structure
-int InitSha256(Sha256* sha256) {
- Save/Recover XMM, YMM
- ...
-}
+ #define HAVE_INTEL_AVX1
+ #define HAVE_INTEL_AVX2
-#if defined(HAVE_INTEL_AVX1)|| defined(HAVE_INTEL_AVX2)
- Transform() ; Function prototype
-#else
- Transform() { }
- int Sha256Final() {
- Save/Recover XMM, YMM
- ...
- }
-#endif
+ #define HAVE_INTEL_RORX
-#if defined(HAVE_INTEL_AVX1)|| defined(HAVE_INTEL_AVX2)
- #if defined(HAVE_INTEL_RORX
- #define RND with rorx instuction
+
+ int InitSha256(wc_Sha256* sha256) {
+ Save/Recover XMM, YMM
+ ...
+ }
+
+ #if defined(HAVE_INTEL_AVX1)|| defined(HAVE_INTEL_AVX2)
+ Transform_Sha256(); Function prototype
#else
- #define RND
+ Transform_Sha256() { }
+ int Sha256Final() {
+ Save/Recover XMM, YMM
+ ...
+ }
#endif
-#endif
-#if defined(HAVE_INTEL_AVX1)
-
- #define XMM Instructions/inline asm
-
- int Transform() {
- Stitched Message Sched/Round
- }
-
-#elif defined(HAVE_INTEL_AVX2)
-
- #define YMM Instructions/inline asm
-
- int Transform() {
- More granural Stitched Message Sched/Round
- }
-
-*/
+ #if defined(HAVE_INTEL_AVX1)|| defined(HAVE_INTEL_AVX2)
+ #if defined(HAVE_INTEL_RORX)
+ #define RND with rorx instruction
+ #else
+ #define RND
+ #endif
+ #endif
+ #if defined(HAVE_INTEL_AVX1)
-#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ #define XMM Instructions/inline asm
-/* Each platform needs to query info type 1 from cpuid to see if aesni is
- * supported. Also, let's setup a macro for proper linkage w/o ABI conflicts
- */
+ int Transform_Sha256() {
+ Stitched Message Sched/Round
+ }
-#ifndef _MSC_VER
- #define cpuid(reg, leaf, sub)\
- __asm__ __volatile__ ("cpuid":\
- "=a" (reg[0]), "=b" (reg[1]), "=c" (reg[2]), "=d" (reg[3]) :\
- "a" (leaf), "c"(sub));
+ #elif defined(HAVE_INTEL_AVX2)
- #define XASM_LINK(f) asm(f)
-#else
+ #define YMM Instructions/inline asm
- #include <intrin.h>
- #define cpuid(a,b) __cpuid((int*)a,b)
-
- #define XASM_LINK(f)
-
-#endif /* _MSC_VER */
-
-#define EAX 0
-#define EBX 1
-#define ECX 2
-#define EDX 3
-
-#define CPUID_AVX1 0x1
-#define CPUID_AVX2 0x2
-#define CPUID_RDRAND 0x4
-#define CPUID_RDSEED 0x8
-#define CPUID_BMI2 0x10 /* MULX, RORX */
-
-#define IS_INTEL_AVX1 (cpuid_flags&CPUID_AVX1)
-#define IS_INTEL_AVX2 (cpuid_flags&CPUID_AVX2)
-#define IS_INTEL_BMI2 (cpuid_flags&CPUID_BMI2)
-#define IS_INTEL_RDRAND (cpuid_flags&CPUID_RDRAND)
-#define IS_INTEL_RDSEED (cpuid_flags&CPUID_RDSEED)
-
-static word32 cpuid_check = 0 ;
-static word32 cpuid_flags = 0 ;
-
-static word32 cpuid_flag(word32 leaf, word32 sub, word32 num, word32 bit) {
- int got_intel_cpu=0;
- unsigned int reg[5];
-
- reg[4] = '\0' ;
- cpuid(reg, 0, 0);
- if(memcmp((char *)&(reg[EBX]), "Genu", 4) == 0 &&
- memcmp((char *)&(reg[EDX]), "ineI", 4) == 0 &&
- memcmp((char *)&(reg[ECX]), "ntel", 4) == 0) {
- got_intel_cpu = 1;
- }
- if (got_intel_cpu) {
- cpuid(reg, leaf, sub);
- return((reg[num]>>bit)&0x1) ;
- }
- return 0 ;
-}
+ int Transform_Sha256() {
+ More granular Stitched Message Sched/Round
+ }
-static int set_cpuid_flags(void) {
- if(cpuid_check==0) {
- if(cpuid_flag(1, 0, ECX, 28)){ cpuid_flags |= CPUID_AVX1 ;}
- if(cpuid_flag(7, 0, EBX, 5)){ cpuid_flags |= CPUID_AVX2 ; }
- if(cpuid_flag(7, 0, EBX, 8)) { cpuid_flags |= CPUID_BMI2 ; }
- if(cpuid_flag(1, 0, ECX, 30)){ cpuid_flags |= CPUID_RDRAND ; }
- if(cpuid_flag(7, 0, EBX, 18)){ cpuid_flags |= CPUID_RDSEED ; }
- cpuid_check = 1 ;
- return 0 ;
- }
- return 1 ;
-}
+ #endif
+ */
-/* #if defined(HAVE_INTEL_AVX1/2) at the tail of sha512 */
-static int Transform(Sha256* sha256);
+ /* Each platform needs to query info type 1 from cpuid to see if aesni is
+ * supported. Also, let's setup a macro for proper linkage w/o ABI conflicts
+ */
-#if defined(HAVE_INTEL_AVX1)
-static int Transform_AVX1(Sha256 *sha256) ;
+ /* #if defined(HAVE_INTEL_AVX1/2) at the tail of sha256 */
+ static int Transform_Sha256(wc_Sha256* sha256, const byte* data);
+
+#ifdef __cplusplus
+ extern "C" {
#endif
-#if defined(HAVE_INTEL_AVX2)
-static int Transform_AVX2(Sha256 *sha256) ;
-static int Transform_AVX1_RORX(Sha256 *sha256) ;
+
+ #if defined(HAVE_INTEL_AVX1)
+ extern int Transform_Sha256_AVX1(wc_Sha256 *sha256, const byte* data);
+ extern int Transform_Sha256_AVX1_Len(wc_Sha256* sha256,
+ const byte* data, word32 len);
+ #endif
+ #if defined(HAVE_INTEL_AVX2)
+ extern int Transform_Sha256_AVX2(wc_Sha256 *sha256, const byte* data);
+ extern int Transform_Sha256_AVX2_Len(wc_Sha256* sha256,
+ const byte* data, word32 len);
+ #ifdef HAVE_INTEL_RORX
+ extern int Transform_Sha256_AVX1_RORX(wc_Sha256 *sha256, const byte* data);
+ extern int Transform_Sha256_AVX1_RORX_Len(wc_Sha256* sha256,
+ const byte* data, word32 len);
+ extern int Transform_Sha256_AVX2_RORX(wc_Sha256 *sha256, const byte* data);
+ extern int Transform_Sha256_AVX2_RORX_Len(wc_Sha256* sha256,
+ const byte* data, word32 len);
+ #endif /* HAVE_INTEL_RORX */
+ #endif /* HAVE_INTEL_AVX2 */
+
+#ifdef __cplusplus
+ } /* extern "C" */
#endif
-static int (*Transform_p)(Sha256* sha256) /* = _Transform */;
+ static int (*Transform_Sha256_p)(wc_Sha256* sha256, const byte* data);
+ /* = _Transform_Sha256 */
+ static int (*Transform_Sha256_Len_p)(wc_Sha256* sha256, const byte* data,
+ word32 len);
+ /* = NULL */
+ static int transform_check = 0;
+ static word32 intel_flags;
-#define XTRANSFORM(sha256, B) (*Transform_p)(sha256)
+ #define XTRANSFORM(S, D) (*Transform_Sha256_p)((S),(D))
+ #define XTRANSFORM_LEN(S, D, L) (*Transform_Sha256_Len_p)((S),(D),(L))
-static void set_Transform(void) {
- if(set_cpuid_flags())return ;
+ static void Sha256_SetTransform(void)
+ {
-#if defined(HAVE_INTEL_AVX2)
- if(IS_INTEL_AVX2 && IS_INTEL_BMI2){
- Transform_p = Transform_AVX1_RORX; return ;
- Transform_p = Transform_AVX2 ;
- /* for avoiding warning,"not used" */
- }
-#endif
-#if defined(HAVE_INTEL_AVX1)
- Transform_p = ((IS_INTEL_AVX1) ? Transform_AVX1 : Transform) ; return ;
-#endif
- Transform_p = Transform ; return ;
-}
+ if (transform_check)
+ return;
-#else
- #if defined(FREESCALE_MMCAU)
- #define XTRANSFORM(sha256, B) Transform(sha256, B)
- #else
- #define XTRANSFORM(sha256, B) Transform(sha256)
- #endif
-#endif
+ intel_flags = cpuid_get_flags();
-/* Dummy for saving MM_REGs on behalf of Transform */
-#if defined(HAVE_INTEL_AVX2)&& !defined(HAVE_INTEL_AVX1)
-#define SAVE_XMM_YMM __asm__ volatile("or %%r8d, %%r8d":::\
- "%ymm4","%ymm5","%ymm6","%ymm7","%ymm8","%ymm9","%ymm10","%ymm11","%ymm12","%ymm13","%ymm14","%ymm15")
-#elif defined(HAVE_INTEL_AVX1)
-#define SAVE_XMM_YMM __asm__ volatile("or %%r8d, %%r8d":::\
- "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10",\
- "xmm11","xmm12","xmm13","xmm14","xmm15")
-#else
-#define SAVE_XMM_YMM
-#endif
+ #ifdef HAVE_INTEL_AVX2
+ if (1 && IS_INTEL_AVX2(intel_flags)) {
+ #ifdef HAVE_INTEL_RORX
+ if (IS_INTEL_BMI2(intel_flags)) {
+ Transform_Sha256_p = Transform_Sha256_AVX2_RORX;
+ Transform_Sha256_Len_p = Transform_Sha256_AVX2_RORX_Len;
+ }
+ else
+ #endif
+ if (1)
+ {
+ Transform_Sha256_p = Transform_Sha256_AVX2;
+ Transform_Sha256_Len_p = Transform_Sha256_AVX2_Len;
+ }
+ #ifdef HAVE_INTEL_RORX
+ else {
+ Transform_Sha256_p = Transform_Sha256_AVX1_RORX;
+ Transform_Sha256_Len_p = Transform_Sha256_AVX1_RORX_Len;
+ }
+ #endif
+ }
+ else
+ #endif
+ #ifdef HAVE_INTEL_AVX1
+ if (IS_INTEL_AVX1(intel_flags)) {
+ Transform_Sha256_p = Transform_Sha256_AVX1;
+ Transform_Sha256_Len_p = Transform_Sha256_AVX1_Len;
+ }
+ else
+ #endif
+ {
+ Transform_Sha256_p = Transform_Sha256;
+ Transform_Sha256_Len_p = NULL;
+ }
-#ifdef WOLFSSL_PIC32MZ_HASH
-#define InitSha256 InitSha256_sw
-#define Sha256Update Sha256Update_sw
-#define Sha256Final Sha256Final_sw
-#endif
+ transform_check = 1;
+ }
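
Sha256_SetTransform() above is a one-time CPU-feature dispatch: probe the cpuid
flags once, cache the chosen function pointers, and route every later block
transform through them. A reduced sketch of the pattern with hypothetical names
(transform_c, transform_avx stand in for the real implementations):

    #include <stdint.h>

    typedef int (*transform_fn)(uint32_t* digest, const uint8_t* block);

    /* stub bodies; the real versions do the block transform */
    static int transform_c(uint32_t* d, const uint8_t* b)   { (void)d; (void)b; return 0; }
    static int transform_avx(uint32_t* d, const uint8_t* b) { (void)d; (void)b; return 0; }

    static transform_fn transform_p;
    static int transform_check;

    static void set_transform(int have_avx)
    {
        if (transform_check)
            return;             /* select only once per process */
        transform_p = have_avx ? transform_avx : transform_c;
        transform_check = 1;
    }
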
-#include <wolfssl/wolfcrypt/logging.h>
-#include <wolfssl/wolfcrypt/error-crypt.h>
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
+ {
+ int ret = 0;
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
-#ifdef NO_INLINE
- #include <wolfssl/wolfcrypt/misc.h>
-#else
- #include <wolfcrypt/src/misc.c>
-#endif
+ sha256->heap = heap;
+ #ifdef WOLF_CRYPTO_CB
+ sha256->devId = devId;
+ #endif
-#ifdef FREESCALE_MMCAU
- #include "cau_api.h"
-#endif
+ ret = InitSha256(sha256);
+ if (ret != 0)
+ return ret;
+
+ /* choose best Transform function under this runtime environment */
+ Sha256_SetTransform();
+
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA256)
+ ret = wolfAsync_DevCtxInit(&sha256->asyncDev,
+ WOLFSSL_ASYNC_MARKER_SHA256, sha256->heap, devId);
+ #else
+ (void)devId;
+ #endif /* WOLFSSL_ASYNC_CRYPT */
-#ifndef WOLFSSL_HAVE_MIN
-#define WOLFSSL_HAVE_MIN
+ return ret;
+ }
- static INLINE word32 min(word32 a, word32 b)
+#elif defined(FREESCALE_LTC_SHA)
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
{
- return a > b ? b : a;
+ (void)heap;
+ (void)devId;
+
+ LTC_HASH_Init(LTC_BASE, &sha256->ctx, kLTC_Sha256, NULL, 0);
+
+ return 0;
}
-#endif /* WOLFSSL_HAVE_MIN */
+#elif defined(FREESCALE_MMCAU_SHA)
+ #ifdef FREESCALE_MMCAU_CLASSIC_SHA
+ #include "cau_api.h"
+ #else
+ #include "fsl_mmcau.h"
+ #endif
-int wc_InitSha256(Sha256* sha256)
-{
- #ifdef FREESCALE_MMCAU
+ #define XTRANSFORM(S, D) Transform_Sha256((S),(D))
+ #define XTRANSFORM_LEN(S, D, L) Transform_Sha256_Len((S),(D),(L))
+
+ #ifndef WC_HASH_DATA_ALIGNMENT
+ /* these hardware API's require 4 byte (word32) alignment */
+ #define WC_HASH_DATA_ALIGNMENT 4
+ #endif
+
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
+ {
+ int ret = 0;
+
+ (void)heap;
+ (void)devId;
+
+ ret = wolfSSL_CryptHwMutexLock();
+ if (ret != 0) {
+ return ret;
+ }
+
+ #ifdef FREESCALE_MMCAU_CLASSIC_SHA
cau_sha256_initialize_output(sha256->digest);
#else
+ MMCAU_SHA256_InitializeOutput((uint32_t*)sha256->digest);
+ #endif
+ wolfSSL_CryptHwMutexUnLock();
+
+ sha256->buffLen = 0;
+ sha256->loLen = 0;
+ sha256->hiLen = 0;
+ #ifdef WOLFSSL_SMALL_STACK_CACHE
+ sha256->W = NULL;
+ #endif
+
+ return ret;
+ }
+
+ static int Transform_Sha256(wc_Sha256* sha256, const byte* data)
+ {
+ int ret = wolfSSL_CryptHwMutexLock();
+ if (ret == 0) {
+ #ifdef FREESCALE_MMCAU_CLASSIC_SHA
+ cau_sha256_hash_n((byte*)data, 1, sha256->digest);
+ #else
+ MMCAU_SHA256_HashN((byte*)data, 1, sha256->digest);
+ #endif
+ wolfSSL_CryptHwMutexUnLock();
+ }
+ return ret;
+ }
+
+ static int Transform_Sha256_Len(wc_Sha256* sha256, const byte* data,
+ word32 len)
+ {
+ int ret = wolfSSL_CryptHwMutexLock();
+ if (ret == 0) {
+ #if defined(WC_HASH_DATA_ALIGNMENT) && WC_HASH_DATA_ALIGNMENT > 0
+ if ((size_t)data % WC_HASH_DATA_ALIGNMENT) {
+ /* data pointer is NOT aligned,
+ * so copy and perform one block at a time */
+ byte* local = (byte*)sha256->buffer;
+ while (len >= WC_SHA256_BLOCK_SIZE) {
+ XMEMCPY(local, data, WC_SHA256_BLOCK_SIZE);
+ #ifdef FREESCALE_MMCAU_CLASSIC_SHA
+ cau_sha256_hash_n(local, 1, sha256->digest);
+ #else
+ MMCAU_SHA256_HashN(local, 1, sha256->digest);
+ #endif
+ data += WC_SHA256_BLOCK_SIZE;
+ len -= WC_SHA256_BLOCK_SIZE;
+ }
+ }
+ else
+ #endif
+ {
+ #ifdef FREESCALE_MMCAU_CLASSIC_SHA
+ cau_sha256_hash_n((byte*)data, len/WC_SHA256_BLOCK_SIZE,
+ sha256->digest);
+ #else
+ MMCAU_SHA256_HashN((byte*)data, len/WC_SHA256_BLOCK_SIZE,
+ sha256->digest);
+ #endif
+ }
+ wolfSSL_CryptHwMutexUnLock();
+ }
+ return ret;
+ }
+
+#elif defined(WOLFSSL_PIC32MZ_HASH)
+ #include <wolfssl/wolfcrypt/port/pic32/pic32mz-crypt.h>
+
+#elif defined(STM32_HASH_SHA2)
+
+ /* Supports CubeMX HAL or Standard Peripheral Library */
+
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
+ {
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
+
+ (void)devId;
+ (void)heap;
+
+ wc_Stm32_Hash_Init(&sha256->stmCtx);
+ return 0;
+ }
+
+ int wc_Sha256Update(wc_Sha256* sha256, const byte* data, word32 len)
+ {
+ int ret = 0;
+
+ if (sha256 == NULL || (data == NULL && len > 0)) {
+ return BAD_FUNC_ARG;
+ }
+
+ ret = wolfSSL_CryptHwMutexLock();
+ if (ret == 0) {
+ ret = wc_Stm32_Hash_Update(&sha256->stmCtx,
+ HASH_AlgoSelection_SHA256, data, len);
+ wolfSSL_CryptHwMutexUnLock();
+ }
+ return ret;
+ }
+
+ int wc_Sha256Final(wc_Sha256* sha256, byte* hash)
+ {
+ int ret = 0;
+
+ if (sha256 == NULL || hash == NULL) {
+ return BAD_FUNC_ARG;
+ }
+
+ ret = wolfSSL_CryptHwMutexLock();
+ if (ret == 0) {
+ ret = wc_Stm32_Hash_Final(&sha256->stmCtx,
+ HASH_AlgoSelection_SHA256, hash, WC_SHA256_DIGEST_SIZE);
+ wolfSSL_CryptHwMutexUnLock();
+ }
+
+ (void)wc_InitSha256(sha256); /* reset state */
+
+ return ret;
+ }
+
+#elif defined(WOLFSSL_IMX6_CAAM) && !defined(NO_IMX6_CAAM_HASH)
+ /* functions defined in wolfcrypt/src/port/caam/caam_sha256.c */
+
+#elif defined(WOLFSSL_AFALG_HASH)
+ /* implemented in wolfcrypt/src/port/af_alg/afalg_hash.c */
+
+#elif defined(WOLFSSL_DEVCRYPTO_HASH)
+ /* implemented in wolfcrypt/src/port/devcrypto/devcrypt_hash.c */
+
+#elif defined(WOLFSSL_SCE) && !defined(WOLFSSL_SCE_NO_HASH)
+ #include "hal_data.h"
+
+ #ifndef WOLFSSL_SCE_SHA256_HANDLE
+ #define WOLFSSL_SCE_SHA256_HANDLE g_sce_hash_0
+ #endif
+
+ #define WC_SHA256_DIGEST_WORD_SIZE 16
+ #define XTRANSFORM(S, D) wc_Sha256SCE_XTRANSFORM((S), (D))
+ static int wc_Sha256SCE_XTRANSFORM(wc_Sha256* sha256, const byte* data)
+ {
+ if (WOLFSSL_SCE_GSCE_HANDLE.p_cfg->endian_flag ==
+ CRYPTO_WORD_ENDIAN_LITTLE)
+ {
+ ByteReverseWords((word32*)data, (word32*)data,
+ WC_SHA256_BLOCK_SIZE);
+ ByteReverseWords(sha256->digest, sha256->digest,
+ WC_SHA256_DIGEST_SIZE);
+ }
+
+ if (WOLFSSL_SCE_SHA256_HANDLE.p_api->hashUpdate(
+ WOLFSSL_SCE_SHA256_HANDLE.p_ctrl, (word32*)data,
+ WC_SHA256_DIGEST_WORD_SIZE, sha256->digest) != SSP_SUCCESS){
+ WOLFSSL_MSG("Unexpected hardware return value");
+ return WC_HW_E;
+ }
+
+ if (WOLFSSL_SCE_GSCE_HANDLE.p_cfg->endian_flag ==
+ CRYPTO_WORD_ENDIAN_LITTLE)
+ {
+ ByteReverseWords((word32*)data, (word32*)data,
+ WC_SHA256_BLOCK_SIZE);
+ ByteReverseWords(sha256->digest, sha256->digest,
+ WC_SHA256_DIGEST_SIZE);
+ }
+
+ return 0;
+ }
+
+
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
+ {
+ int ret = 0;
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
+
+ sha256->heap = heap;
+
+ ret = InitSha256(sha256);
+ if (ret != 0)
+ return ret;
+
+ (void)devId;
+
+ return ret;
+ }
+
+#elif defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+
+ #define NEED_SOFT_SHA256
+
+ static int InitSha256(wc_Sha256* sha256)
+ {
+ int ret = 0;
+
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
+
+ XMEMSET(sha256->digest, 0, sizeof(sha256->digest));
sha256->digest[0] = 0x6A09E667L;
sha256->digest[1] = 0xBB67AE85L;
sha256->digest[2] = 0x3C6EF372L;
@@ -317,1450 +622,1023 @@ int wc_InitSha256(Sha256* sha256)
sha256->digest[5] = 0x9B05688CL;
sha256->digest[6] = 0x1F83D9ABL;
sha256->digest[7] = 0x5BE0CD19L;
- #endif
- sha256->buffLen = 0;
- sha256->loLen = 0;
- sha256->hiLen = 0;
-
-#if defined(HAVE_INTEL_AVX1)|| defined(HAVE_INTEL_AVX2)
- set_Transform() ; /* choose best Transform function under this runtime environment */
-#endif
-
- return 0;
-}
+ sha256->buffLen = 0;
+ sha256->loLen = 0;
+ sha256->hiLen = 0;
+
+ /* always start firstblock = 1 when using hw engine */
+ sha256->ctx.isfirstblock = 1;
+ sha256->ctx.sha_type = SHA2_256;
+ if(sha256->ctx.mode == ESP32_SHA_HW) {
+ /* release hw */
+ esp_sha_hw_unlock();
+ }
+ /* always set mode as INIT
+ * whether using HW or SW is determined at first call of update()
+ */
+ sha256->ctx.mode = ESP32_SHA_INIT;
+ return ret;
+ }
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
+ {
+ int ret = 0;
-#if !defined(FREESCALE_MMCAU)
-static const ALIGN32 word32 K[64] = {
- 0x428A2F98L, 0x71374491L, 0xB5C0FBCFL, 0xE9B5DBA5L, 0x3956C25BL,
- 0x59F111F1L, 0x923F82A4L, 0xAB1C5ED5L, 0xD807AA98L, 0x12835B01L,
- 0x243185BEL, 0x550C7DC3L, 0x72BE5D74L, 0x80DEB1FEL, 0x9BDC06A7L,
- 0xC19BF174L, 0xE49B69C1L, 0xEFBE4786L, 0x0FC19DC6L, 0x240CA1CCL,
- 0x2DE92C6FL, 0x4A7484AAL, 0x5CB0A9DCL, 0x76F988DAL, 0x983E5152L,
- 0xA831C66DL, 0xB00327C8L, 0xBF597FC7L, 0xC6E00BF3L, 0xD5A79147L,
- 0x06CA6351L, 0x14292967L, 0x27B70A85L, 0x2E1B2138L, 0x4D2C6DFCL,
- 0x53380D13L, 0x650A7354L, 0x766A0ABBL, 0x81C2C92EL, 0x92722C85L,
- 0xA2BFE8A1L, 0xA81A664BL, 0xC24B8B70L, 0xC76C51A3L, 0xD192E819L,
- 0xD6990624L, 0xF40E3585L, 0x106AA070L, 0x19A4C116L, 0x1E376C08L,
- 0x2748774CL, 0x34B0BCB5L, 0x391C0CB3L, 0x4ED8AA4AL, 0x5B9CCA4FL,
- 0x682E6FF3L, 0x748F82EEL, 0x78A5636FL, 0x84C87814L, 0x8CC70208L,
- 0x90BEFFFAL, 0xA4506CEBL, 0xBEF9A3F7L, 0xC67178F2L
-};
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
-#endif
+ XMEMSET(sha256, 0, sizeof(wc_Sha256));
+ sha256->ctx.mode = ESP32_SHA_INIT;
+ sha256->ctx.isfirstblock = 1;
+ (void)devId;
-#if defined(FREESCALE_MMCAU)
+ ret = InitSha256(sha256);
-static int Transform(Sha256* sha256, byte* buf)
-{
- cau_sha256_hash_n(buf, 1, sha256->digest);
+ return ret;
+ }
- return 0;
-}
+#elif defined(WOLFSSL_RENESAS_TSIP_CRYPT) && \
+ !defined(NO_WOLFSSL_RENESAS_TSIP_CRYPT_HASH)
-#endif /* FREESCALE_MMCAU */
+ /* implemented in wolfcrypt/src/port/Renesas/renesas_tsip_sha.c */
-#define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
-#define Maj(x,y,z) ((((x) | (y)) & (z)) | ((x) & (y)))
-#define R(x, n) (((x)&0xFFFFFFFFU)>>(n))
+#else
+ #define NEED_SOFT_SHA256
-#define S(x, n) rotrFixed(x, n)
-#define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22))
-#define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25))
-#define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3))
-#define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10))
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
+ {
+ int ret = 0;
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
+
+ sha256->heap = heap;
+ #ifdef WOLF_CRYPTO_CB
+ sha256->devId = devId;
+ sha256->devCtx = NULL;
+ #endif
-#define RND(a,b,c,d,e,f,g,h,i) \
- t0 = (h) + Sigma1((e)) + Ch((e), (f), (g)) + K[(i)] + W[(i)]; \
- t1 = Sigma0((a)) + Maj((a), (b), (c)); \
- (d) += t0; \
- (h) = t0 + t1;
+ ret = InitSha256(sha256);
+ if (ret != 0)
+ return ret;
-#if !defined(FREESCALE_MMCAU)
-static int Transform(Sha256* sha256)
-{
- word32 S[8], t0, t1;
- int i;
+ #ifdef WOLFSSL_SMALL_STACK_CACHE
+ sha256->W = NULL;
+ #endif
-#ifdef WOLFSSL_SMALL_STACK
- word32* W;
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA256)
+ ret = wolfAsync_DevCtxInit(&sha256->asyncDev,
+ WOLFSSL_ASYNC_MARKER_SHA256, sha256->heap, devId);
+ #else
+ (void)devId;
+ #endif /* WOLFSSL_ASYNC_CRYPT */
- W = (word32*) XMALLOC(sizeof(word32) * 64, NULL, DYNAMIC_TYPE_TMP_BUFFER);
- if (W == NULL)
- return MEMORY_E;
+ return ret;
+ }
+#endif /* End Hardware Acceleration */
+
+#ifdef NEED_SOFT_SHA256
+
+ static const ALIGN32 word32 K[64] = {
+ 0x428A2F98L, 0x71374491L, 0xB5C0FBCFL, 0xE9B5DBA5L, 0x3956C25BL,
+ 0x59F111F1L, 0x923F82A4L, 0xAB1C5ED5L, 0xD807AA98L, 0x12835B01L,
+ 0x243185BEL, 0x550C7DC3L, 0x72BE5D74L, 0x80DEB1FEL, 0x9BDC06A7L,
+ 0xC19BF174L, 0xE49B69C1L, 0xEFBE4786L, 0x0FC19DC6L, 0x240CA1CCL,
+ 0x2DE92C6FL, 0x4A7484AAL, 0x5CB0A9DCL, 0x76F988DAL, 0x983E5152L,
+ 0xA831C66DL, 0xB00327C8L, 0xBF597FC7L, 0xC6E00BF3L, 0xD5A79147L,
+ 0x06CA6351L, 0x14292967L, 0x27B70A85L, 0x2E1B2138L, 0x4D2C6DFCL,
+ 0x53380D13L, 0x650A7354L, 0x766A0ABBL, 0x81C2C92EL, 0x92722C85L,
+ 0xA2BFE8A1L, 0xA81A664BL, 0xC24B8B70L, 0xC76C51A3L, 0xD192E819L,
+ 0xD6990624L, 0xF40E3585L, 0x106AA070L, 0x19A4C116L, 0x1E376C08L,
+ 0x2748774CL, 0x34B0BCB5L, 0x391C0CB3L, 0x4ED8AA4AL, 0x5B9CCA4FL,
+ 0x682E6FF3L, 0x748F82EEL, 0x78A5636FL, 0x84C87814L, 0x8CC70208L,
+ 0x90BEFFFAL, 0xA4506CEBL, 0xBEF9A3F7L, 0xC67178F2L
+ };
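
Like the initial digest words, K[] is derived from primes: K[n] holds the first
32 bits of the fractional part of the cube root of the (n+1)-th prime. A quick
standalone check for K[0], not part of wolfSSL (double precision suffices for
the top 32 bits):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        double r = cbrt(2.0);   /* first prime */
        uint32_t k0 = (uint32_t)((r - floor(r)) * 4294967296.0);
        printf("K[0] = 0x%08X\n", k0);   /* prints 0x428A2F98 */
        return 0;
    }
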
+
+/* Both versions of Ch and Maj are logically the same; the second set is
+   easier for compilers to recognize and optimize as SHA256 */
+#ifdef WOLFSSL_SHA256_BY_SPEC
+ /* SHA256 math based on specification */
+ #define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
+ #define Maj(x,y,z) ((((x) | (y)) & (z)) | ((x) & (y)))
#else
- word32 W[64];
+ /* SHA256 math reworked for easier compiler optimization */
+ #define Ch(x,y,z) ((((y) ^ (z)) & (x)) ^ (z))
+ #define Maj(x,y,z) ((((x) ^ (y)) & ((y) ^ (z))) ^ (y))
#endif
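
The two formulations are boolean identities of each other, so either choice
yields the same digest. Since every operation involved is bitwise, checking all
single-bit combinations of x, y, z proves equality for full words; a standalone
verification:

    #include <assert.h>
    #include <stdint.h>

    #define CH_SPEC(x,y,z)  ((z) ^ ((x) & ((y) ^ (z))))
    #define MAJ_SPEC(x,y,z) ((((x) | (y)) & (z)) | ((x) & (y)))
    #define CH_ALT(x,y,z)   ((((y) ^ (z)) & (x)) ^ (z))
    #define MAJ_ALT(x,y,z)  ((((x) ^ (y)) & ((y) ^ (z))) ^ (y))

    int main(void)
    {
        uint32_t x, y, z;
        for (x = 0; x <= 1; x++)
            for (y = 0; y <= 1; y++)
                for (z = 0; z <= 1; z++) {
                    assert(CH_SPEC(x, y, z)  == CH_ALT(x, y, z));
                    assert(MAJ_SPEC(x, y, z) == MAJ_ALT(x, y, z));
                }
        return 0;
    }
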
+ #define R(x, n) (((x) & 0xFFFFFFFFU) >> (n))
+
+ #define S(x, n) rotrFixed(x, n)
+ #define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22))
+ #define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25))
+ #define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3))
+ #define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10))
+
+ #define a(i) S[(0-i) & 7]
+ #define b(i) S[(1-i) & 7]
+ #define c(i) S[(2-i) & 7]
+ #define d(i) S[(3-i) & 7]
+ #define e(i) S[(4-i) & 7]
+ #define f(i) S[(5-i) & 7]
+ #define g(i) S[(6-i) & 7]
+ #define h(i) S[(7-i) & 7]
+
+ #ifndef XTRANSFORM
+ #define XTRANSFORM(S, D) Transform_Sha256((S),(D))
+ #endif
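
S() is a rotate right (rotrFixed from misc.c) and R() a logical shift right;
Gamma0/Gamma1 drive the message-schedule expansion
W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] used below. A
self-contained sketch of one expansion step, with rotr32 standing in for
rotrFixed:

    #include <stdint.h>

    static uint32_t rotr32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32u - n));   /* n in 1..31 here */
    }
    #define G0(x) (rotr32((x), 7)  ^ rotr32((x), 18) ^ ((x) >> 3))
    #define G1(x) (rotr32((x), 17) ^ rotr32((x), 19) ^ ((x) >> 10))

    /* expand one schedule word from the previous sixteen */
    static uint32_t expand(const uint32_t W[], int i)
    {
        return G1(W[i-2]) + W[i-7] + G0(W[i-15]) + W[i-16];
    }
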
- /* Copy context->state[] to working vars */
- for (i = 0; i < 8; i++)
- S[i] = sha256->digest[i];
-
- for (i = 0; i < 16; i++)
- W[i] = sha256->buffer[i];
-
- for (i = 16; i < 64; i++)
- W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16];
-
- for (i = 0; i < 64; i += 8) {
- RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],i+0);
- RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],i+1);
- RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],i+2);
- RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],i+3);
- RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],i+4);
- RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],i+5);
- RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],i+6);
- RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],i+7);
- }
+#ifndef SHA256_MANY_REGISTERS
+ #define RND(j) \
+ t0 = h(j) + Sigma1(e(j)) + Ch(e(j), f(j), g(j)) + K[i+j] + W[i+j]; \
+ t1 = Sigma0(a(j)) + Maj(a(j), b(j), c(j)); \
+ d(j) += t0; \
+ h(j) = t0 + t1
+
+ static int Transform_Sha256(wc_Sha256* sha256, const byte* data)
+ {
+ word32 S[8], t0, t1;
+ int i;
+
+ #ifdef WOLFSSL_SMALL_STACK_CACHE
+ word32* W = sha256->W;
+ if (W == NULL) {
+ W = (word32*)XMALLOC(sizeof(word32) * WC_SHA256_BLOCK_SIZE, NULL,
+ DYNAMIC_TYPE_DIGEST);
+ if (W == NULL)
+ return MEMORY_E;
+ sha256->W = W;
+ }
+ #elif defined(WOLFSSL_SMALL_STACK)
+ word32* W;
+ W = (word32*)XMALLOC(sizeof(word32) * WC_SHA256_BLOCK_SIZE, NULL,
+ DYNAMIC_TYPE_TMP_BUFFER);
+ if (W == NULL)
+ return MEMORY_E;
+ #else
+ word32 W[WC_SHA256_BLOCK_SIZE];
+ #endif
+
+ /* Copy context->state[] to working vars */
+ for (i = 0; i < 8; i++)
+ S[i] = sha256->digest[i];
+
+ for (i = 0; i < 16; i++)
+ W[i] = *((word32*)&data[i*sizeof(word32)]);
+
+ for (i = 16; i < WC_SHA256_BLOCK_SIZE; i++)
+ W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16];
+
+ #ifdef USE_SLOW_SHA256
+ /* not unrolled - ~2k smaller and ~25% slower */
+ for (i = 0; i < WC_SHA256_BLOCK_SIZE; i += 8) {
+ int j;
+ for (j = 0; j < 8; j++) { /* braces needed here for macros {} */
+ RND(j);
+ }
+ }
+ #else
+ /* partially loop unrolled */
+ for (i = 0; i < WC_SHA256_BLOCK_SIZE; i += 8) {
+ RND(0); RND(1); RND(2); RND(3);
+ RND(4); RND(5); RND(6); RND(7);
+ }
+ #endif /* USE_SLOW_SHA256 */
- /* Add the working vars back into digest state[] */
- for (i = 0; i < 8; i++) {
- sha256->digest[i] += S[i];
+ /* Add the working vars back into digest state[] */
+ for (i = 0; i < 8; i++) {
+ sha256->digest[i] += S[i];
+ }
+
+ #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SMALL_STACK_CACHE)
+ XFREE(W, NULL, DYNAMIC_TYPE_TMP_BUFFER);
+ #endif
+ return 0;
}
+#else
+ /* SHA256 version that keeps all data in registers */
+ #define SCHED1(j) (W[j] = *((word32*)&data[j*sizeof(word32)]))
+ #define SCHED(j) ( \
+ W[ j & 15] += \
+ Gamma1(W[(j-2) & 15])+ \
+ W[(j-7) & 15] + \
+ Gamma0(W[(j-15) & 15]) \
+ )
+
+ #define RND1(j) \
+ t0 = h(j) + Sigma1(e(j)) + Ch(e(j), f(j), g(j)) + K[i+j] + SCHED1(j); \
+ t1 = Sigma0(a(j)) + Maj(a(j), b(j), c(j)); \
+ d(j) += t0; \
+ h(j) = t0 + t1
+ #define RNDN(j) \
+ t0 = h(j) + Sigma1(e(j)) + Ch(e(j), f(j), g(j)) + K[i+j] + SCHED(j); \
+ t1 = Sigma0(a(j)) + Maj(a(j), b(j), c(j)); \
+ d(j) += t0; \
+ h(j) = t0 + t1
+
+ static int Transform_Sha256(wc_Sha256* sha256, const byte* data)
+ {
+ word32 S[8], t0, t1;
+ int i;
+ word32 W[WC_SHA256_BLOCK_SIZE/sizeof(word32)];
+
+ /* Copy digest to working vars */
+ S[0] = sha256->digest[0];
+ S[1] = sha256->digest[1];
+ S[2] = sha256->digest[2];
+ S[3] = sha256->digest[3];
+ S[4] = sha256->digest[4];
+ S[5] = sha256->digest[5];
+ S[6] = sha256->digest[6];
+ S[7] = sha256->digest[7];
+
+ i = 0;
+ RND1( 0); RND1( 1); RND1( 2); RND1( 3);
+ RND1( 4); RND1( 5); RND1( 6); RND1( 7);
+ RND1( 8); RND1( 9); RND1(10); RND1(11);
+ RND1(12); RND1(13); RND1(14); RND1(15);
+ /* 64 operations, partially loop unrolled */
+ for (i = 16; i < 64; i += 16) {
+ RNDN( 0); RNDN( 1); RNDN( 2); RNDN( 3);
+ RNDN( 4); RNDN( 5); RNDN( 6); RNDN( 7);
+ RNDN( 8); RNDN( 9); RNDN(10); RNDN(11);
+ RNDN(12); RNDN(13); RNDN(14); RNDN(15);
+ }
-#ifdef WOLFSSL_SMALL_STACK
- XFREE(W, NULL, DYNAMIC_TYPE_TMP_BUFFER);
+ /* Add the working vars back into digest */
+ sha256->digest[0] += S[0];
+ sha256->digest[1] += S[1];
+ sha256->digest[2] += S[2];
+ sha256->digest[3] += S[3];
+ sha256->digest[4] += S[4];
+ sha256->digest[5] += S[5];
+ sha256->digest[6] += S[6];
+ sha256->digest[7] += S[7];
+
+ return 0;
+ }
+#endif /* SHA256_MANY_REGISTERS */
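
The SCHED() macro above is the memory-saving trick in this version: because a
schedule word depends only on the previous sixteen, round t >= 16 can overwrite
W[t mod 16] in place, keeping sixteen live words instead of sixty-four. A
standalone sketch of the rolling schedule:

    #include <stdint.h>

    static uint32_t rotr32(uint32_t x, unsigned n)
    {
        return (x >> n) | (x << (32u - n));
    }
    #define G0(x) (rotr32((x), 7)  ^ rotr32((x), 18) ^ ((x) >> 3))
    #define G1(x) (rotr32((x), 17) ^ rotr32((x), 19) ^ ((x) >> 10))

    /* for t >= 16: overwrite the slot that is about to be consumed */
    static uint32_t sched(uint32_t W[16], int t)
    {
        W[t & 15] += G1(W[(t - 2) & 15]) + W[(t - 7) & 15]
                   + G0(W[(t - 15) & 15]);
        return W[t & 15];
    }
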
#endif
+/* End wc_ software implementation */
- return 0;
-}
-#endif /* #if !defined(FREESCALE_MMCAU) */
+#ifdef XTRANSFORM
-static INLINE void AddLength(Sha256* sha256, word32 len)
-{
- word32 tmp = sha256->loLen;
- if ( (sha256->loLen += len) < tmp)
- sha256->hiLen++; /* carry low to high */
-}
+ static WC_INLINE void AddLength(wc_Sha256* sha256, word32 len)
+ {
+ word32 tmp = sha256->loLen;
+ if ((sha256->loLen += len) < tmp) {
+ sha256->hiLen++; /* carry low to high */
+ }
+ }
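
AddLength() maintains a 64-bit byte count split across two 32-bit words: an
unsigned wraparound of loLen signals a carry into hiLen. At finalization the
count is converted to bits by shifting the 64-bit quantity left by 3 across
both words, as Sha256Final() does below. A standalone sketch of both steps:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t loLen = 0xFFFFFFF0u, hiLen = 0, tmp;

        tmp = loLen;
        if ((loLen += 0x20u) < tmp)   /* wraparound => carry low to high */
            hiLen++;
        printf("bytes: hi=%u lo=0x%08X\n", hiLen, loLen); /* hi=1 lo=0x00000010 */

        /* bytes -> bits: the top 3 bits of loLen move into hiLen */
        hiLen = (loLen >> 29) + (hiLen << 3);
        loLen <<= 3;
        printf("bits:  hi=%u lo=0x%08X\n", hiLen, loLen); /* hi=8 lo=0x00000080 */
        return 0;
    }
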
-int wc_Sha256Update(Sha256* sha256, const byte* data, word32 len)
-{
+ /* do block size increments/updates */
+ static WC_INLINE int Sha256Update(wc_Sha256* sha256, const byte* data, word32 len)
+ {
+ int ret = 0;
+ word32 blocksLen;
+ byte* local;
- /* do block size increments */
- byte* local = (byte*)sha256->buffer;
+ if (sha256 == NULL || (data == NULL && len > 0)) {
+ return BAD_FUNC_ARG;
+ }
+
+ if (data == NULL && len == 0) {
+ /* valid, but do nothing */
+ return 0;
+ }
+
+ /* check that internal buffLen is valid */
+ if (sha256->buffLen >= WC_SHA256_BLOCK_SIZE) {
+ return BUFFER_E;
+ }
- SAVE_XMM_YMM ; /* for Intel AVX */
+ /* add length for final */
+ AddLength(sha256, len);
- while (len) {
- word32 add = min(len, SHA256_BLOCK_SIZE - sha256->buffLen);
- XMEMCPY(&local[sha256->buffLen], data, add);
+ local = (byte*)sha256->buffer;
- sha256->buffLen += add;
- data += add;
- len -= add;
+ /* process any remainder from previous operation */
+ if (sha256->buffLen > 0) {
+ blocksLen = min(len, WC_SHA256_BLOCK_SIZE - sha256->buffLen);
+ XMEMCPY(&local[sha256->buffLen], data, blocksLen);
- if (sha256->buffLen == SHA256_BLOCK_SIZE) {
- int ret;
+ sha256->buffLen += blocksLen;
+ data += blocksLen;
+ len -= blocksLen;
- #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU)
+ if (sha256->buffLen == WC_SHA256_BLOCK_SIZE) {
+ #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU_SHA)
#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
- if(!IS_INTEL_AVX1 && !IS_INTEL_AVX2)
+ if (!IS_INTEL_AVX1(intel_flags) && !IS_INTEL_AVX2(intel_flags))
#endif
- ByteReverseWords(sha256->buffer, sha256->buffer,
- SHA256_BLOCK_SIZE);
+ {
+ ByteReverseWords(sha256->buffer, sha256->buffer,
+ WC_SHA256_BLOCK_SIZE);
+ }
#endif
- ret = XTRANSFORM(sha256, local);
- if (ret != 0)
- return ret;
- AddLength(sha256, SHA256_BLOCK_SIZE);
- sha256->buffLen = 0;
+ #if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+ if (sha256->ctx.mode == ESP32_SHA_INIT){
+ esp_sha_try_hw_lock(&sha256->ctx);
+ }
+ if (sha256->ctx.mode == ESP32_SHA_SW){
+ ret = XTRANSFORM(sha256, (const byte*)local);
+ } else {
+ esp_sha256_process(sha256, (const byte*)local);
+ }
+ #else
+ ret = XTRANSFORM(sha256, (const byte*)local);
+ #endif
+
+ if (ret == 0)
+ sha256->buffLen = 0;
+ else
+ len = 0; /* error */
+ }
+ }
+
+ /* process blocks */
+ #ifdef XTRANSFORM_LEN
+ #if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ if (Transform_Sha256_Len_p != NULL)
+ #endif
+ {
+ /* get number of blocks */
+ /* 64-1 = 0x3F (~ Inverted = 0xFFFFFFC0) */
+ /* len (masked by 0xFFFFFFC0) returns block aligned length */
+ blocksLen = len & ~(WC_SHA256_BLOCK_SIZE-1);
+ if (blocksLen > 0) {
+ /* Byte reversal and alignment handled in function if required */
+ XTRANSFORM_LEN(sha256, data, blocksLen);
+ data += blocksLen;
+ len -= blocksLen;
+ }
}
+ #if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ else
+ #endif
+ #endif /* XTRANSFORM_LEN */
+ #if !defined(XTRANSFORM_LEN) || defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ {
+ while (len >= WC_SHA256_BLOCK_SIZE) {
+ word32* local32 = sha256->buffer;
+ /* optimization to avoid memcpy if data pointer is properly aligned */
+ /* Intel transform function requires use of sha256->buffer */
+ /* Little Endian requires byte swap, so can't use data directly */
+ #if defined(WC_HASH_DATA_ALIGNMENT) && !defined(LITTLE_ENDIAN_ORDER) && \
+ !defined(HAVE_INTEL_AVX1) && !defined(HAVE_INTEL_AVX2)
+ if (((size_t)data % WC_HASH_DATA_ALIGNMENT) == 0) {
+ local32 = (word32*)data;
+ }
+ else
+ #endif
+ {
+ XMEMCPY(local32, data, WC_SHA256_BLOCK_SIZE);
+ }
+
+ data += WC_SHA256_BLOCK_SIZE;
+ len -= WC_SHA256_BLOCK_SIZE;
+
+ #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU_SHA)
+ #if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ if (!IS_INTEL_AVX1(intel_flags) && !IS_INTEL_AVX2(intel_flags))
+ #endif
+ {
+ ByteReverseWords(local32, local32, WC_SHA256_BLOCK_SIZE);
+ }
+ #endif
+
+ #if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+ if (sha256->ctx.mode == ESP32_SHA_INIT){
+ esp_sha_try_hw_lock(&sha256->ctx);
+ }
+ if (sha256->ctx.mode == ESP32_SHA_SW){
+ ret = XTRANSFORM(sha256, (const byte*)local32);
+ } else {
+ esp_sha256_process(sha256, (const byte*)local32);
+ }
+ #else
+ ret = XTRANSFORM(sha256, (const byte*)local32);
+ #endif
+
+ if (ret != 0)
+ break;
+ }
+ }
+ #endif
+
+ /* save remainder */
+ if (len > 0) {
+ XMEMCPY(local, data, len);
+ sha256->buffLen = len;
+ }
+
+ return ret;
}
- return 0;
-}
+ int wc_Sha256Update(wc_Sha256* sha256, const byte* data, word32 len)
+ {
+ if (sha256 == NULL || (data == NULL && len > 0)) {
+ return BAD_FUNC_ARG;
+ }
-int wc_Sha256Final(Sha256* sha256, byte* hash)
-{
- byte* local = (byte*)sha256->buffer;
- int ret;
-
- SAVE_XMM_YMM ; /* for Intel AVX */
+ if (data == NULL && len == 0) {
+ /* valid, but do nothing */
+ return 0;
+ }
+
+ #ifdef WOLF_CRYPTO_CB
+ if (sha256->devId != INVALID_DEVID) {
+ int ret = wc_CryptoCb_Sha256Hash(sha256, data, len, NULL);
+ if (ret != CRYPTOCB_UNAVAILABLE)
+ return ret;
+ /* fall-through when unavailable */
+ }
+ #endif
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA256)
+ if (sha256->asyncDev.marker == WOLFSSL_ASYNC_MARKER_SHA256) {
+ #if defined(HAVE_INTEL_QA)
+ return IntelQaSymSha256(&sha256->asyncDev, NULL, data, len);
+ #endif
+ }
+ #endif /* WOLFSSL_ASYNC_CRYPT */
+
+ return Sha256Update(sha256, data, len);
+ }
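
Together with wc_InitSha256() and wc_Sha256Final(), this gives the usual
streaming interface: update may be called any number of times with arbitrary
chunk sizes, and the internal buffer absorbs non-block-sized input. A minimal
usage sketch of the API defined in this file:

    #include <wolfssl/wolfcrypt/sha256.h>

    int hash_two_parts(const byte* p1, word32 len1,
                       const byte* p2, word32 len2,
                       byte digest[WC_SHA256_DIGEST_SIZE])
    {
        wc_Sha256 sha;
        int ret = wc_InitSha256(&sha);
        if (ret == 0)
            ret = wc_Sha256Update(&sha, p1, len1);
        if (ret == 0)
            ret = wc_Sha256Update(&sha, p2, len2);
        if (ret == 0)
            ret = wc_Sha256Final(&sha, digest); /* also resets the state */
        wc_Sha256Free(&sha);
        return ret;
    }
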
+
+ static WC_INLINE int Sha256Final(wc_Sha256* sha256)
+ {
+
+ int ret;
+ byte* local;
- AddLength(sha256, sha256->buffLen); /* before adding pads */
+ if (sha256 == NULL) {
+ return BAD_FUNC_ARG;
+ }
- local[sha256->buffLen++] = 0x80; /* add 1 */
+ local = (byte*)sha256->buffer;
+ local[sha256->buffLen++] = 0x80; /* add 1 */
- /* pad with zeros */
- if (sha256->buffLen > SHA256_PAD_SIZE) {
- XMEMSET(&local[sha256->buffLen], 0, SHA256_BLOCK_SIZE - sha256->buffLen);
- sha256->buffLen += SHA256_BLOCK_SIZE - sha256->buffLen;
+ /* pad with zeros */
+ if (sha256->buffLen > WC_SHA256_PAD_SIZE) {
+ XMEMSET(&local[sha256->buffLen], 0,
+ WC_SHA256_BLOCK_SIZE - sha256->buffLen);
+ sha256->buffLen += WC_SHA256_BLOCK_SIZE - sha256->buffLen;
- #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU)
+ #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU_SHA)
#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
- if(!IS_INTEL_AVX1 && !IS_INTEL_AVX2)
+ if (!IS_INTEL_AVX1(intel_flags) && !IS_INTEL_AVX2(intel_flags))
#endif
- ByteReverseWords(sha256->buffer, sha256->buffer, SHA256_BLOCK_SIZE);
+ {
+ ByteReverseWords(sha256->buffer, sha256->buffer,
+ WC_SHA256_BLOCK_SIZE);
+ }
#endif
- ret = XTRANSFORM(sha256, local);
- if (ret != 0)
- return ret;
+ #if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+ if (sha256->ctx.mode == ESP32_SHA_INIT) {
+ esp_sha_try_hw_lock(&sha256->ctx);
+ }
+ if (sha256->ctx.mode == ESP32_SHA_SW) {
+ ret = XTRANSFORM(sha256, (const byte*)local);
+ } else {
+ ret = esp_sha256_process(sha256, (const byte*)local);
+ }
+ #else
+ ret = XTRANSFORM(sha256, (const byte*)local);
+ #endif
+ if (ret != 0)
+ return ret;
- sha256->buffLen = 0;
- }
- XMEMSET(&local[sha256->buffLen], 0, SHA256_PAD_SIZE - sha256->buffLen);
+ sha256->buffLen = 0;
+ }
+ XMEMSET(&local[sha256->buffLen], 0,
+ WC_SHA256_PAD_SIZE - sha256->buffLen);
- /* put lengths in bits */
- sha256->hiLen = (sha256->loLen >> (8*sizeof(sha256->loLen) - 3)) +
- (sha256->hiLen << 3);
- sha256->loLen = sha256->loLen << 3;
+ /* put lengths in bits */
+ sha256->hiLen = (sha256->loLen >> (8 * sizeof(sha256->loLen) - 3)) +
+ (sha256->hiLen << 3);
+ sha256->loLen = sha256->loLen << 3;
- /* store lengths */
- #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU)
+ /* store lengths */
+ #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU_SHA)
#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
- if(!IS_INTEL_AVX1 && !IS_INTEL_AVX2)
+ if (!IS_INTEL_AVX1(intel_flags) && !IS_INTEL_AVX2(intel_flags))
#endif
- ByteReverseWords(sha256->buffer, sha256->buffer, SHA256_BLOCK_SIZE);
+ {
+ ByteReverseWords(sha256->buffer, sha256->buffer,
+ WC_SHA256_BLOCK_SIZE);
+ }
#endif
- /* ! length ordering dependent on digest endian type ! */
- XMEMCPY(&local[SHA256_PAD_SIZE], &sha256->hiLen, sizeof(word32));
- XMEMCPY(&local[SHA256_PAD_SIZE + sizeof(word32)], &sha256->loLen,
- sizeof(word32));
+ /* ! length ordering dependent on digest endian type ! */
+ XMEMCPY(&local[WC_SHA256_PAD_SIZE], &sha256->hiLen, sizeof(word32));
+ XMEMCPY(&local[WC_SHA256_PAD_SIZE + sizeof(word32)], &sha256->loLen,
+ sizeof(word32));
- #if defined(FREESCALE_MMCAU) || defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ #if defined(FREESCALE_MMCAU_SHA) || defined(HAVE_INTEL_AVX1) || \
+ defined(HAVE_INTEL_AVX2)
/* Kinetis requires only these bytes reversed */
#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
- if(IS_INTEL_AVX1 || IS_INTEL_AVX2)
+ if (IS_INTEL_AVX1(intel_flags) || IS_INTEL_AVX2(intel_flags))
#endif
- ByteReverseWords(&sha256->buffer[SHA256_PAD_SIZE/sizeof(word32)],
- &sha256->buffer[SHA256_PAD_SIZE/sizeof(word32)],
- 2 * sizeof(word32));
+ {
+ ByteReverseWords(
+ &sha256->buffer[WC_SHA256_PAD_SIZE / sizeof(word32)],
+ &sha256->buffer[WC_SHA256_PAD_SIZE / sizeof(word32)],
+ 2 * sizeof(word32));
+ }
+ #endif
+
+ #if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+ if (sha256->ctx.mode == ESP32_SHA_INIT) {
+ esp_sha_try_hw_lock(&sha256->ctx);
+ }
+ if (sha256->ctx.mode == ESP32_SHA_SW) {
+ ret = XTRANSFORM(sha256, (const byte*)local);
+ } else {
+ ret = esp_sha256_digest_process(sha256, 1);
+ }
+ #else
+ ret = XTRANSFORM(sha256, (const byte*)local);
#endif
- ret = XTRANSFORM(sha256, local);
- if (ret != 0)
return ret;
+ }
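
The padding built by Sha256Final() follows FIPS 180-4: append a 0x80 byte,
zero-fill up to WC_SHA256_PAD_SIZE (56) bytes, then store the 64-bit big-endian
bit length; a remainder past 56 bytes forces one extra block just for the
length. A standalone sketch of the single padded block for the 3-byte message
"abc":

    #include <stdint.h>
    #include <string.h>

    static void pad_abc(uint8_t block[64])
    {
        memset(block, 0, 64);
        memcpy(block, "abc", 3);
        block[3]  = 0x80;   /* the mandatory appended 1 bit */
        block[63] = 24;     /* bit length 3*8 = 24, big-endian last 8 bytes */
    }
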
- #if defined(LITTLE_ENDIAN_ORDER)
- ByteReverseWords(sha256->digest, sha256->digest, SHA256_DIGEST_SIZE);
+ int wc_Sha256FinalRaw(wc_Sha256* sha256, byte* hash)
+ {
+ #ifdef LITTLE_ENDIAN_ORDER
+ word32 digest[WC_SHA256_DIGEST_SIZE / sizeof(word32)];
#endif
- XMEMCPY(hash, sha256->digest, SHA256_DIGEST_SIZE);
- return wc_InitSha256(sha256); /* reset state */
-}
+ if (sha256 == NULL || hash == NULL) {
+ return BAD_FUNC_ARG;
+ }
+ #ifdef LITTLE_ENDIAN_ORDER
+ ByteReverseWords((word32*)digest, (word32*)sha256->digest,
+ WC_SHA256_DIGEST_SIZE);
+ XMEMCPY(hash, digest, WC_SHA256_DIGEST_SIZE);
+ #else
+ XMEMCPY(hash, sha256->digest, WC_SHA256_DIGEST_SIZE);
+ #endif
+ return 0;
+ }
-int wc_Sha256Hash(const byte* data, word32 len, byte* hash)
-{
- int ret = 0;
-#ifdef WOLFSSL_SMALL_STACK
- Sha256* sha256;
-#else
- Sha256 sha256[1];
-#endif
+ int wc_Sha256Final(wc_Sha256* sha256, byte* hash)
+ {
+ int ret;
-#ifdef WOLFSSL_SMALL_STACK
- sha256 = (Sha256*)XMALLOC(sizeof(Sha256), NULL, DYNAMIC_TYPE_TMP_BUFFER);
- if (sha256 == NULL)
- return MEMORY_E;
-#endif
+ if (sha256 == NULL || hash == NULL) {
+ return BAD_FUNC_ARG;
+ }
- if ((ret = wc_InitSha256(sha256)) != 0) {
- WOLFSSL_MSG("InitSha256 failed");
+ #ifdef WOLF_CRYPTO_CB
+ if (sha256->devId != INVALID_DEVID) {
+ ret = wc_CryptoCb_Sha256Hash(sha256, NULL, 0, hash);
+ if (ret != CRYPTOCB_UNAVAILABLE)
+ return ret;
+ /* fall-through when unavailable */
+ }
+ #endif
+
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA256)
+ if (sha256->asyncDev.marker == WOLFSSL_ASYNC_MARKER_SHA256) {
+ #if defined(HAVE_INTEL_QA)
+ return IntelQaSymSha256(&sha256->asyncDev, hash, NULL,
+ WC_SHA256_DIGEST_SIZE);
+ #endif
+ }
+ #endif /* WOLFSSL_ASYNC_CRYPT */
+
+ ret = Sha256Final(sha256);
+ if (ret != 0)
+ return ret;
+
+ #if defined(LITTLE_ENDIAN_ORDER)
+ ByteReverseWords(sha256->digest, sha256->digest, WC_SHA256_DIGEST_SIZE);
+ #endif
+ XMEMCPY(hash, sha256->digest, WC_SHA256_DIGEST_SIZE);
+
+ return InitSha256(sha256); /* reset state */
}
- else if ((ret = wc_Sha256Update(sha256, data, len)) != 0) {
- WOLFSSL_MSG("Sha256Update failed");
+
+#endif /* XTRANSFORM */
+
+#ifdef WOLFSSL_SHA224
+
+#ifdef STM32_HASH_SHA2
+
+ /* Supports CubeMX HAL or Standard Peripheral Library */
+
+ int wc_InitSha224_ex(wc_Sha224* sha224, void* heap, int devId)
+ {
+ if (sha224 == NULL)
+ return BAD_FUNC_ARG;
+
+ (void)devId;
+ (void)heap;
+
+ wc_Stm32_Hash_Init(&sha224->stmCtx);
+ return 0;
}
- else if ((ret = wc_Sha256Final(sha256, hash)) != 0) {
- WOLFSSL_MSG("Sha256Final failed");
+
+ int wc_Sha224Update(wc_Sha224* sha224, const byte* data, word32 len)
+ {
+ int ret = 0;
+
+ if (sha224 == NULL || (data == NULL && len > 0)) {
+ return BAD_FUNC_ARG;
+ }
+
+ ret = wolfSSL_CryptHwMutexLock();
+ if (ret == 0) {
+ ret = wc_Stm32_Hash_Update(&sha224->stmCtx,
+ HASH_AlgoSelection_SHA224, data, len);
+ wolfSSL_CryptHwMutexUnLock();
+ }
+ return ret;
}
-#ifdef WOLFSSL_SMALL_STACK
- XFREE(sha256, NULL, DYNAMIC_TYPE_TMP_BUFFER);
-#endif
+ int wc_Sha224Final(wc_Sha224* sha224, byte* hash)
+ {
+ int ret = 0;
- return ret;
-}
+ if (sha224 == NULL || hash == NULL) {
+ return BAD_FUNC_ARG;
+ }
-#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ ret = wolfSSL_CryptHwMutexLock();
+ if (ret == 0) {
+ ret = wc_Stm32_Hash_Final(&sha224->stmCtx,
+ HASH_AlgoSelection_SHA224, hash, WC_SHA224_DIGEST_SIZE);
+ wolfSSL_CryptHwMutexUnLock();
+ }
-#define _DigestToReg(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- { word32 d ;\
- d = sha256->digest[0]; __asm__ volatile("movl %0, %"#S_0::"r"(d):SSE_REGs) ;\
- d = sha256->digest[1]; __asm__ volatile("movl %0, %"#S_1::"r"(d):SSE_REGs) ;\
- d = sha256->digest[2]; __asm__ volatile("movl %0, %"#S_2::"r"(d):SSE_REGs) ;\
- d = sha256->digest[3]; __asm__ volatile("movl %0, %"#S_3::"r"(d):SSE_REGs) ;\
- d = sha256->digest[4]; __asm__ volatile("movl %0, %"#S_4::"r"(d):SSE_REGs) ;\
- d = sha256->digest[5]; __asm__ volatile("movl %0, %"#S_5::"r"(d):SSE_REGs) ;\
- d = sha256->digest[6]; __asm__ volatile("movl %0, %"#S_6::"r"(d):SSE_REGs) ;\
- d = sha256->digest[7]; __asm__ volatile("movl %0, %"#S_7::"r"(d):SSE_REGs) ;\
-}
+ (void)wc_InitSha224(sha224); /* reset state */
-#define _RegToDigest(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- { word32 d ; \
- __asm__ volatile("movl %"#S_0", %0":"=r"(d)::SSE_REGs) ; sha256->digest[0] += d;\
- __asm__ volatile("movl %"#S_1", %0":"=r"(d)::SSE_REGs) ; sha256->digest[1] += d;\
- __asm__ volatile("movl %"#S_2", %0":"=r"(d)::SSE_REGs) ; sha256->digest[2] += d;\
- __asm__ volatile("movl %"#S_3", %0":"=r"(d)::SSE_REGs) ; sha256->digest[3] += d;\
- __asm__ volatile("movl %"#S_4", %0":"=r"(d)::SSE_REGs) ; sha256->digest[4] += d;\
- __asm__ volatile("movl %"#S_5", %0":"=r"(d)::SSE_REGs) ; sha256->digest[5] += d;\
- __asm__ volatile("movl %"#S_6", %0":"=r"(d)::SSE_REGs) ; sha256->digest[6] += d;\
- __asm__ volatile("movl %"#S_7", %0":"=r"(d)::SSE_REGs) ; sha256->digest[7] += d;\
-}
+ return ret;
+ }
+#elif defined(WOLFSSL_IMX6_CAAM) && !defined(NO_IMX6_CAAM_HASH)
+ /* functions defined in wolfcrypt/src/port/caam/caam_sha256.c */
-#define DigestToReg(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- _DigestToReg(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )
+#elif defined(WOLFSSL_AFALG_HASH)
+ #error SHA224 currently not supported with AF_ALG enabled
-#define RegToDigest(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- _RegToDigest(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )
+#elif defined(WOLFSSL_DEVCRYPTO_HASH)
+ /* implemented in wolfcrypt/src/port/devcrypto/devcrypt_hash.c */
+#else
+ #define NEED_SOFT_SHA224
-#define S_0 %r15d
-#define S_1 %r10d
-#define S_2 %r11d
-#define S_3 %r12d
-#define S_4 %r13d
-#define S_5 %r14d
-#define S_6 %ebx
-#define S_7 %r9d
+ static int InitSha224(wc_Sha224* sha224)
+ {
+ int ret = 0;
-#define SSE_REGs "%edi", "%ecx", "%esi", "%edx", "%ebx","%r8","%r9","%r10","%r11","%r12","%r13","%r14","%r15"
+ if (sha224 == NULL) {
+ return BAD_FUNC_ARG;
+ }
-#if defined(HAVE_INTEL_RORX)
-#define RND_STEP_RORX_1(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("rorx $6, %"#e", %%edx\n\t":::"%edx",SSE_REGs); /* edx = e>>6 */\
+ sha224->digest[0] = 0xc1059ed8;
+ sha224->digest[1] = 0x367cd507;
+ sha224->digest[2] = 0x3070dd17;
+ sha224->digest[3] = 0xf70e5939;
+ sha224->digest[4] = 0xffc00b31;
+ sha224->digest[5] = 0x68581511;
+ sha224->digest[6] = 0x64f98fa7;
+ sha224->digest[7] = 0xbefa4fa4;
+
+ sha224->buffLen = 0;
+ sha224->loLen = 0;
+ sha224->hiLen = 0;
+
+ #if defined(HAVE_INTEL_AVX1)|| defined(HAVE_INTEL_AVX2)
+ /* choose best Transform function under this runtime environment */
+ Sha256_SetTransform();
+ #endif
+ #if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
+ sha224->flags = 0;
+ #endif
-#define RND_STEP_RORX_2(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("rorx $11, %"#e",%%edi\n\t":::"%edi",SSE_REGs); /* edi = e>>11 */\
-__asm__ volatile("xorl %%edx, %%edi\n\t":::"%edx","%edi",SSE_REGs); /* edi = (e>>11) ^ (e>>6) */\
-__asm__ volatile("rorx $25, %"#e", %%edx\n\t":::"%edx",SSE_REGs); /* edx = e>>25 */\
+ return ret;
+ }
-#define RND_STEP_RORX_3(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("movl %"#f", %%esi\n\t":::"%esi",SSE_REGs); /* esi = f */\
-__asm__ volatile("xorl %"#g", %%esi\n\t":::"%esi",SSE_REGs); /* esi = f ^ g */\
-__asm__ volatile("xorl %%edi, %%edx\n\t":::"%edi","%edx",SSE_REGs); /* edx = Sigma1(e) */\
-__asm__ volatile("andl %"#e", %%esi\n\t":::"%esi",SSE_REGs); /* esi = (f ^ g) & e */\
-__asm__ volatile("xorl %"#g", %%esi\n\t":::"%esi",SSE_REGs); /* esi = Ch(e,f,g) */\
+#endif
-#define RND_STEP_RORX_4(a,b,c,d,e,f,g,h,i)\
-/*__asm__ volatile("movl %0, %%edx\n\t"::"m"(w_k):"%edx");*/\
-__asm__ volatile("addl %0, %"#h"\n\t"::"r"(W_K[i]):SSE_REGs); /* h += w_k */\
-__asm__ volatile("addl %%edx, %"#h"\n\t":::"%edx",SSE_REGs); /* h = h + w_k + Sigma1(e) */\
-__asm__ volatile("rorx $2, %"#a", %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = a>>2 */\
-__asm__ volatile("rorx $13, %"#a", %%edi\n\t":::"%edi",SSE_REGs);/* edi = a>>13 */\
+#ifdef NEED_SOFT_SHA224
+ int wc_InitSha224_ex(wc_Sha224* sha224, void* heap, int devId)
+ {
+ int ret = 0;
-#define RND_STEP_RORX_5(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("rorx $22, %"#a", %%edx\n\t":::"%edx",SSE_REGs); /* edx = a>>22 */\
-__asm__ volatile("xorl %%r8d, %%edi\n\t":::"%edi","%r8",SSE_REGs);/* edi = (a>>2) ^ (a>>13) */\
-__asm__ volatile("xorl %%edi, %%edx\n\t":::"%edi","%edx",SSE_REGs); /* edx = Sigma0(a) */\
-
-#define RND_STEP_RORX_6(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("movl %"#b", %%edi\n\t":::"%edi",SSE_REGs); /* edi = b */\
-__asm__ volatile("orl %"#a", %%edi\n\t":::"%edi",SSE_REGs); /* edi = a | b */\
-__asm__ volatile("andl %"#c", %%edi\n\t":::"%edi",SSE_REGs); /* edi = (a | b) & c*/\
-__asm__ volatile("movl %"#b", %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = b */\
+ if (sha224 == NULL)
+ return BAD_FUNC_ARG;
-#define RND_STEP_RORX_7(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("addl %%esi, %"#h"\n\t":::"%esi",SSE_REGs); /* h += Ch(e,f,g) */\
-__asm__ volatile("andl %"#a", %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = b & a */\
-__asm__ volatile("orl %%edi, %%r8d\n\t":::"%edi","%r8",SSE_REGs); /* r8d = Maj(a,b,c) */\
+ sha224->heap = heap;
-#define RND_STEP_RORX_8(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("addl "#h", "#d"\n\t"); /* d += h + w_k + Sigma1(e) + Ch(e,f,g) */\
-__asm__ volatile("addl %"#h", %%r8d\n\t":::"%r8",SSE_REGs); \
-__asm__ volatile("addl %%edx, %%r8d\n\t":::"%edx","%r8",SSE_REGs); \
-__asm__ volatile("movl %r8d, "#h"\n\t");
+ ret = InitSha224(sha224);
+ if (ret != 0)
+ return ret;
-#endif
+ #ifdef WOLFSSL_SMALL_STACK_CACHE
+ sha224->W = NULL;
+ #endif
-#define RND_STEP_1(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("movl %"#e", %%edx\n\t":::"%edx",SSE_REGs);\
-__asm__ volatile("roll $26, %%edx\n\t":::"%edx",SSE_REGs); /* edx = e>>6 */\
-__asm__ volatile("movl %"#e", %%edi\n\t":::"%edi",SSE_REGs);\
-
-#define RND_STEP_2(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("roll $21, %%edi\n\t":::"%edi",SSE_REGs); /* edi = e>>11 */\
-__asm__ volatile("xorl %%edx, %%edi\n\t":::"%edx","%edi",SSE_REGs); /* edi = (e>>11) ^ (e>>6) */\
-__asm__ volatile("movl %"#e", %%edx\n\t":::"%edx",SSE_REGs); /* edx = e */\
-__asm__ volatile("roll $7, %%edx\n\t":::"%edx",SSE_REGs); /* edx = e>>25 */\
-
-#define RND_STEP_3(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("movl %"#f", %%esi\n\t":::"%esi",SSE_REGs); /* esi = f */\
-__asm__ volatile("xorl %"#g", %%esi\n\t":::"%esi",SSE_REGs); /* esi = f ^ g */\
-__asm__ volatile("xorl %%edi, %%edx\n\t":::"%edi","%edx",SSE_REGs); /* edx = Sigma1(e) */\
-__asm__ volatile("andl %"#e", %%esi\n\t":::"%esi",SSE_REGs); /* esi = (f ^ g) & e */\
-__asm__ volatile("xorl %"#g", %%esi\n\t":::"%esi",SSE_REGs); /* esi = Ch(e,f,g) */\
-
-#define RND_STEP_4(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("addl %0, %"#h"\n\t"::"r"(W_K[i]):SSE_REGs); /* h += w_k */\
-__asm__ volatile("addl %%edx, %"#h"\n\t":::"%edx",SSE_REGs); /* h = h + w_k + Sigma1(e) */\
-__asm__ volatile("movl %"#a", %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = a */\
-__asm__ volatile("roll $30, %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = a>>2 */\
-__asm__ volatile("movl %"#a", %%edi\n\t":::"%edi",SSE_REGs); /* edi = a */\
-__asm__ volatile("roll $19, %%edi\n\t":::"%edi",SSE_REGs); /* edi = a>>13 */\
-__asm__ volatile("movl %"#a", %%edx\n\t":::"%edx",SSE_REGs); /* edx = a */\
-
-#define RND_STEP_5(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("roll $10, %%edx\n\t":::"%edx",SSE_REGs); /* edx = a>>22 */\
-__asm__ volatile("xorl %%r8d, %%edi\n\t":::"%edi","%r8",SSE_REGs); /* edi = (a>>2) ^ (a>>13) */\
-__asm__ volatile("xorl %%edi, %%edx\n\t":::"%edi","%edx",SSE_REGs);/* edx = Sigma0(a) */\
-
-#define RND_STEP_6(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("movl %"#b", %%edi\n\t":::"%edi",SSE_REGs); /* edi = b */\
-__asm__ volatile("orl %"#a", %%edi\n\t":::"%edi",SSE_REGs); /* edi = a | b */\
-__asm__ volatile("andl %"#c", %%edi\n\t":::"%edi",SSE_REGs); /* edi = (a | b) & c */\
-__asm__ volatile("movl %"#b", %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = b */\
-
-#define RND_STEP_7(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("addl %%esi, %"#h"\n\t":::"%esi",SSE_REGs); /* h += Ch(e,f,g) */\
-__asm__ volatile("andl %"#a", %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = b & a */\
-__asm__ volatile("orl %%edi, %%r8d\n\t":::"%edi","%r8",SSE_REGs); /* r8d = Maj(a,b,c) */\
-
-#define RND_STEP_8(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("addl "#h", "#d"\n\t"); /* d += h + w_k + Sigma1(e) + Ch(e,f,g) */\
-__asm__ volatile("addl %"#h", %%r8d\n\t":::"%r8",SSE_REGs); \
- /* r8b = h + w_k + Sigma1(e) + Ch(e,f,g) + Maj(a,b,c) */\
-__asm__ volatile("addl %%edx, %%r8d\n\t":::"%edx","%r8",SSE_REGs);\
- /* r8b = h + w_k + Sigma1(e) Sigma0(a) + Ch(e,f,g) + Maj(a,b,c) */\
-__asm__ volatile("movl %%r8d, %"#h"\n\t":::"%r8", SSE_REGs); \
- /* h = h + w_k + Sigma1(e) + Sigma0(a) + Ch(e,f,g) + Maj(a,b,c) */ \
-
-#define RND_X(a,b,c,d,e,f,g,h,i) \
- RND_STEP_1(a,b,c,d,e,f,g,h,i); \
- RND_STEP_2(a,b,c,d,e,f,g,h,i); \
- RND_STEP_3(a,b,c,d,e,f,g,h,i); \
- RND_STEP_4(a,b,c,d,e,f,g,h,i); \
- RND_STEP_5(a,b,c,d,e,f,g,h,i); \
- RND_STEP_6(a,b,c,d,e,f,g,h,i); \
- RND_STEP_7(a,b,c,d,e,f,g,h,i); \
- RND_STEP_8(a,b,c,d,e,f,g,h,i);
-
-#define RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i);
-#define RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_7,S_0,S_1,S_2,S_3,S_4,S_5,S_6,_i);
-#define RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_6,S_7,S_0,S_1,S_2,S_3,S_4,S_5,_i);
-#define RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_5,S_6,S_7,S_0,S_1,S_2,S_3,S_4,_i);
-#define RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,_i);
-#define RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_3,S_4,S_5,S_6,S_7,S_0,S_1,S_2,_i);
-#define RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_2,S_3,S_4,S_5,S_6,S_7,S_0,S_1,_i);
-#define RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_1,S_2,S_3,S_4,S_5,S_6,S_7,S_0,_i);
-
-
-#define RND_1_3(a,b,c,d,e,f,g,h,i) {\
- RND_STEP_1(a,b,c,d,e,f,g,h,i); \
- RND_STEP_2(a,b,c,d,e,f,g,h,i); \
- RND_STEP_3(a,b,c,d,e,f,g,h,i); \
-}
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA224)
+ ret = wolfAsync_DevCtxInit(&sha224->asyncDev,
+ WOLFSSL_ASYNC_MARKER_SHA224, sha224->heap, devId);
+ #else
+ (void)devId;
+ #endif /* WOLFSSL_ASYNC_CRYPT */
-#define RND_4_6(a,b,c,d,e,f,g,h,i) {\
- RND_STEP_4(a,b,c,d,e,f,g,h,i); \
- RND_STEP_5(a,b,c,d,e,f,g,h,i); \
- RND_STEP_6(a,b,c,d,e,f,g,h,i); \
-}
+ return ret;
+ }
-#define RND_7_8(a,b,c,d,e,f,g,h,i) {\
- RND_STEP_7(a,b,c,d,e,f,g,h,i); \
- RND_STEP_8(a,b,c,d,e,f,g,h,i); \
-}
+ int wc_Sha224Update(wc_Sha224* sha224, const byte* data, word32 len)
+ {
+ int ret;
-
-
-#define RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i);
-#define RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_7,S_0,S_1,S_2,S_3,S_4,S_5,S_6,_i);
-#define RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_6,S_7,S_0,S_1,S_2,S_3,S_4,S_5,_i);
-#define RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_5,S_6,S_7,S_0,S_1,S_2,S_3,S_4,_i);
-#define RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,_i);
-#define RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_3,S_4,S_5,S_6,S_7,S_0,S_1,S_2,_i);
-#define RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_2,S_3,S_4,S_5,S_6,S_7,S_0,S_1,_i);
-#define RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_1,S_2,S_3,S_4,S_5,S_6,S_7,S_0,_i);
-
-#define RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i);
-#define RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_7,S_0,S_1,S_2,S_3,S_4,S_5,S_6,_i);
-#define RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_6,S_7,S_0,S_1,S_2,S_3,S_4,S_5,_i);
-#define RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_5,S_6,S_7,S_0,S_1,S_2,S_3,S_4,_i);
-#define RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,_i);
-#define RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_3,S_4,S_5,S_6,S_7,S_0,S_1,S_2,_i);
-#define RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_2,S_3,S_4,S_5,S_6,S_7,S_0,S_1,_i);
-#define RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_1,S_2,S_3,S_4,S_5,S_6,S_7,S_0,_i);
-
-#define RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i);
-#define RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_7,S_0,S_1,S_2,S_3,S_4,S_5,S_6,_i);
-#define RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_6,S_7,S_0,S_1,S_2,S_3,S_4,S_5,_i);
-#define RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_5,S_6,S_7,S_0,S_1,S_2,S_3,S_4,_i);
-#define RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,_i);
-#define RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_3,S_4,S_5,S_6,S_7,S_0,S_1,S_2,_i);
-#define RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_2,S_3,S_4,S_5,S_6,S_7,S_0,S_1,_i);
-#define RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_1,S_2,S_3,S_4,S_5,S_6,S_7,S_0,_i);
-
-#define FOR(cnt, init, max, inc, loop) \
- __asm__ volatile("movl $"#init", %0\n\t"#loop":"::"m"(cnt):)
-#define END(cnt, init, max, inc, loop) \
- __asm__ volatile("addl $"#inc", %0\n\tcmpl $"#max", %0\n\tjle "#loop"\n\t":"=m"(cnt)::) ;
-
-#endif /* defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2) */
-
-#if defined(HAVE_INTEL_AVX1) /* inline Assember for Intel AVX1 instructions */
-
-#define VPALIGNR(op1,op2,op3,op4) __asm__ volatile("vpalignr $"#op4", %"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPADDD(op1,op2,op3) __asm__ volatile("vpaddd %"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPSRLD(op1,op2,op3) __asm__ volatile("vpsrld $"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPSRLQ(op1,op2,op3) __asm__ volatile("vpsrlq $"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPSLLD(op1,op2,op3) __asm__ volatile("vpslld $"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPOR(op1,op2,op3) __asm__ volatile("vpor %"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPXOR(op1,op2,op3) __asm__ volatile("vpxor %"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPSHUFD(op1,op2,op3) __asm__ volatile("vpshufd $"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPSHUFB(op1,op2,op3) __asm__ volatile("vpshufb %"#op3", %"#op2", %"#op1:::XMM_REGs)
-
-#define MessageSched(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER, SHUF_00BA, SHUF_DC00,\
- a,b,c,d,e,f,g,h,_i)\
- RND_STEP_1(a,b,c,d,e,f,g,h,_i);\
- VPALIGNR (XTMP0, X3, X2, 4) ;\
- RND_STEP_2(a,b,c,d,e,f,g,h,_i);\
- VPADDD (XTMP0, XTMP0, X0) ;\
- RND_STEP_3(a,b,c,d,e,f,g,h,_i);\
- VPALIGNR (XTMP1, X1, X0, 4) ; /* XTMP1 = W[-15] */\
- RND_STEP_4(a,b,c,d,e,f,g,h,_i);\
- VPSRLD (XTMP2, XTMP1, 7) ;\
- RND_STEP_5(a,b,c,d,e,f,g,h,_i);\
- VPSLLD (XTMP3, XTMP1, 25) ; /* VPSLLD (XTMP3, XTMP1, (32-7)) */\
- RND_STEP_6(a,b,c,d,e,f,g,h,_i);\
- VPOR (XTMP3, XTMP3, XTMP2) ; /* XTMP1 = W[-15] MY_ROR 7 */\
- RND_STEP_7(a,b,c,d,e,f,g,h,_i);\
- VPSRLD (XTMP2, XTMP1,18) ;\
- RND_STEP_8(a,b,c,d,e,f,g,h,_i);\
-\
- RND_STEP_1(h,a,b,c,d,e,f,g,_i+1);\
- VPSRLD (XTMP4, XTMP1, 3) ; /* XTMP4 = W[-15] >> 3 */\
- RND_STEP_2(h,a,b,c,d,e,f,g,_i+1);\
- VPSLLD (XTMP1, XTMP1, 14) ; /* VPSLLD (XTMP1, XTMP1, (32-18)) */\
- RND_STEP_3(h,a,b,c,d,e,f,g,_i+1);\
- VPXOR (XTMP3, XTMP3, XTMP1) ;\
- RND_STEP_4(h,a,b,c,d,e,f,g,_i+1);\
- VPXOR (XTMP3, XTMP3, XTMP2) ; /* XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR 18 */\
- RND_STEP_5(h,a,b,c,d,e,f,g,_i+1);\
- VPXOR (XTMP1, XTMP3, XTMP4) ; /* XTMP1 = s0 */\
- RND_STEP_6(h,a,b,c,d,e,f,g,_i+1);\
- VPSHUFD(XTMP2, X3, 0b11111010) ; /* XTMP2 = W[-2] {BBAA}*/\
- RND_STEP_7(h,a,b,c,d,e,f,g,_i+1);\
- VPADDD (XTMP0, XTMP0, XTMP1) ; /* XTMP0 = W[-16] + W[-7] + s0 */\
- RND_STEP_8(h,a,b,c,d,e,f,g,_i+1);\
-\
- RND_STEP_1(g,h,a,b,c,d,e,f,_i+2);\
- VPSRLD (XTMP4, XTMP2, 10) ; /* XTMP4 = W[-2] >> 10 {BBAA} */\
- RND_STEP_2(g,h,a,b,c,d,e,f,_i+2);\
- VPSRLQ (XTMP3, XTMP2, 19) ; /* XTMP3 = W[-2] MY_ROR 19 {xBxA} */\
- RND_STEP_3(g,h,a,b,c,d,e,f,_i+2);\
- VPSRLQ (XTMP2, XTMP2, 17) ; /* XTMP2 = W[-2] MY_ROR 17 {xBxA} */\
- RND_STEP_4(g,h,a,b,c,d,e,f,_i+2);\
- VPXOR (XTMP2, XTMP2, XTMP3) ;\
- RND_STEP_5(g,h,a,b,c,d,e,f,_i+2);\
- VPXOR (XTMP4, XTMP4, XTMP2) ; /* XTMP4 = s1 {xBxA} */\
- RND_STEP_6(g,h,a,b,c,d,e,f,_i+2);\
- VPSHUFB (XTMP4, XTMP4, SHUF_00BA) ; /* XTMP4 = s1 {00BA} */\
- RND_STEP_7(g,h,a,b,c,d,e,f,_i+2);\
- VPADDD (XTMP0, XTMP0, XTMP4) ; /* XTMP0 = {..., ..., W[1], W[0]} */\
- RND_STEP_8(g,h,a,b,c,d,e,f,_i+2);\
-\
- RND_STEP_1(f,g,h,a,b,c,d,e,_i+3);\
- VPSHUFD (XTMP2, XTMP0, 0b01010000) ; /* XTMP2 = W[-2] {DDCC} */\
- RND_STEP_2(f,g,h,a,b,c,d,e,_i+3);\
- VPSRLD (XTMP5, XTMP2, 10); /* XTMP5 = W[-2] >> 10 {DDCC} */\
- RND_STEP_3(f,g,h,a,b,c,d,e,_i+3);\
- VPSRLQ (XTMP3, XTMP2, 19); /* XTMP3 = W[-2] MY_ROR 19 {xDxC} */\
- RND_STEP_4(f,g,h,a,b,c,d,e,_i+3);\
- VPSRLQ (XTMP2, XTMP2, 17) ; /* XTMP2 = W[-2] MY_ROR 17 {xDxC} */\
- RND_STEP_5(f,g,h,a,b,c,d,e,_i+3);\
- VPXOR (XTMP2, XTMP2, XTMP3) ;\
- RND_STEP_6(f,g,h,a,b,c,d,e,_i+3);\
- VPXOR (XTMP5, XTMP5, XTMP2) ; /* XTMP5 = s1 {xDxC} */\
- RND_STEP_7(f,g,h,a,b,c,d,e,_i+3);\
- VPSHUFB (XTMP5, XTMP5, SHUF_DC00) ; /* XTMP5 = s1 {DC00} */\
- RND_STEP_8(f,g,h,a,b,c,d,e,_i+3);\
- VPADDD (X0, XTMP5, XTMP0) ; /* X0 = {W[3], W[2], W[1], W[0]} */\
-
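MessageSched interleaves four lanes of the message expansion with four rounds; the VPSRLD/VPSLLD/VPOR triples synthesize 32-bit rotates (shift left 25 OR'd with shift right 7 is rotate-right 7), and the SHUF_00BA/SHUF_DC00 shuffles merge the two half-width s1 results, since W[i+2] depends on W[i] and the final term cannot be computed for all four lanes at once. The scalar recurrence being vectorized, as a sketch with illustrative names:

    #include <stdint.h>

    #define ROTR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))
    #define s0(x) (ROTR32((x), 7) ^ ROTR32((x), 18) ^ ((x) >> 3))
    #define s1(x) (ROTR32((x), 17) ^ ROTR32((x), 19) ^ ((x) >> 10))

    /* Expand the 16 message words W[0..15] into the 64-word schedule. */
    static void sha256_expand(uint32_t W[64])
    {
        int i;
        for (i = 16; i < 64; i++)
            W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
    }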
-#if defined(HAVE_INTEL_RORX)
-
-#define MessageSched_RORX(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, \
- XFER, SHUF_00BA, SHUF_DC00,a,b,c,d,e,f,g,h,_i)\
- RND_STEP_RORX_1(a,b,c,d,e,f,g,h,_i);\
- VPALIGNR (XTMP0, X3, X2, 4) ;\
- RND_STEP_RORX_2(a,b,c,d,e,f,g,h,_i);\
- VPADDD (XTMP0, XTMP0, X0) ;\
- RND_STEP_RORX_3(a,b,c,d,e,f,g,h,_i);\
- VPALIGNR (XTMP1, X1, X0, 4) ; /* XTMP1 = W[-15] */\
- RND_STEP_RORX_4(a,b,c,d,e,f,g,h,_i);\
- VPSRLD (XTMP2, XTMP1, 7) ;\
- RND_STEP_RORX_5(a,b,c,d,e,f,g,h,_i);\
- VPSLLD (XTMP3, XTMP1, 25) ; /* VPSLLD (XTMP3, XTMP1, (32-7)) */\
- RND_STEP_RORX_6(a,b,c,d,e,f,g,h,_i);\
- VPOR (XTMP3, XTMP3, XTMP2) ; /* XTMP1 = W[-15] MY_ROR 7 */\
- RND_STEP_RORX_7(a,b,c,d,e,f,g,h,_i);\
- VPSRLD (XTMP2, XTMP1,18) ;\
- RND_STEP_RORX_8(a,b,c,d,e,f,g,h,_i);\
-\
- RND_STEP_RORX_1(h,a,b,c,d,e,f,g,_i+1);\
- VPSRLD (XTMP4, XTMP1, 3) ; /* XTMP4 = W[-15] >> 3 */\
- RND_STEP_RORX_2(h,a,b,c,d,e,f,g,_i+1);\
- VPSLLD (XTMP1, XTMP1, 14) ; /* VPSLLD (XTMP1, XTMP1, (32-18)) */\
- RND_STEP_RORX_3(h,a,b,c,d,e,f,g,_i+1);\
- VPXOR (XTMP3, XTMP3, XTMP1) ;\
- RND_STEP_RORX_4(h,a,b,c,d,e,f,g,_i+1);\
- VPXOR (XTMP3, XTMP3, XTMP2) ; /* XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR 18 */\
- RND_STEP_RORX_5(h,a,b,c,d,e,f,g,_i+1);\
- VPXOR (XTMP1, XTMP3, XTMP4) ; /* XTMP1 = s0 */\
- RND_STEP_RORX_6(h,a,b,c,d,e,f,g,_i+1);\
- VPSHUFD(XTMP2, X3, 0b11111010) ; /* XTMP2 = W[-2] {BBAA}*/\
- RND_STEP_RORX_7(h,a,b,c,d,e,f,g,_i+1);\
- VPADDD (XTMP0, XTMP0, XTMP1) ; /* XTMP0 = W[-16] + W[-7] + s0 */\
- RND_STEP_RORX_8(h,a,b,c,d,e,f,g,_i+1);\
-\
- RND_STEP_RORX_1(g,h,a,b,c,d,e,f,_i+2);\
- VPSRLD (XTMP4, XTMP2, 10) ; /* XTMP4 = W[-2] >> 10 {BBAA} */\
- RND_STEP_RORX_2(g,h,a,b,c,d,e,f,_i+2);\
- VPSRLQ (XTMP3, XTMP2, 19) ; /* XTMP3 = W[-2] MY_ROR 19 {xBxA} */\
- RND_STEP_RORX_3(g,h,a,b,c,d,e,f,_i+2);\
- VPSRLQ (XTMP2, XTMP2, 17) ; /* XTMP2 = W[-2] MY_ROR 17 {xBxA} */\
- RND_STEP_RORX_4(g,h,a,b,c,d,e,f,_i+2);\
- VPXOR (XTMP2, XTMP2, XTMP3) ;\
- RND_STEP_RORX_5(g,h,a,b,c,d,e,f,_i+2);\
- VPXOR (XTMP4, XTMP4, XTMP2) ; /* XTMP4 = s1 {xBxA} */\
- RND_STEP_RORX_6(g,h,a,b,c,d,e,f,_i+2);\
- VPSHUFB (XTMP4, XTMP4, SHUF_00BA) ; /* XTMP4 = s1 {00BA} */\
- RND_STEP_RORX_7(g,h,a,b,c,d,e,f,_i+2);\
- VPADDD (XTMP0, XTMP0, XTMP4) ; /* XTMP0 = {..., ..., W[1], W[0]} */\
- RND_STEP_RORX_8(g,h,a,b,c,d,e,f,_i+2);\
-\
- RND_STEP_RORX_1(f,g,h,a,b,c,d,e,_i+3);\
- VPSHUFD (XTMP2, XTMP0, 0b01010000) ; /* XTMP2 = W[-2] {DDCC} */\
- RND_STEP_RORX_2(f,g,h,a,b,c,d,e,_i+3);\
- VPSRLD (XTMP5, XTMP2, 10); /* XTMP5 = W[-2] >> 10 {DDCC} */\
- RND_STEP_RORX_3(f,g,h,a,b,c,d,e,_i+3);\
- VPSRLQ (XTMP3, XTMP2, 19); /* XTMP3 = W[-2] MY_ROR 19 {xDxC} */\
- RND_STEP_RORX_4(f,g,h,a,b,c,d,e,_i+3);\
- VPSRLQ (XTMP2, XTMP2, 17) ; /* XTMP2 = W[-2] MY_ROR 17 {xDxC} */\
- RND_STEP_RORX_5(f,g,h,a,b,c,d,e,_i+3);\
- VPXOR (XTMP2, XTMP2, XTMP3) ;\
- RND_STEP_RORX_6(f,g,h,a,b,c,d,e,_i+3);\
- VPXOR (XTMP5, XTMP5, XTMP2) ; /* XTMP5 = s1 {xDxC} */\
- RND_STEP_RORX_7(f,g,h,a,b,c,d,e,_i+3);\
- VPSHUFB (XTMP5, XTMP5, SHUF_DC00) ; /* XTMP5 = s1 {DC00} */\
- RND_STEP_RORX_8(f,g,h,a,b,c,d,e,_i+3);\
- VPADDD (X0, XTMP5, XTMP0) ; /* X0 = {W[3], W[2], W[1], W[0]} */\
+ if (sha224 == NULL || (data == NULL && len > 0)) {
+ return BAD_FUNC_ARG;
+ }
-#endif /* HAVE_INTEL_RORX */
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA224)
+ if (sha224->asyncDev.marker == WOLFSSL_ASYNC_MARKER_SHA224) {
+ #if defined(HAVE_INTEL_QA)
+ return IntelQaSymSha224(&sha224->asyncDev, NULL, data, len);
+ #endif
+ }
+ #endif /* WOLFSSL_ASYNC_CRYPT */
+ ret = Sha256Update((wc_Sha256*)sha224, data, len);
-#define W_K_from_buff\
- __asm__ volatile("vmovdqu %0, %%xmm4\n\t"\
- "vpshufb %%xmm13, %%xmm4, %%xmm4\n\t"\
- :: "m"(sha256->buffer[0]):"%xmm4") ;\
- __asm__ volatile("vmovdqu %0, %%xmm5\n\t"\
- "vpshufb %%xmm13, %%xmm5, %%xmm5\n\t"\
- ::"m"(sha256->buffer[4]):"%xmm5") ;\
- __asm__ volatile("vmovdqu %0, %%xmm6\n\t"\
- "vpshufb %%xmm13, %%xmm6, %%xmm6\n\t"\
- ::"m"(sha256->buffer[8]):"%xmm6") ;\
- __asm__ volatile("vmovdqu %0, %%xmm7\n\t"\
- "vpshufb %%xmm13, %%xmm7, %%xmm7\n\t"\
- ::"m"(sha256->buffer[12]):"%xmm7") ;\
-
-#define _SET_W_K_XFER(reg, i)\
- __asm__ volatile("vpaddd %0, %"#reg", %%xmm9"::"m"(K[i]):XMM_REGs) ;\
- __asm__ volatile("vmovdqa %%xmm9, %0":"=m"(W_K[i])::XMM_REGs) ;
-
-#define SET_W_K_XFER(reg, i) _SET_W_K_XFER(reg, i)
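SET_W_K_XFER folds the round constants into the schedule words up front, so each round later needs only a single memory operand. A scalar sketch of the same precompute (precompute_w_k is an illustrative name; K is this file's round-constant table, passed in to keep the sketch self-contained, using wolfSSL's word32 type):

    /* W_K[i] = W[i] + K[i] for all 64 rounds; the asm does four per step. */
    static void precompute_w_k(word32 W_K[64], const word32 W[64],
                               const word32 K[64])
    {
        int i;
        for (i = 0; i < 64; i++)
            W_K[i] = W[i] + K[i];
    }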
-
-static const ALIGN32 word64 mSHUF_00BA[] = { 0x0b0a090803020100, 0xFFFFFFFFFFFFFFFF } ; /* shuffle xBxA -> 00BA */
-static const ALIGN32 word64 mSHUF_DC00[] = { 0xFFFFFFFFFFFFFFFF, 0x0b0a090803020100 } ; /* shuffle xDxC -> DC00 */
-static const ALIGN32 word64 mBYTE_FLIP_MASK[] = { 0x0405060700010203, 0x0c0d0e0f08090a0b } ;
-
-
-#define _Init_Masks(mask1, mask2, mask3)\
-__asm__ volatile("vmovdqu %0, %"#mask1 ::"m"(mBYTE_FLIP_MASK[0])) ;\
-__asm__ volatile("vmovdqu %0, %"#mask2 ::"m"(mSHUF_00BA[0])) ;\
-__asm__ volatile("vmovdqu %0, %"#mask3 ::"m"(mSHUF_DC00[0])) ;
-
-#define Init_Masks(BYTE_FLIP_MASK, SHUF_00BA, SHUF_DC00)\
- _Init_Masks(BYTE_FLIP_MASK, SHUF_00BA, SHUF_DC00)
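mBYTE_FLIP_MASK makes vpshufb byte-reverse each 32-bit word, because the SHA-256 message is big-endian while x86 is little-endian. The scalar equivalent is a plain word byte swap (flip32 is an illustrative name):

    #include <stdint.h>

    /* Big-endian message word -> host order, as the byte-flip shuffle does. */
    static uint32_t flip32(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0x0000FF00u) |
               ((x << 8) & 0x00FF0000u) | (x << 24);
    }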
-
-#define X0 %xmm4
-#define X1 %xmm5
-#define X2 %xmm6
-#define X3 %xmm7
-#define X_ X0
-
-#define XTMP0 %xmm0
-#define XTMP1 %xmm1
-#define XTMP2 %xmm2
-#define XTMP3 %xmm3
-#define XTMP4 %xmm8
-#define XTMP5 %xmm9
-#define XFER %xmm10
-
-#define SHUF_00BA %xmm11 /* shuffle xBxA -> 00BA */
-#define SHUF_DC00 %xmm12 /* shuffle xDxC -> DC00 */
-#define BYTE_FLIP_MASK %xmm13
-
-#define XMM_REGs /* Registers are saved in Sha256Update/Final */
- /*"xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13" */
-
-static int Transform_AVX1(Sha256* sha256)
-{
+ return ret;
+ }
- word32 W_K[64] ; /* temp for W+K */
+ int wc_Sha224Final(wc_Sha224* sha224, byte* hash)
+ {
+ int ret;
- #if defined(DEBUG_XMM)
- int i, j ;
- word32 xmm[29][4*15] ;
+ if (sha224 == NULL || hash == NULL) {
+ return BAD_FUNC_ARG;
+ }
+
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA224)
+ if (sha224->asyncDev.marker == WOLFSSL_ASYNC_MARKER_SHA224) {
+ #if defined(HAVE_INTEL_QA)
+ return IntelQaSymSha224(&sha224->asyncDev, hash, NULL,
+ WC_SHA224_DIGEST_SIZE);
+ #endif
+ }
+ #endif /* WOLFSSL_ASYNC_CRYPT */
+
+ ret = Sha256Final((wc_Sha256*)sha224);
+ if (ret != 0)
+ return ret;
+
+ #if defined(LITTLE_ENDIAN_ORDER)
+ ByteReverseWords(sha224->digest, sha224->digest, WC_SHA224_DIGEST_SIZE);
#endif
+ XMEMCPY(hash, sha224->digest, WC_SHA224_DIGEST_SIZE);
- Init_Masks(BYTE_FLIP_MASK, SHUF_00BA, SHUF_DC00) ;
- W_K_from_buff ; /* X0, X1, X2, X3 = W[0..15] ; */
-
- DigestToReg(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7) ;
-
- SET_W_K_XFER(X0, 0) ;
- MessageSched(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,0) ;
- SET_W_K_XFER(X1, 4) ;
- MessageSched(X1, X2, X3, X0, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,4) ;
- SET_W_K_XFER(X2, 8) ;
- MessageSched(X2, X3, X0, X1, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,8) ;
- SET_W_K_XFER(X3, 12) ;
- MessageSched(X3, X0, X1, X2, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,12) ;
- SET_W_K_XFER(X0, 16) ;
- MessageSched(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,16) ;
- SET_W_K_XFER(X1, 20) ;
- MessageSched(X1, X2, X3, X0, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,20) ;
- SET_W_K_XFER(X2, 24) ;
- MessageSched(X2, X3, X0, X1, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,24) ;
- SET_W_K_XFER(X3, 28) ;
- MessageSched(X3, X0, X1, X2, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,28) ;
- SET_W_K_XFER(X0, 32) ;
- MessageSched(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,32) ;
- SET_W_K_XFER(X1, 36) ;
- MessageSched(X1, X2, X3, X0, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,36) ;
- SET_W_K_XFER(X2, 40) ;
- MessageSched(X2, X3, X0, X1, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,40) ;
- SET_W_K_XFER(X3, 44) ;
- MessageSched(X3, X0, X1, X2, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,44) ;
-
- SET_W_K_XFER(X0, 48) ;
- SET_W_K_XFER(X1, 52) ;
- SET_W_K_XFER(X2, 56) ;
- SET_W_K_XFER(X3, 60) ;
-
- RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,48) ;
- RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,49) ;
- RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,50) ;
- RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,51) ;
-
- RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,52) ;
- RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,53) ;
- RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,54) ;
- RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,55) ;
-
- RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,56) ;
- RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,57) ;
- RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,58) ;
- RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,59) ;
-
- RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,60) ;
- RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,61) ;
- RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,62) ;
- RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,63) ;
-
- RegToDigest(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7) ;
-
- #if defined(DEBUG_XMM)
- for(i=0; i<29; i++) {
- for(j=0; j<4*14; j+=4)
- printf("xmm%d[%d]=%08x,%08x,%08x,%08x\n", j/4, i,
- xmm[i][j],xmm[i][j+1],xmm[i][j+2],xmm[i][j+3]) ;
- printf("\n") ;
+ return InitSha224(sha224); /* reset state */
}
-
- for(i=0; i<64; i++) printf("W_K[%d]=%08x\n", i, W_K[i]) ;
+#endif /* end of SHA224 software implementation */
+
+ int wc_InitSha224(wc_Sha224* sha224)
+ {
+ return wc_InitSha224_ex(sha224, NULL, INVALID_DEVID);
+ }
+
+ void wc_Sha224Free(wc_Sha224* sha224)
+ {
+ if (sha224 == NULL)
+ return;
+
+#ifdef WOLFSSL_SMALL_STACK_CACHE
+ if (sha224->W != NULL) {
+ XFREE(sha224->W, NULL, DYNAMIC_TYPE_DIGEST);
+ sha224->W = NULL;
+ }
+#endif
+
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA224)
+ wolfAsync_DevCtxFree(&sha224->asyncDev, WOLFSSL_ASYNC_MARKER_SHA224);
+ #endif /* WOLFSSL_ASYNC_CRYPT */
+
+ #ifdef WOLFSSL_PIC32MZ_HASH
+ wc_Sha256Pic32Free(sha224);
#endif
+ }
+#endif /* WOLFSSL_SHA224 */
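Taken together, the wrappers above give SHA-224 the usual init/update/final/free flow. A minimal one-shot usage sketch, assuming a default software build (no FIPS, async, or crypto callbacks); sha224_of is an illustrative helper, not part of the API:

    #include <wolfssl/wolfcrypt/sha256.h>

    static int sha224_of(const byte* data, word32 len,
                         byte hash[WC_SHA224_DIGEST_SIZE])
    {
        wc_Sha224 sha224;
        int ret = wc_InitSha224(&sha224);
        if (ret == 0)
            ret = wc_Sha224Update(&sha224, data, len);
        if (ret == 0)
            ret = wc_Sha224Final(&sha224, hash); /* also resets the state */
        wc_Sha224Free(&sha224);
        return ret;
    }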
- return 0;
+
+int wc_InitSha256(wc_Sha256* sha256)
+{
+ return wc_InitSha256_ex(sha256, NULL, INVALID_DEVID);
}
-#if defined(HAVE_INTEL_RORX)
-static int Transform_AVX1_RORX(Sha256* sha256)
+void wc_Sha256Free(wc_Sha256* sha256)
{
+ if (sha256 == NULL)
+ return;
- word32 W_K[64] ; /* temp for W+K */
+#ifdef WOLFSSL_SMALL_STACK_CACHE
+ if (sha256->W != NULL) {
+ XFREE(sha256->W, NULL, DYNAMIC_TYPE_DIGEST);
+ sha256->W = NULL;
+ }
+#endif
- #if defined(DEBUG_XMM)
- int i, j ;
- word32 xmm[29][4*15] ;
- #endif
+#if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA256)
+ wolfAsync_DevCtxFree(&sha256->asyncDev, WOLFSSL_ASYNC_MARKER_SHA256);
+#endif /* WOLFSSL_ASYNC_CRYPT */
+#ifdef WOLFSSL_PIC32MZ_HASH
+ wc_Sha256Pic32Free(sha256);
+#endif
+#if defined(WOLFSSL_AFALG_HASH)
+ if (sha256->alFd > 0) {
+ close(sha256->alFd);
+ sha256->alFd = -1; /* avoid possible double close on socket */
+ }
+ if (sha256->rdFd > 0) {
+ close(sha256->rdFd);
+ sha256->rdFd = -1; /* avoid possible double close on socket */
+ }
+#endif /* WOLFSSL_AFALG_HASH */
+#ifdef WOLFSSL_DEVCRYPTO_HASH
+ wc_DevCryptoFree(&sha256->ctx);
+#endif /* WOLFSSL_DEVCRYPTO_HASH */
+#if (defined(WOLFSSL_AFALG_HASH) && defined(WOLFSSL_AFALG_HASH_KEEP)) || \
+ (defined(WOLFSSL_DEVCRYPTO_HASH) && defined(WOLFSSL_DEVCRYPTO_HASH_KEEP)) || \
+ (defined(WOLFSSL_RENESAS_TSIP_CRYPT) && \
+ !defined(NO_WOLFSSL_RENESAS_TSIP_CRYPT_HASH))
+ if (sha256->msg != NULL) {
+ XFREE(sha256->msg, sha256->heap, DYNAMIC_TYPE_TMP_BUFFER);
+ sha256->msg = NULL;
+ }
+#endif
+}
+
+#endif /* !WOLFSSL_TI_HASH */
+#endif /* HAVE_FIPS */
+
+
+#ifndef WOLFSSL_TI_HASH
+#ifdef WOLFSSL_SHA224
+ int wc_Sha224GetHash(wc_Sha224* sha224, byte* hash)
+ {
+ int ret;
+ wc_Sha224 tmpSha224;
- Init_Masks(BYTE_FLIP_MASK, SHUF_00BA, SHUF_DC00) ;
- W_K_from_buff ; /* X0, X1, X2, X3 = W[0..15] ; */
-
- DigestToReg(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7) ;
- SET_W_K_XFER(X0, 0) ;
- MessageSched_RORX(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,0) ;
- SET_W_K_XFER(X1, 4) ;
- MessageSched_RORX(X1, X2, X3, X0, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,4) ;
- SET_W_K_XFER(X2, 8) ;
- MessageSched_RORX(X2, X3, X0, X1, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,8) ;
- SET_W_K_XFER(X3, 12) ;
- MessageSched_RORX(X3, X0, X1, X2, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,12) ;
- SET_W_K_XFER(X0, 16) ;
- MessageSched_RORX(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,16) ;
- SET_W_K_XFER(X1, 20) ;
- MessageSched_RORX(X1, X2, X3, X0, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,20) ;
- SET_W_K_XFER(X2, 24) ;
- MessageSched_RORX(X2, X3, X0, X1, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,24) ;
- SET_W_K_XFER(X3, 28) ;
- MessageSched_RORX(X3, X0, X1, X2, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,28) ;
- SET_W_K_XFER(X0, 32) ;
- MessageSched_RORX(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,32) ;
- SET_W_K_XFER(X1, 36) ;
- MessageSched_RORX(X1, X2, X3, X0, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,36) ;
- SET_W_K_XFER(X2, 40) ;
- MessageSched_RORX(X2, X3, X0, X1, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,40) ;
- SET_W_K_XFER(X3, 44) ;
- MessageSched_RORX(X3, X0, X1, X2, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,44) ;
-
- SET_W_K_XFER(X0, 48) ;
- SET_W_K_XFER(X1, 52) ;
- SET_W_K_XFER(X2, 56) ;
- SET_W_K_XFER(X3, 60) ;
-
- RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,48) ;
- RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,49) ;
- RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,50) ;
- RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,51) ;
-
- RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,52) ;
- RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,53) ;
- RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,54) ;
- RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,55) ;
-
- RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,56) ;
- RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,57) ;
- RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,58) ;
- RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,59) ;
-
- RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,60) ;
- RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,61) ;
- RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,62) ;
- RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,63) ;
-
- RegToDigest(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7) ;
-
- #if defined(DEBUG_XMM)
- for(i=0; i<29; i++) {
- for(j=0; j<4*14; j+=4)
- printf("xmm%d[%d]=%08x,%08x,%08x,%08x\n", j/4, i,
- xmm[i][j],xmm[i][j+1],xmm[i][j+2],xmm[i][j+3]) ;
- printf("\n") ;
+ if (sha224 == NULL || hash == NULL)
+ return BAD_FUNC_ARG;
+
+ ret = wc_Sha224Copy(sha224, &tmpSha224);
+ if (ret == 0) {
+ ret = wc_Sha224Final(&tmpSha224, hash);
+ wc_Sha224Free(&tmpSha224);
+ }
+ return ret;
}
-
- for(i=0; i<64; i++) printf("W_K[%d]=%08x\n", i, W_K[i]) ;
+ int wc_Sha224Copy(wc_Sha224* src, wc_Sha224* dst)
+ {
+ int ret = 0;
+
+ if (src == NULL || dst == NULL)
+ return BAD_FUNC_ARG;
+
+ XMEMCPY(dst, src, sizeof(wc_Sha224));
+ #ifdef WOLFSSL_SMALL_STACK_CACHE
+ dst->W = NULL;
#endif
- return 0;
-}
-#endif /* HAVE_INTEL_RORX */
+ #ifdef WOLFSSL_ASYNC_CRYPT
+ ret = wolfAsync_DevCopy(&src->asyncDev, &dst->asyncDev);
+ #endif
+ #if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
+ dst->flags |= WC_HASH_FLAG_ISCOPY;
+ #endif
+
+ return ret;
+ }
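wc_Sha224GetHash finalizes a throwaway copy, so callers can read an intermediate digest while the stream keeps going. A sketch of that pattern (sha224_checkpoint and the parameter names are illustrative):

    /* Checkpoint an in-progress SHA-224 stream without disturbing it. */
    static int sha224_checkpoint(wc_Sha224* sha224,
                                 const byte* more, word32 moreLen,
                                 byte partial[WC_SHA224_DIGEST_SIZE])
    {
        int ret = wc_Sha224GetHash(sha224, partial); /* digest of data so far */
        if (ret == 0)
            ret = wc_Sha224Update(sha224, more, moreLen); /* stream continues */
        return ret;
    }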
-#endif /* HAVE_INTEL_AVX1 */
+#if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
+ int wc_Sha224SetFlags(wc_Sha224* sha224, word32 flags)
+ {
+ if (sha224) {
+ sha224->flags = flags;
+ }
+ return 0;
+ }
+ int wc_Sha224GetFlags(wc_Sha224* sha224, word32* flags)
+ {
+ if (sha224 && flags) {
+ *flags = sha224->flags;
+ }
+ return 0;
+ }
+#endif
+#endif /* WOLFSSL_SHA224 */
-#if defined(HAVE_INTEL_AVX2)
+#ifdef WOLFSSL_AFALG_HASH
+ /* implemented in wolfcrypt/src/port/af_alg/afalg_hash.c */
-#define _MOVE_to_REG(ymm, mem) __asm__ volatile("vmovdqu %0, %%"#ymm" ":: "m"(mem):YMM_REGs) ;
-#define _MOVE_to_MEM(mem, ymm) __asm__ volatile("vmovdqu %%"#ymm", %0" : "=m"(mem)::YMM_REGs) ;
-#define _BYTE_SWAP(ymm, map) __asm__ volatile("vpshufb %0, %%"#ymm", %%"#ymm"\n\t"\
- :: "m"(map):YMM_REGs) ;
-#define _MOVE_128(ymm0, ymm1, ymm2, map) __asm__ volatile("vperm2i128 $"#map", %%"\
- #ymm2", %%"#ymm1", %%"#ymm0" ":::YMM_REGs) ;
-#define _MOVE_BYTE(ymm0, ymm1, map) __asm__ volatile("vpshufb %0, %%"#ymm1", %%"\
- #ymm0"\n\t":: "m"(map):YMM_REGs) ;
-#define _S_TEMP(dest, src, bits, temp) __asm__ volatile("vpsrld $"#bits", %%"\
- #src", %%"#dest"\n\tvpslld $32-"#bits", %%"#src", %%"#temp"\n\tvpor %%"\
- #temp",%%"#dest", %%"#dest" ":::YMM_REGs) ;
-#define _AVX2_R(dest, src, bits) __asm__ volatile("vpsrld $"#bits", %%"\
- #src", %%"#dest" ":::YMM_REGs) ;
-#define _XOR(dest, src1, src2) __asm__ volatile("vpxor %%"#src1", %%"\
- #src2", %%"#dest" ":::YMM_REGs) ;
-#define _OR(dest, src1, src2) __asm__ volatile("vpor %%"#src1", %%"\
- #src2", %%"#dest" ":::YMM_REGs) ;
-#define _ADD(dest, src1, src2) __asm__ volatile("vpaddd %%"#src1", %%"\
- #src2", %%"#dest" ":::YMM_REGs) ;
-#define _ADD_MEM(dest, src1, mem) __asm__ volatile("vpaddd %0, %%"#src1", %%"\
- #dest" "::"m"(mem):YMM_REGs) ;
-#define _BLEND(map, dest, src1, src2) __asm__ volatile("vpblendd $"#map", %%"\
- #src1", %%"#src2", %%"#dest" ":::YMM_REGs) ;
-
-#define _EXTRACT_XMM_0(xmm, mem) __asm__ volatile("vpextrd $0, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_1(xmm, mem) __asm__ volatile("vpextrd $1, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_2(xmm, mem) __asm__ volatile("vpextrd $2, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_3(xmm, mem) __asm__ volatile("vpextrd $3, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_4(ymm, xmm, mem)\
- __asm__ volatile("vperm2i128 $0x1, %%"#ymm", %%"#ymm", %%"#ymm" ":::YMM_REGs) ;\
- __asm__ volatile("vpextrd $0, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_5(xmm, mem) __asm__ volatile("vpextrd $1, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_6(xmm, mem) __asm__ volatile("vpextrd $2, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_7(xmm, mem) __asm__ volatile("vpextrd $3, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-
-#define _SWAP_YMM_HL(ymm) __asm__ volatile("vperm2i128 $0x1, %%"#ymm", %%"#ymm", %%"#ymm" ":::YMM_REGs) ;
-#define SWAP_YMM_HL(ymm) _SWAP_YMM_HL(ymm)
-
-#define MOVE_to_REG(ymm, mem) _MOVE_to_REG(ymm, mem)
-#define MOVE_to_MEM(mem, ymm) _MOVE_to_MEM(mem, ymm)
-#define BYTE_SWAP(ymm, map) _BYTE_SWAP(ymm, map)
-#define MOVE_128(ymm0, ymm1, ymm2, map) _MOVE_128(ymm0, ymm1, ymm2, map)
-#define MOVE_BYTE(ymm0, ymm1, map) _MOVE_BYTE(ymm0, ymm1, map)
-#define XOR(dest, src1, src2) _XOR(dest, src1, src2)
-#define OR(dest, src1, src2) _OR(dest, src1, src2)
-#define ADD(dest, src1, src2) _ADD(dest, src1, src2)
-#define ADD_MEM(dest, src1, mem) _ADD_MEM(dest, src1, mem)
-#define BLEND(map, dest, src1, src2) _BLEND(map, dest, src1, src2)
-
-#define S_TMP(dest, src, bits, temp) _S_TEMP(dest, src, bits, temp);
-#define AVX2_S(dest, src, bits) S_TMP(dest, src, bits, S_TEMP)
-#define AVX2_R(dest, src, bits) _AVX2_R(dest, src, bits)
-
-#define GAMMA0(dest, src) AVX2_S(dest, src, 7); AVX2_S(G_TEMP, src, 18); \
- XOR(dest, G_TEMP, dest) ; AVX2_R(G_TEMP, src, 3); XOR(dest, G_TEMP, dest) ;
-#define GAMMA0_1(dest, src) AVX2_S(dest, src, 7); AVX2_S(G_TEMP, src, 18);
-#define GAMMA0_2(dest, src) XOR(dest, G_TEMP, dest) ; AVX2_R(G_TEMP, src, 3); \
- XOR(dest, G_TEMP, dest) ;
-
-#define GAMMA1(dest, src) AVX2_S(dest, src, 17); AVX2_S(G_TEMP, src, 19); \
- XOR(dest, G_TEMP, dest) ; AVX2_R(G_TEMP, src, 10); XOR(dest, G_TEMP, dest) ;
-#define GAMMA1_1(dest, src) AVX2_S(dest, src, 17); AVX2_S(G_TEMP, src, 19);
-#define GAMMA1_2(dest, src) XOR(dest, G_TEMP, dest) ; AVX2_R(G_TEMP, src, 10); \
- XOR(dest, G_TEMP, dest) ;
-
-#define FEEDBACK1_to_W_I_2 MOVE_BYTE(YMM_TEMP0, W_I, mMAP1toW_I_2[0]) ; \
- BLEND(0x0c, W_I_2, YMM_TEMP0, W_I_2) ;
-#define FEEDBACK2_to_W_I_2 MOVE_128(YMM_TEMP0, W_I, W_I, 0x08) ; \
- MOVE_BYTE(YMM_TEMP0, YMM_TEMP0, mMAP2toW_I_2[0]) ; BLEND(0x30, W_I_2, YMM_TEMP0, W_I_2) ;
-#define FEEDBACK3_to_W_I_2 MOVE_BYTE(YMM_TEMP0, W_I, mMAP3toW_I_2[0]) ; \
- BLEND(0xc0, W_I_2, YMM_TEMP0, W_I_2) ;
-
-#define FEEDBACK_to_W_I_7 MOVE_128(YMM_TEMP0, W_I, W_I, 0x08) ;\
- MOVE_BYTE(YMM_TEMP0, YMM_TEMP0, mMAPtoW_I_7[0]) ; BLEND(0x80, W_I_7, YMM_TEMP0, W_I_7) ;
-
-#undef volatile
-
-#define W_I_16 ymm8
-#define W_I_15 ymm9
-#define W_I_7 ymm10
-#define W_I_2 ymm11
-#define W_I ymm12
-#define G_TEMP ymm13
-#define S_TEMP ymm14
-#define YMM_TEMP0 ymm15
-#define YMM_TEMP0x xmm15
-#define W_I_TEMP ymm7
-#define W_K_TEMP ymm15
-#define W_K_TEMPx xmm15
-
-#define YMM_REGs /* Registers are saved in Sha256Update/Final */
- /* "%ymm7","%ymm8","%ymm9","%ymm10","%ymm11","%ymm12","%ymm13","%ymm14","%ymm15"*/
-
-
-#define MOVE_15_to_16(w_i_16, w_i_15, w_i_7)\
- __asm__ volatile("vperm2i128 $0x01, %%"#w_i_15", %%"#w_i_15", %%"#w_i_15" ":::YMM_REGs) ;\
- __asm__ volatile("vpblendd $0x08, %%"#w_i_15", %%"#w_i_7", %%"#w_i_16" ":::YMM_REGs) ;\
- __asm__ volatile("vperm2i128 $0x01, %%"#w_i_7", %%"#w_i_7", %%"#w_i_15" ":::YMM_REGs) ;\
- __asm__ volatile("vpblendd $0x80, %%"#w_i_15", %%"#w_i_16", %%"#w_i_16" ":::YMM_REGs) ;\
- __asm__ volatile("vpshufd $0x93, %%"#w_i_16", %%"#w_i_16" ":::YMM_REGs) ;\
-
-#define MOVE_7_to_15(w_i_15, w_i_7)\
- __asm__ volatile("vmovdqu %%"#w_i_7", %%"#w_i_15" ":::YMM_REGs) ;\
-
-#define MOVE_I_to_7(w_i_7, w_i)\
- __asm__ volatile("vperm2i128 $0x01, %%"#w_i", %%"#w_i", %%"#w_i_7" ":::YMM_REGs) ;\
- __asm__ volatile("vpblendd $0x01, %%"#w_i_7", %%"#w_i", %%"#w_i_7" ":::YMM_REGs) ;\
- __asm__ volatile("vpshufd $0x39, %%"#w_i_7", %%"#w_i_7" ":::YMM_REGs) ;\
-
-#define MOVE_I_to_2(w_i_2, w_i)\
- __asm__ volatile("vperm2i128 $0x01, %%"#w_i", %%"#w_i", %%"#w_i_2" ":::YMM_REGs) ;\
- __asm__ volatile("vpshufd $0x0e, %%"#w_i_2", %%"#w_i_2" ":::YMM_REGs) ;\
-
-#define ROTATE_W(w_i_16, w_i_15, w_i_7, w_i_2, w_i)\
- MOVE_15_to_16(w_i_16, w_i_15, w_i_7) ; \
- MOVE_7_to_15(w_i_15, w_i_7) ; \
- MOVE_I_to_7(w_i_7, w_i) ; \
- MOVE_I_to_2(w_i_2, w_i) ;\
-
-#define _RegToDigest(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- { word32 d ;\
- __asm__ volatile("movl %"#S_0", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[0] += d;\
- __asm__ volatile("movl %"#S_1", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[1] += d;\
- __asm__ volatile("movl %"#S_2", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[2] += d;\
- __asm__ volatile("movl %"#S_3", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[3] += d;\
- __asm__ volatile("movl %"#S_4", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[4] += d;\
- __asm__ volatile("movl %"#S_5", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[5] += d;\
- __asm__ volatile("movl %"#S_6", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[6] += d;\
- __asm__ volatile("movl %"#S_7", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[7] += d;\
-}
+#elif defined(WOLFSSL_DEVCRYPTO_HASH)
+    /* implemented in wolfcrypt/src/port/devcrypto/devcrypto_hash.c */
-#define _DumpS(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- { word32 d[8] ;\
- __asm__ volatile("movl %"#S_0", %0":"=r"(d[0])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_1", %0":"=r"(d[1])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_2", %0":"=r"(d[2])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_3", %0":"=r"(d[3])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_4", %0":"=r"(d[4])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_5", %0":"=r"(d[5])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_6", %0":"=r"(d[6])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_7", %0":"=r"(d[7])::SSE_REGs) ;\
- printf("S[0..7]=%08x,%08x,%08x,%08x,%08x,%08x,%08x,%08x\n", d[0],d[1],d[2],d[3],d[4],d[5],d[6],d[7]);\
- __asm__ volatile("movl %0, %"#S_0::"r"(d[0]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_1::"r"(d[1]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_2::"r"(d[2]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_3::"r"(d[3]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_4::"r"(d[4]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_5::"r"(d[5]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_6::"r"(d[6]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_7::"r"(d[7]):SSE_REGs) ;\
-}
+#elif defined(WOLFSSL_RENESAS_TSIP_CRYPT) && \
+ !defined(NO_WOLFSSL_RENESAS_TSIP_CRYPT_HASH)
+ /* implemented in wolfcrypt/src/port/Renesas/renesas_tsip_sha.c */
+#else
-#define DigestToReg(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- _DigestToReg(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )
-
-#define RegToDigest(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- _RegToDigest(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )
-
-#define DumS(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- _DumpS(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )
-
-
- /* Byte swap masks to ensure that the rest of the words are filled with zeros. */
- static const unsigned long mBYTE_FLIP_MASK_16[] =
- { 0x0405060700010203, 0x0c0d0e0f08090a0b, 0x0405060700010203, 0x0c0d0e0f08090a0b } ;
- static const unsigned long mBYTE_FLIP_MASK_15[] =
- { 0x0405060700010203, 0x0c0d0e0f08090a0b, 0x0405060700010203, 0x0c0d0e0f08090a0b } ;
- static const unsigned long mBYTE_FLIP_MASK_7 [] =
- { 0x0405060700010203, 0x0c0d0e0f08090a0b, 0x0405060700010203, 0x8080808008090a0b } ;
- static const unsigned long mBYTE_FLIP_MASK_2 [] =
- { 0x0405060700010203, 0x8080808080808080, 0x8080808080808080, 0x8080808080808080 } ;
-
- static const unsigned long mMAPtoW_I_7[] =
- { 0x8080808080808080, 0x8080808080808080, 0x8080808080808080, 0x0302010080808080 } ;
- static const unsigned long mMAP1toW_I_2[] =
- { 0x8080808080808080, 0x0706050403020100, 0x8080808080808080, 0x8080808080808080 } ;
- static const unsigned long mMAP2toW_I_2[] =
- { 0x8080808080808080, 0x8080808080808080, 0x0f0e0d0c0b0a0908, 0x8080808080808080 } ;
- static const unsigned long mMAP3toW_I_2[] =
- { 0x8080808080808080, 0x8080808080808080, 0x8080808080808080, 0x0706050403020100 } ;
-
-static int Transform_AVX2(Sha256* sha256)
+int wc_Sha256GetHash(wc_Sha256* sha256, byte* hash)
{
+ int ret;
+ wc_Sha256 tmpSha256;
- #ifdef WOLFSSL_SMALL_STACK
- word32* W_K;
- W_K = (word32*) XMALLOC(sizeof(word32) * 64, NULL, DYNAMIC_TYPE_TMP_BUFFER);
- if (W_K == NULL)
- return MEMORY_E;
- #else
- word32 W_K[64] ;
- #endif
+ if (sha256 == NULL || hash == NULL)
+ return BAD_FUNC_ARG;
- MOVE_to_REG(W_I_16, sha256->buffer[0]); BYTE_SWAP(W_I_16, mBYTE_FLIP_MASK_16[0]) ;
- MOVE_to_REG(W_I_15, sha256->buffer[1]); BYTE_SWAP(W_I_15, mBYTE_FLIP_MASK_15[0]) ;
- MOVE_to_REG(W_I, sha256->buffer[8]) ; BYTE_SWAP(W_I, mBYTE_FLIP_MASK_16[0]) ;
- MOVE_to_REG(W_I_7, sha256->buffer[16-7]) ; BYTE_SWAP(W_I_7, mBYTE_FLIP_MASK_7[0]) ;
- MOVE_to_REG(W_I_2, sha256->buffer[16-2]) ; BYTE_SWAP(W_I_2, mBYTE_FLIP_MASK_2[0]) ;
-
- DigestToReg(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7) ;
-
- ADD_MEM(W_K_TEMP, W_I_16, K[0]) ;
- MOVE_to_MEM(W_K[0], W_K_TEMP) ;
-
- RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,0) ;
- RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,1) ;
- RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,2) ;
- RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,3) ;
- RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,4) ;
- RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,5) ;
- RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,6) ;
- RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,7) ;
-
- ADD_MEM(YMM_TEMP0, W_I, K[8]) ;
- MOVE_to_MEM(W_K[8], YMM_TEMP0) ;
-
- /* W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] */
- RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,8) ;
- GAMMA0_1(W_I_TEMP, W_I_15) ;
- RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,8) ;
- GAMMA0_2(W_I_TEMP, W_I_15) ;
- RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,8) ;
- ADD(W_I_TEMP, W_I_16, W_I_TEMP) ;/* for saving W_I before adding incomplete W_I_7 */
- RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,9) ;
- ADD(W_I, W_I_7, W_I_TEMP);
- RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,9) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,9) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,10) ;
- ADD(W_I, W_I, YMM_TEMP0) ;/* now W[16..17] are completed */
- RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,10) ;
- FEEDBACK1_to_W_I_2 ;
- RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,10) ;
- FEEDBACK_to_W_I_7 ;
- RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,11) ;
- ADD(W_I_TEMP, W_I_7, W_I_TEMP);
- RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,11) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,11) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,12) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ;/* now W[16..19] are completed */
- RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,12) ;
- FEEDBACK2_to_W_I_2 ;
- RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,12) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,13) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,13) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[16..21] are completed */
- RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,13) ;
- FEEDBACK3_to_W_I_2 ;
- RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,14) ;
- GAMMA1(YMM_TEMP0, W_I_2) ;
- RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,14) ;
- RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,14) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[16..23] are completed */
- RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,15) ;
-
- MOVE_to_REG(YMM_TEMP0, K[16]) ;
- RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,15) ;
- ROTATE_W(W_I_16, W_I_15, W_I_7, W_I_2, W_I) ;
- RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,15) ;
- ADD(YMM_TEMP0, YMM_TEMP0, W_I) ;
- MOVE_to_MEM(W_K[16], YMM_TEMP0) ;
-
- /* W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] */
- RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,16) ;
- GAMMA0_1(W_I_TEMP, W_I_15) ;
- RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,16) ;
- GAMMA0_2(W_I_TEMP, W_I_15) ;
- RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,16) ;
- ADD(W_I_TEMP, W_I_16, W_I_TEMP) ;/* for saving W_I before adding incomplete W_I_7 */
- RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,17) ;
- ADD(W_I, W_I_7, W_I_TEMP);
- RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,17) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,17) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,18) ;
- ADD(W_I, W_I, YMM_TEMP0) ;/* now W[24..25] are completed */
- RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,18) ;
- FEEDBACK1_to_W_I_2 ;
- RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,18) ;
- FEEDBACK_to_W_I_7 ;
- RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,19) ;
- ADD(W_I_TEMP, W_I_7, W_I_TEMP);
- RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,19) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,19) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,20) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ;/* now W[24..27] are completed */
- RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,20) ;
- FEEDBACK2_to_W_I_2 ;
- RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,20) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,21) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,21) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[24..29] are completed */
- RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,21) ;
- FEEDBACK3_to_W_I_2 ;
- RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,22) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,22) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,22) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[24..31] are completed */
- RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,23) ;
-
- MOVE_to_REG(YMM_TEMP0, K[24]) ;
- RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,23) ;
- ROTATE_W(W_I_16, W_I_15, W_I_7, W_I_2, W_I) ;
- RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,23) ;
- ADD(YMM_TEMP0, YMM_TEMP0, W_I) ;
- MOVE_to_MEM(W_K[24], YMM_TEMP0) ;
-
- /* W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] */
- RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,24) ;
- GAMMA0_1(W_I_TEMP, W_I_15) ;
- RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,24) ;
- GAMMA0_2(W_I_TEMP, W_I_15) ;
- RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,24) ;
- ADD(W_I_TEMP, W_I_16, W_I_TEMP) ;/* for saving W_I before adding incomplete W_I_7 */
- RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,25) ;
- ADD(W_I, W_I_7, W_I_TEMP);
- RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,25) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,25) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,26) ;
- ADD(W_I, W_I, YMM_TEMP0) ;/* now W[32..33] are completed */
- RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,26) ;
- FEEDBACK1_to_W_I_2 ;
- RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,26) ;
- FEEDBACK_to_W_I_7 ;
- RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,27) ;
- ADD(W_I_TEMP, W_I_7, W_I_TEMP);
- RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,27) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,27) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,28) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ;/* now W[32..35] are completed */
- RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,28) ;
- FEEDBACK2_to_W_I_2 ;
- RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,28) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,29) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,29) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[32..37] are completed */
- RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,29) ;
- FEEDBACK3_to_W_I_2 ;
- RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,30) ;
- GAMMA1(YMM_TEMP0, W_I_2) ;
- RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,30) ;
- RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,30) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[32..39] are completed */
- RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,31) ;
-
- MOVE_to_REG(YMM_TEMP0, K[32]) ;
- RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,31) ;
- ROTATE_W(W_I_16, W_I_15, W_I_7, W_I_2, W_I) ;
- RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,31) ;
- ADD(YMM_TEMP0, YMM_TEMP0, W_I) ;
- MOVE_to_MEM(W_K[32], YMM_TEMP0) ;
-
-
- /* W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] */
- RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,32) ;
- GAMMA0_1(W_I_TEMP, W_I_15) ;
- RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,32) ;
- GAMMA0_2(W_I_TEMP, W_I_15) ;
- RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,32) ;
- ADD(W_I_TEMP, W_I_16, W_I_TEMP) ;/* for saving W_I before adding incomplete W_I_7 */
- RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,33) ;
- ADD(W_I, W_I_7, W_I_TEMP);
- RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,33) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,33) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,34) ;
- ADD(W_I, W_I, YMM_TEMP0) ;/* now W[40..41] are completed */
- RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,34) ;
- FEEDBACK1_to_W_I_2 ;
- RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,34) ;
- FEEDBACK_to_W_I_7 ;
- RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,35) ;
- ADD(W_I_TEMP, W_I_7, W_I_TEMP);
- RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,35) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,35) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,36) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ;/* now W[40..43] are completed */
- RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,36) ;
- FEEDBACK2_to_W_I_2 ;
- RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,36) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,37) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,37) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[40..45] are completed */
- RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,37) ;
- FEEDBACK3_to_W_I_2 ;
- RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,38) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,38) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,38) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[40..47] are completed */
- RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,39) ;
-
- MOVE_to_REG(YMM_TEMP0, K[40]) ;
- RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,39) ;
- ROTATE_W(W_I_16, W_I_15, W_I_7, W_I_2, W_I) ;
- RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,39) ;
- ADD(YMM_TEMP0, YMM_TEMP0, W_I) ;
- MOVE_to_MEM(W_K[40], YMM_TEMP0) ;
-
- /* W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] */
- RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,40) ;
- GAMMA0_1(W_I_TEMP, W_I_15) ;
- RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,40) ;
- GAMMA0_2(W_I_TEMP, W_I_15) ;
- RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,40) ;
- ADD(W_I_TEMP, W_I_16, W_I_TEMP) ;/* for saving W_I before adding incomplete W_I_7 */
- RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,41) ;
- ADD(W_I, W_I_7, W_I_TEMP);
- RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,41) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,41) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,42) ;
- ADD(W_I, W_I, YMM_TEMP0) ;/* now W[48..49] are completed */
- RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,42) ;
- FEEDBACK1_to_W_I_2 ;
- RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,42) ;
- FEEDBACK_to_W_I_7 ;
- RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,43) ;
- ADD(W_I_TEMP, W_I_7, W_I_TEMP);
- RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,43) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,43) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,44) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ;/* now W[48..51] are completed */
- RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,44) ;
- FEEDBACK2_to_W_I_2 ;
- RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,44) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,45) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,45) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[48..53] are completed */
- RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,45) ;
- FEEDBACK3_to_W_I_2 ;
- RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,46) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,46) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,46) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[48..55] are completed */
- RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,47) ;
-
- MOVE_to_REG(YMM_TEMP0, K[48]) ;
- RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,47) ;
- ROTATE_W(W_I_16, W_I_15, W_I_7, W_I_2, W_I) ;
- RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,47) ;
- ADD(YMM_TEMP0, YMM_TEMP0, W_I) ;
- MOVE_to_MEM(W_K[48], YMM_TEMP0) ;
-
- /* W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] */
- RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,48) ;
- GAMMA0_1(W_I_TEMP, W_I_15) ;
- RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,48) ;
- GAMMA0_2(W_I_TEMP, W_I_15) ;
- RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,48) ;
- ADD(W_I_TEMP, W_I_16, W_I_TEMP) ;/* for saving W_I before adding incomplete W_I_7 */
- RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,49) ;
- ADD(W_I, W_I_7, W_I_TEMP);
- RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,49) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,49) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,50) ;
- ADD(W_I, W_I, YMM_TEMP0) ;/* now W[56..57] are completed */
- RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,50) ;
- FEEDBACK1_to_W_I_2 ;
- RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,50) ;
- FEEDBACK_to_W_I_7 ;
- RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,51) ;
- ADD(W_I_TEMP, W_I_7, W_I_TEMP);
- RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,51) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,51) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,52) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ;/* now W[56..59] are completed */
- RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,52) ;
- FEEDBACK2_to_W_I_2 ;
- RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,52) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,53) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,53) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[56..61] are completed */
- RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,53) ;
- FEEDBACK3_to_W_I_2 ;
- RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,54) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,54) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,54) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[56..63] are completed */
- RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,55) ;
-
- MOVE_to_REG(YMM_TEMP0, K[56]) ;
- RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,55) ;
- ROTATE_W(W_I_16, W_I_15, W_I_7, W_I_2, W_I) ;
- RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,55) ;
- ADD(YMM_TEMP0, YMM_TEMP0, W_I) ;
- MOVE_to_MEM(W_K[56], YMM_TEMP0) ;
-
- RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,56) ;
- RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,57) ;
- RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,58) ;
- RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,59) ;
-
- RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,60) ;
- RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,61) ;
- RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,62) ;
- RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,63) ;
-
- RegToDigest(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7) ;
-
- #ifdef WOLFSSL_SMALL_STACK
-    XFREE(W_K, NULL, DYNAMIC_TYPE_TMP_BUFFER);
- #endif
-
- return 0;
+#if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+    if (sha256->ctx.mode == ESP32_SHA_INIT) {
+        esp_sha_try_hw_lock(&sha256->ctx);
+    }
+    if (sha256->ctx.mode == ESP32_SHA_HW) {
+        esp_sha256_digest_process(sha256, 0);
+    }
+#endif
+ ret = wc_Sha256Copy(sha256, &tmpSha256);
+ if (ret == 0) {
+ ret = wc_Sha256Final(&tmpSha256, hash);
+#if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+ sha256->ctx.mode = ESP32_SHA_SW;
+#endif
+
+ wc_Sha256Free(&tmpSha256);
+ }
+ return ret;
}
+int wc_Sha256Copy(wc_Sha256* src, wc_Sha256* dst)
+{
+ int ret = 0;
-#endif /* HAVE_INTEL_AVX2 */
+ if (src == NULL || dst == NULL)
+ return BAD_FUNC_ARG;
-#endif /* HAVE_FIPS */
+ XMEMCPY(dst, src, sizeof(wc_Sha256));
+#ifdef WOLFSSL_SMALL_STACK_CACHE
+ dst->W = NULL;
+#endif
-#endif /* WOLFSSL_TI_HASH */
+#ifdef WOLFSSL_ASYNC_CRYPT
+ ret = wolfAsync_DevCopy(&src->asyncDev, &dst->asyncDev);
+#endif
+#ifdef WOLFSSL_PIC32MZ_HASH
+ ret = wc_Pic32HashCopy(&src->cache, &dst->cache);
+#endif
+#if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+ dst->ctx.mode = src->ctx.mode;
+ dst->ctx.isfirstblock = src->ctx.isfirstblock;
+ dst->ctx.sha_type = src->ctx.sha_type;
+#endif
+#if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
+ dst->flags |= WC_HASH_FLAG_ISCOPY;
+#endif
-#endif /* NO_SHA256 */
+ return ret;
+}
+#endif
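Because wc_Sha256Copy duplicates the whole context (and re-homes the async handle, the cached W pointer, and the ESP32 hardware-mode fields), a hashed common prefix can be finished in two different directions. A sketch assuming the software path (sha256_fork and the parameter names are illustrative):

    /* Fork one partially hashed context into two digests. */
    static int sha256_fork(wc_Sha256* prefix,
                           const byte* tailA, word32 aLen,
                           byte outA[WC_SHA256_DIGEST_SIZE],
                           const byte* tailB, word32 bLen,
                           byte outB[WC_SHA256_DIGEST_SIZE])
    {
        wc_Sha256 branch;
        int ret = wc_Sha256Copy(prefix, &branch);
        if (ret == 0) {
            ret = wc_Sha256Update(&branch, tailA, aLen);
            if (ret == 0)
                ret = wc_Sha256Final(&branch, outA);
            wc_Sha256Free(&branch);
        }
        if (ret == 0) {
            ret = wc_Sha256Update(prefix, tailB, bLen);
            if (ret == 0)
                ret = wc_Sha256Final(prefix, outB);
        }
        return ret;
    }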
+
+#if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
+int wc_Sha256SetFlags(wc_Sha256* sha256, word32 flags)
+{
+ if (sha256) {
+ sha256->flags = flags;
+ }
+ return 0;
+}
+int wc_Sha256GetFlags(wc_Sha256* sha256, word32* flags)
+{
+ if (sha256 && flags) {
+ *flags = sha256->flags;
+ }
+ return 0;
+}
+#endif
+#endif /* !WOLFSSL_TI_HASH */
+#endif /* NO_SHA256 */