Diffstat (limited to 'FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/asm.c')
 -rw-r--r--  FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/asm.c | 799
 1 file changed, 482 insertions(+), 317 deletions(-)
diff --git a/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/asm.c b/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/asm.c
index 9f8458588..0af4447c7 100644
--- a/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/asm.c
+++ b/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/asm.c
@@ -1,8 +1,8 @@
/* asm.c
*
- * Copyright (C) 2006-2015 wolfSSL Inc.
+ * Copyright (C) 2006-2020 wolfSSL Inc.
*
- * This file is part of wolfSSL. (formerly known as CyaSSL)
+ * This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -16,9 +16,10 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
+
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
@@ -50,7 +51,7 @@
#else
#include <intrin.h>
- #define cpuid(a,b) __cpuid((int*)a,b)
+ #define cpuid(a,b,c) __cpuidex((int*)a,b,c)
#define XASM_LINK(f)
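Note: the switch to __cpuidex() matters because the leaf-7 feature bits
tested below (BMI2 at EBX bit 8, ADX at EBX bit 19) are defined only for
sub-leaf ECX=0, which __cpuid() gives the caller no way to specify. A
minimal sketch of the equivalent query on the GCC/Clang side, assuming
<cpuid.h> is available:

    #include <cpuid.h>

    static int has_adx(void) {
        unsigned int eax, ebx, ecx, edx;
        /* leaf 7, sub-leaf 0: structured extended feature flags */
        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
            return 0;
        return (ebx >> 19) & 1;  /* EBX bit 19 = ADX */
    }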
@@ -58,9 +59,9 @@
#define EAX 0
#define EBX 1
-#define ECX 2
+#define ECX 2
#define EDX 3
-
+
#define CPUID_AVX1 0x1
#define CPUID_AVX2 0x2
#define CPUID_RDRAND 0x4
@@ -74,30 +75,40 @@
#define IS_INTEL_ADX (cpuid_flags&CPUID_ADX)
#define IS_INTEL_RDRAND (cpuid_flags&CPUID_RDRAND)
#define IS_INTEL_RDSEED (cpuid_flags&CPUID_RDSEED)
-#define SET_FLAGS
+#define SET_FLAGS
static word32 cpuid_check = 0 ;
static word32 cpuid_flags = 0 ;
static word32 cpuid_flag(word32 leaf, word32 sub, word32 num, word32 bit) {
- int got_intel_cpu=0;
- unsigned int reg[5];
-
+ int got_intel_cpu = 0;
+ int got_amd_cpu = 0;
+ unsigned int reg[5];
+
reg[4] = '\0' ;
- cpuid(reg, 0, 0);
- if(memcmp((char *)&(reg[EBX]), "Genu", 4) == 0 &&
- memcmp((char *)&(reg[EDX]), "ineI", 4) == 0 &&
- memcmp((char *)&(reg[ECX]), "ntel", 4) == 0) {
- got_intel_cpu = 1;
- }
- if (got_intel_cpu) {
+ cpuid(reg, 0, 0);
+
+ /* check for intel cpu */
+ if( memcmp((char *)&(reg[EBX]), "Genu", 4) == 0 &&
+ memcmp((char *)&(reg[EDX]), "ineI", 4) == 0 &&
+ memcmp((char *)&(reg[ECX]), "ntel", 4) == 0) {
+ got_intel_cpu = 1;
+ }
+
+ /* check for AMD cpu */
+ if( memcmp((char *)&(reg[EBX]), "Auth", 4) == 0 &&
+ memcmp((char *)&(reg[EDX]), "enti", 4) == 0 &&
+ memcmp((char *)&(reg[ECX]), "cAMD", 4) == 0) {
+ got_amd_cpu = 1;
+ }
+ if (got_intel_cpu || got_amd_cpu) {
cpuid(reg, leaf, sub);
return((reg[num]>>bit)&0x1) ;
}
return 0 ;
}
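CPUID leaf 0 returns the 12-byte vendor id split across EBX, EDX, ECX (in
that order), which is why the checks above compare four bytes at a time
against "Genu"/"ineI"/"ntel" and, newly, "Auth"/"enti"/"cAMD". A hedged
sketch that reassembles the string, assuming GCC/Clang's <cpuid.h>:

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13];
        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
            return 1;
        memcpy(vendor + 0, &ebx, 4);   /* "Genu" / "Auth" */
        memcpy(vendor + 4, &edx, 4);   /* "ineI" / "enti" */
        memcpy(vendor + 8, &ecx, 4);   /* "ntel" / "cAMD" */
        vendor[12] = '\0';
        printf("%s\n", vendor);        /* GenuineIntel or AuthenticAMD */
        return 0;
    }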
-INLINE static int set_cpuid_flags(void) {
+WC_INLINE static int set_cpuid_flags(void) {
if(cpuid_check == 0) {
if(cpuid_flag(7, 0, EBX, 8)){ cpuid_flags |= CPUID_BMI2 ; }
if(cpuid_flag(7, 0, EBX,19)){ cpuid_flags |= CPUID_ADX ; }
@@ -116,17 +127,17 @@ INLINE static int set_cpuid_flags(void) {
#define IF_HAVE_INTEL_MULX(func, ret)
#endif
-#if defined(TFM_X86) && !defined(TFM_SSE2)
+#if defined(TFM_X86) && !defined(TFM_SSE2)
/* x86-32 code */
-#define MONT_START
+#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
mu = c[x] * mp
#define INNERMUL \
-__asm__( \
+__asm__( \
"movl %5,%%eax \n\t" \
"mull %4 \n\t" \
"addl %1,%%eax \n\t" \
@@ -135,11 +146,11 @@ __asm__( \
"adcl $0,%%edx \n\t" \
"movl %%edx,%1 \n\t" \
:"=g"(_c[LO]), "=r"(cy) \
-:"0"(_c[LO]), "1"(cy), "g"(mu), "g"(*tmpm++) \
+:"0"(_c[LO]), "1"(cy), "r"(mu), "r"(*tmpm++) \
: "%eax", "%edx", "cc")
#define PROPCARRY \
-__asm__( \
+__asm__( \
"addl %1,%0 \n\t" \
"setb %%al \n\t" \
"movzbl %%al,%1 \n\t" \
@@ -151,14 +162,14 @@ __asm__( \
#elif defined(TFM_X86_64)
/* x86-64 code */
-#define MONT_START
+#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
- mu = c[x] * mp;
+ mu = c[x] * mp
#define INNERMUL \
-__asm__( \
+__asm__( \
"movq %5,%%rax \n\t" \
"mulq %4 \n\t" \
"addq %1,%%rax \n\t" \
@@ -171,70 +182,63 @@ __asm__( \
: "%rax", "%rdx", "cc")
#if defined(HAVE_INTEL_MULX)
-#define MULX_INIT(a0, c0, cy)\
- __asm__ volatile( \
- "xorq %%r10, %%r10\n\t" \
- "movq %1,%%rdx\n\t" \
- "addq %2, %0\n\t" /* c0+=cy; Set CF, OF */ \
- "adoxq %%r10, %%r10\n\t" /* Reset OF */ \
- :"+m"(c0):"r"(a0),"r"(cy):"%r8","%r9", "%r10","%r11","%r12","%rdx") ; \
-
-#define MULX_INNERMUL_R1(c0, c1, pre, rdx)\
- { \
- __asm__ volatile ( \
- "movq %3, %%rdx\n\t" \
- "mulx %%r11,%%r9, %%r8 \n\t" \
- "movq %2, %%r12\n\t" \
- "adoxq %%r9,%0 \n\t" \
- "adcxq %%r8,%1 \n\t" \
- :"+r"(c0),"+r"(c1):"m"(pre),"r"(rdx):"%r8","%r9", "%r10", "%r11","%r12","%rdx" \
- ); }
-
-
-#define MULX_INNERMUL_R2(c0, c1, pre, rdx)\
- { \
- __asm__ volatile ( \
- "movq %3, %%rdx\n\t" \
- "mulx %%r12,%%r9, %%r8 \n\t" \
- "movq %2, %%r11\n\t" \
- "adoxq %%r9,%0 \n\t" \
- "adcxq %%r8,%1 \n\t" \
- :"+r"(c0),"+r"(c1):"m"(pre),"r"(rdx):"%r8","%r9", "%r10", "%r11","%r12","%rdx" \
- ); }
-
-#define MULX_LOAD_R1(val)\
- __asm__ volatile ( \
- "movq %0, %%r11\n\t"\
- ::"m"(val):"%r8","%r9", "%r10", "%r11","%r12","%rdx"\
-) ;
-
-#define MULX_INNERMUL_LAST(c0, c1, rdx)\
- { \
- __asm__ volatile ( \
- "movq %2, %%rdx\n\t" \
- "mulx %%r12,%%r9, %%r8 \n\t" \
- "movq $0, %%r10 \n\t" \
- "adoxq %%r10, %%r9 \n\t" \
- "adcq $0,%%r8 \n\t" \
- "addq %%r9,%0 \n\t" \
- "adcq $0,%%r8 \n\t" \
- "movq %%r8,%1 \n\t" \
- :"+m"(c0),"=m"(c1):"r"(rdx):"%r8","%r9","%r10", "%r11", "%r12","%rdx"\
- ); }
-
-#define MULX_INNERMUL8(x,y,z,cy)\
-{ word64 rdx = y ;\
- MULX_LOAD_R1(x[0]) ;\
- MULX_INIT(y, _c0, cy) ; /* rdx=y; z0+=cy; */ \
- MULX_INNERMUL_R1(_c0, _c1, x[1], rdx) ;\
- MULX_INNERMUL_R2(_c1, _c2, x[2], rdx) ;\
- MULX_INNERMUL_R1(_c2, _c3, x[3], rdx) ;\
- MULX_INNERMUL_R2(_c3, _c4, x[4], rdx) ;\
- MULX_INNERMUL_R1(_c4, _c5, x[5], rdx) ;\
- MULX_INNERMUL_R2(_c5, _c6, x[6], rdx) ;\
- MULX_INNERMUL_R1(_c6, _c7, x[7], rdx) ;\
- MULX_INNERMUL_LAST(_c7, cy, rdx) ;\
-}
+#define MULX_INNERMUL8(x,y,z,cy) \
+ __asm__ volatile ( \
+ "movq %[yn], %%rdx\n\t" \
+ "xorq %%rcx, %%rcx\n\t" \
+ "movq 0(%[c]), %%r8\n\t" \
+ "movq 8(%[c]), %%r9\n\t" \
+ "movq 16(%[c]), %%r10\n\t" \
+ "movq 24(%[c]), %%r11\n\t" \
+ "movq 32(%[c]), %%r12\n\t" \
+ "movq 40(%[c]), %%r13\n\t" \
+ "movq 48(%[c]), %%r14\n\t" \
+ "movq 56(%[c]), %%r15\n\t" \
+ \
+ "mulx 0(%[xp]), %%rax, %%rcx\n\t" \
+ "adcxq %[cy], %%r8\n\t" \
+ "adoxq %%rax, %%r8\n\t" \
+ "mulx 8(%[xp]), %%rax, %[cy]\n\t" \
+ "adcxq %%rcx, %%r9\n\t" \
+ "adoxq %%rax, %%r9\n\t" \
+ "mulx 16(%[xp]), %%rax, %%rcx\n\t" \
+ "adcxq %[cy], %%r10\n\t" \
+ "adoxq %%rax, %%r10\n\t" \
+ "mulx 24(%[xp]), %%rax, %[cy]\n\t" \
+ "adcxq %%rcx, %%r11\n\t" \
+ "adoxq %%rax, %%r11\n\t" \
+ "mulx 32(%[xp]), %%rax, %%rcx\n\t" \
+ "adcxq %[cy], %%r12\n\t" \
+ "adoxq %%rax, %%r12\n\t" \
+ "mulx 40(%[xp]), %%rax, %[cy]\n\t" \
+ "adcxq %%rcx, %%r13\n\t" \
+ "adoxq %%rax, %%r13\n\t" \
+ "mulx 48(%[xp]), %%rax, %%rcx\n\t" \
+ "adcxq %[cy], %%r14\n\t" \
+ "adoxq %%rax, %%r14\n\t" \
+ "adcxq %%rcx, %%r15\n\t" \
+ "mulx 56(%[xp]), %%rax, %[cy]\n\t" \
+ "movq $0, %%rdx\n\t" \
+ "adoxq %%rdx, %%rax\n\t" \
+ "adcxq %%rdx, %[cy]\n\t" \
+ "adoxq %%rdx, %[cy]\n\t" \
+ "addq %%rax, %%r15\n\t" \
+ "adcq $0, %[cy]\n\t" \
+ \
+ "movq %%r8, 0(%[c])\n\t" \
+ "movq %%r9, 8(%[c])\n\t" \
+ "movq %%r10, 16(%[c])\n\t" \
+ "movq %%r11, 24(%[c])\n\t" \
+ "movq %%r12, 32(%[c])\n\t" \
+ "movq %%r13, 40(%[c])\n\t" \
+ "movq %%r14, 48(%[c])\n\t" \
+ "movq %%r15, 56(%[c])\n\t" \
+ : [cy] "+r" (cy) \
+ : [xp] "r" (x), [c] "r" (c_mulx), [yn] "rm" (y) \
+ :"%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", \
+ "%rdx", "%rax", "%rcx" \
+ )
+
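The rewritten MULX_INNERMUL8 keeps two independent carry chains live
(ADCX updates only CF, ADOX only OF), so the eight MULX products retire
without serializing on one flags register. Functionally it computes
c[0..7] += y * x[0..7] + cy and leaves the outgoing carry in cy; a hedged
portable reference, assuming unsigned __int128 and 64-bit digits:

    static unsigned long innermul8_ref(unsigned long *c,
                                       const unsigned long *x,
                                       unsigned long y, unsigned long cy) {
        int i;
        for (i = 0; i < 8; i++) {
            unsigned __int128 t = (unsigned __int128)x[i] * y + c[i] + cy;
            c[i] = (unsigned long)t;         /* column digit */
            cy   = (unsigned long)(t >> 64); /* ripple carry */
        }
        return cy;
    }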
#define INNERMUL8_MULX \
{\
MULX_INNERMUL8(tmpm, mu, _c, cy);\
@@ -242,7 +246,7 @@ __asm__( \
#endif
#define INNERMUL8 \
- __asm__( \
+ __asm__( \
"movq 0(%5),%%rax \n\t" \
"movq 0(%2),%%r10 \n\t" \
"movq 0x8(%5),%%r11 \n\t" \
@@ -332,10 +336,10 @@ __asm__( \
\
:"=r"(_c), "=r"(cy) \
: "0"(_c), "1"(cy), "g"(mu), "r"(tmpm)\
-: "%rax", "%rdx", "%r10", "%r11", "cc")\
+: "%rax", "%rdx", "%r10", "%r11", "cc")
#define PROPCARRY \
-__asm__( \
+__asm__( \
"addq %1,%0 \n\t" \
"setb %%al \n\t" \
"movzbq %%al,%1 \n\t" \
@@ -344,7 +348,7 @@ __asm__( \
: "%rax", "cc")
/******************************************************************/
-#elif defined(TFM_SSE2)
+#elif defined(TFM_SSE2)
/* SSE2 code (assumes 32-bit fp_digits) */
/* XMM register assignments:
* xmm0 *tmpm++, then Mu * (*tmpm++)
@@ -361,7 +365,7 @@ __asm__( \
__asm__("emms")
#define LOOP_START \
-__asm__( \
+__asm__( \
"movd %0,%%mm1 \n\t" \
"pxor %%mm3,%%mm3 \n\t" \
"pmuludq %%mm2,%%mm1 \n\t" \
@@ -369,7 +373,7 @@ __asm__( \
/* pmuludq on mmx registers does a 32x32->64 multiply. */
#define INNERMUL \
-__asm__( \
+__asm__( \
"movd %1,%%mm4 \n\t" \
"movd %2,%%mm0 \n\t" \
"paddq %%mm4,%%mm3 \n\t" \
@@ -380,7 +384,7 @@ __asm__( \
:"=g"(_c[LO]) : "0"(_c[LO]), "g"(*tmpm++) );
#define INNERMUL8 \
-__asm__( \
+__asm__( \
"movd 0(%1),%%mm4 \n\t" \
"movd 0(%2),%%mm0 \n\t" \
"paddq %%mm4,%%mm3 \n\t" \
@@ -453,7 +457,7 @@ __asm__( \
__asm__( "movd %%mm3,%0 \n" :"=r"(cy))
#define PROPCARRY \
-__asm__( \
+__asm__( \
"addl %1,%0 \n\t" \
"setb %%al \n\t" \
"movzbl %%al,%1 \n\t" \
@@ -465,7 +469,7 @@ __asm__( \
#elif defined(TFM_ARM)
/* ARMv4 code */
-#define MONT_START
+#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
@@ -475,7 +479,7 @@ __asm__( \
#ifdef __thumb__
#define INNERMUL \
-__asm__( \
+__asm__( \
" LDR r0,%1 \n\t" \
" ADDS r0,r0,%0 \n\t" \
" ITE CS \n\t" \
@@ -486,7 +490,7 @@ __asm__( \
:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"m"(_c[0]):"r0","cc");
#define PROPCARRY \
-__asm__( \
+__asm__( \
" LDR r0,%1 \n\t" \
" ADDS r0,r0,%0 \n\t" \
" STR r0,%1 \n\t" \
@@ -502,7 +506,7 @@ __asm__( \
#else /* __thumb__ */
#define INNERMUL \
-__asm__( \
+__asm__( \
" LDR r0,%1 \n\t" \
" ADDS r0,r0,%0 \n\t" \
" MOVCS %0,#1 \n\t" \
@@ -512,7 +516,7 @@ __asm__( \
:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c[0]):"r0","cc");
#define PROPCARRY \
-__asm__( \
+__asm__( \
" LDR r0,%1 \n\t" \
" ADDS r0,r0,%0 \n\t" \
" STR r0,%1 \n\t" \
@@ -525,76 +529,72 @@ __asm__( \
#elif defined(TFM_PPC32)
/* PPC32 */
-#define MONT_START
+#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
mu = c[x] * mp
#define INNERMUL \
-__asm__( \
+__asm__( \
" mullw 16,%3,%4 \n\t" \
" mulhwu 17,%3,%4 \n\t" \
- " addc 16,16,%0 \n\t" \
+ " addc 16,16,%2 \n\t" \
" addze 17,17 \n\t" \
- " lwz 18,%1 \n\t" \
- " addc 16,16,18 \n\t" \
+ " addc %1,16,%5 \n\t" \
" addze %0,17 \n\t" \
- " stw 16,%1 \n\t" \
-:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"16", "17", "18","cc"); ++tmpm;
+:"=r"(cy),"=r"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"16", "17", "cc"); ++tmpm;
#define PROPCARRY \
-__asm__( \
- " lwz 16,%1 \n\t" \
- " addc 16,16,%0 \n\t" \
- " stw 16,%1 \n\t" \
- " xor %0,%0,%0 \n\t" \
- " addze %0,%0 \n\t" \
-:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"16","cc");
+__asm__( \
+ " addc %1,%3,%2 \n\t" \
+ " xor %0,%2,%2 \n\t" \
+ " addze %0,%2 \n\t" \
+:"=r"(cy),"=r"(_c[0]):"0"(cy),"1"(_c[0]):"cc");
#elif defined(TFM_PPC64)
/* PPC64 */
-#define MONT_START
+#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
mu = c[x] * mp
-#define INNERMUL \
-__asm__( \
- " mulld 16,%3,%4 \n\t" \
- " mulhdu 17,%3,%4 \n\t" \
- " addc 16,16,%0 \n\t" \
- " addze 17,17 \n\t" \
- " ldx 18,0,%1 \n\t" \
- " addc 16,16,18 \n\t" \
- " addze %0,17 \n\t" \
- " sdx 16,0,%1 \n\t" \
-:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"16", "17", "18","cc"); ++tmpm;
-
-#define PROPCARRY \
-__asm__( \
- " ldx 16,0,%1 \n\t" \
- " addc 16,16,%0 \n\t" \
- " sdx 16,0,%1 \n\t" \
- " xor %0,%0,%0 \n\t" \
- " addze %0,%0 \n\t" \
-:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"16","cc");
+#define INNERMUL \
+__asm__( \
+ " mulld r16,%3,%4 \n\t" \
+ " mulhdu r17,%3,%4 \n\t" \
+ " addc r16,16,%0 \n\t" \
+ " addze r17,r17 \n\t" \
+ " ldx r18,0,%1 \n\t" \
+ " addc r16,r16,r18 \n\t" \
+ " addze %0,r17 \n\t" \
+ " sdx r16,0,%1 \n\t" \
+:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"r16", "r17", "r18","cc"); ++tmpm;
+
+#define PROPCARRY \
+__asm__( \
+ " ldx r16,0,%1 \n\t" \
+ " addc r16,r16,%0 \n\t" \
+ " sdx r16,0,%1 \n\t" \
+ " xor %0,%0,%0 \n\t" \
+ " addze %0,%0 \n\t" \
+:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r16","cc");
/******************************************************************/
#elif defined(TFM_AVR32)
/* AVR32 */
-#define MONT_START
+#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
mu = c[x] * mp
#define INNERMUL \
-__asm__( \
+__asm__( \
" ld.w r2,%1 \n\t" \
" add r2,%0 \n\t" \
" eor r3,r3 \n\t" \
@@ -605,7 +605,7 @@ __asm__( \
:"=r"(cy),"=r"(_c):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c):"r2","r3");
#define PROPCARRY \
-__asm__( \
+__asm__( \
" ld.w r2,%1 \n\t" \
" add r2,%0 \n\t" \
" st.w %1,r2 \n\t" \
@@ -613,10 +613,44 @@ __asm__( \
" acr %0 \n\t" \
:"=r"(cy),"=r"(&_c[0]):"0"(cy),"1"(&_c[0]):"r2","cc");
+/******************************************************************/
+#elif defined(TFM_MIPS)
+
+/* MIPS */
+#define MONT_START
+#define MONT_FINI
+#define LOOP_END
+#define LOOP_START \
+ mu = c[x] * mp
+
+#define INNERMUL \
+__asm__( \
+ " multu %3,%4 \n\t" \
+ " mflo $12 \n\t" \
+ " mfhi $13 \n\t" \
+ " addu $12,$12,%0 \n\t" \
+ " sltu $10,$12,%0 \n\t" \
+ " addu $13,$13,$10 \n\t" \
+ " lw $10,%1 \n\t" \
+ " addu $12,$12,$10 \n\t" \
+ " sltu $10,$12,$10 \n\t" \
+ " addu %0,$13,$10 \n\t" \
+ " sw $12,%1 \n\t" \
+:"+r"(cy),"+m"(_c[0]):""(cy),"r"(mu),"r"(tmpm[0]),""(_c[0]):"$10","$12","$13"); ++tmpm;
+
+#define PROPCARRY \
+__asm__( \
+ " lw $10,%1 \n\t" \
+ " addu $10,$10,%0 \n\t" \
+ " sw $10,%1 \n\t" \
+ " sltu %0,$10,%0 \n\t" \
+:"+r"(cy),"+m"(_c[0]):""(cy),""(_c[0]):"$10");
+
+/******************************************************************/
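MIPS has no carry flag, so the new port derives carries with sltu: after
sum = a + b, (sum < a) is 1 exactly when the addition wrapped. The idiom
in portable form:

    #include <stdint.h>

    static uint32_t add_with_carry(uint32_t a, uint32_t b, uint32_t *carry) {
        uint32_t sum = a + b;
        *carry = (sum < a);  /* sltu $t, sum, a */
        return sum;
    }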
#else
/* ISO C code */
-#define MONT_START
+#define MONT_START
#define MONT_FINI
#define LOOP_END
#define LOOP_START \
@@ -663,7 +697,7 @@ __asm__( \
#define COMBA_FINI
#define SQRADD(i, j) \
-__asm__( \
+__asm__( \
"movl %6,%%eax \n\t" \
"mull %%eax \n\t" \
"addl %%eax,%0 \n\t" \
@@ -672,7 +706,7 @@ __asm__( \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","%edx","cc");
#define SQRADD2(i, j) \
-__asm__( \
+__asm__( \
"movl %6,%%eax \n\t" \
"mull %7 \n\t" \
"addl %%eax,%0 \n\t" \
@@ -692,10 +726,8 @@ __asm__( \
"xorl %2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2): "g"(i), "g"(j) :"%eax","%edx","cc");
-/* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */
-
#define SQRADDAC(i, j) \
-__asm__( \
+__asm__( \
"movl %6,%%eax \n\t" \
"mull %7 \n\t" \
"addl %%eax,%0 \n\t" \
@@ -704,7 +736,7 @@ __asm__( \
:"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%eax","%edx","cc");
#define SQRADDDB \
-__asm__( \
+__asm__( \
"addl %6,%0 \n\t" \
"adcl %7,%1 \n\t" \
"adcl %8,%2 \n\t" \
@@ -733,16 +765,16 @@ __asm__( \
#define COMBA_FINI
#define SQRADD(i, j) \
-__asm__( \
+__asm__( \
"movq %6,%%rax \n\t" \
"mulq %%rax \n\t" \
"addq %%rax,%0 \n\t" \
"adcq %%rdx,%1 \n\t" \
"adcq $0,%2 \n\t" \
- :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i) :"%rax","%rdx","cc");
+ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "x"(i) :"%rax","%rdx","cc");
#define SQRADD2(i, j) \
-__asm__( \
+__asm__( \
"movq %6,%%rax \n\t" \
"mulq %7 \n\t" \
"addq %%rax,%0 \n\t" \
@@ -754,7 +786,7 @@ __asm__( \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","cc");
#define SQRADDSC(i, j) \
-__asm__( \
+__asm__( \
"movq %3,%%rax \n\t" \
"mulq %4 \n\t" \
"movq %%rax,%0 \n\t" \
@@ -762,10 +794,8 @@ __asm__( \
"xorq %2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2): "g"(i), "g"(j) :"%rax","%rdx","cc");
-/* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */
-
#define SQRADDAC(i, j) \
-__asm__( \
+__asm__( \
"movq %6,%%rax \n\t" \
"mulq %7 \n\t" \
"addq %%rax,%0 \n\t" \
@@ -774,7 +804,7 @@ __asm__( \
:"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","cc");
#define SQRADDDB \
-__asm__( \
+__asm__( \
"addq %6,%0 \n\t" \
"adcq %7,%1 \n\t" \
"adcq %8,%2 \n\t" \
@@ -804,7 +834,7 @@ __asm__( \
__asm__("emms");
#define SQRADD(i, j) \
-__asm__( \
+__asm__( \
"movd %6,%%mm0 \n\t" \
"pmuludq %%mm0,%%mm0\n\t" \
"movd %%mm0,%%eax \n\t" \
@@ -816,7 +846,7 @@ __asm__( \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","cc");
#define SQRADD2(i, j) \
-__asm__( \
+__asm__( \
"movd %6,%%mm0 \n\t" \
"movd %7,%%mm1 \n\t" \
"pmuludq %%mm1,%%mm0\n\t" \
@@ -832,7 +862,7 @@ __asm__( \
:"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc");
#define SQRADDSC(i, j) \
-__asm__( \
+__asm__( \
"movd %3,%%mm0 \n\t" \
"movd %4,%%mm1 \n\t" \
"pmuludq %%mm1,%%mm0\n\t" \
@@ -845,7 +875,7 @@ __asm__( \
/* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */
#define SQRADDAC(i, j) \
-__asm__( \
+__asm__( \
"movd %6,%%mm0 \n\t" \
"movd %7,%%mm1 \n\t" \
"pmuludq %%mm1,%%mm0\n\t" \
@@ -858,7 +888,7 @@ __asm__( \
:"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "m"(i), "m"(j) :"%eax","%edx","cc");
#define SQRADDDB \
-__asm__( \
+__asm__( \
"addl %6,%0 \n\t" \
"adcl %7,%1 \n\t" \
"adcl %8,%2 \n\t" \
@@ -889,16 +919,16 @@ __asm__( \
/* multiplies point i and j, updates carry "c1" and digit c2 */
#define SQRADD(i, j) \
-__asm__( \
+__asm__( \
" UMULL r0,r1,%6,%6 \n\t" \
" ADDS %0,%0,r0 \n\t" \
" ADCS %1,%1,r1 \n\t" \
" ADC %2,%2,#0 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i) : "r0", "r1", "cc");
-
+
/* for squaring some of the terms are doubled... */
#define SQRADD2(i, j) \
-__asm__( \
+__asm__( \
" UMULL r0,r1,%6,%7 \n\t" \
" ADDS %0,%0,r0 \n\t" \
" ADCS %1,%1,r1 \n\t" \
@@ -909,7 +939,7 @@ __asm__( \
:"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "cc");
#define SQRADDSC(i, j) \
-__asm__( \
+__asm__( \
" UMULL %0,%1,%3,%4 \n\t" \
" SUB %2,%2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2) : "r"(i), "r"(j) : "cc");
@@ -917,7 +947,7 @@ __asm__( \
/* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */
#define SQRADDAC(i, j) \
-__asm__( \
+__asm__( \
" UMULL r0,r1,%6,%7 \n\t" \
" ADDS %0,%0,r0 \n\t" \
" ADCS %1,%1,r1 \n\t" \
@@ -925,7 +955,7 @@ __asm__( \
:"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "r0", "r1", "cc");
#define SQRADDDB \
-__asm__( \
+__asm__( \
" ADDS %0,%0,%3 \n\t" \
" ADCS %1,%1,%4 \n\t" \
" ADC %2,%2,%5 \n\t" \
@@ -956,7 +986,7 @@ __asm__( \
/* multiplies point i and j, updates carry "c1" and digit c2 */
#define SQRADD(i, j) \
-__asm__( \
+__asm__( \
" mullw 16,%6,%6 \n\t" \
" addc %0,%0,16 \n\t" \
" mulhwu 16,%6,%6 \n\t" \
@@ -966,7 +996,7 @@ __asm__( \
/* for squaring some of the terms are doubled... */
#define SQRADD2(i, j) \
-__asm__( \
+__asm__( \
" mullw 16,%6,%7 \n\t" \
" mulhwu 17,%6,%7 \n\t" \
" addc %0,%0,16 \n\t" \
@@ -978,14 +1008,14 @@ __asm__( \
:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16", "17","cc");
#define SQRADDSC(i, j) \
-__asm__( \
+__asm__( \
" mullw %0,%6,%7 \n\t" \
" mulhwu %1,%6,%7 \n\t" \
" xor %2,%2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
#define SQRADDAC(i, j) \
-__asm__( \
+__asm__( \
" mullw 16,%6,%7 \n\t" \
" addc %0,%0,16 \n\t" \
" mulhwu 16,%6,%7 \n\t" \
@@ -994,7 +1024,7 @@ __asm__( \
:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"16", "cc");
#define SQRADDDB \
-__asm__( \
+__asm__( \
" addc %0,%0,%3 \n\t" \
" adde %1,%1,%4 \n\t" \
" adde %2,%2,%5 \n\t" \
@@ -1023,46 +1053,46 @@ __asm__( \
#define COMBA_FINI
/* multiplies point i and j, updates carry "c1" and digit c2 */
-#define SQRADD(i, j) \
-__asm__( \
- " mulld 16,%6,%6 \n\t" \
- " addc %0,%0,16 \n\t" \
- " mulhdu 16,%6,%6 \n\t" \
- " adde %1,%1,16 \n\t" \
- " addze %2,%2 \n\t" \
-:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"16","cc");
+#define SQRADD(i, j) \
+__asm__( \
+ " mulld r16,%6,%6 \n\t" \
+ " addc %0,%0,r16 \n\t" \
+ " mulhdu r16,%6,%6 \n\t" \
+ " adde %1,%1,r16 \n\t" \
+ " addze %2,%2 \n\t" \
+:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"r16","cc");
/* for squaring some of the terms are doubled... */
-#define SQRADD2(i, j) \
-__asm__( \
- " mulld 16,%6,%7 \n\t" \
- " mulhdu 17,%6,%7 \n\t" \
- " addc %0,%0,16 \n\t" \
- " adde %1,%1,17 \n\t" \
- " addze %2,%2 \n\t" \
- " addc %0,%0,16 \n\t" \
- " adde %1,%1,17 \n\t" \
- " addze %2,%2 \n\t" \
-:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16", "17","cc");
+#define SQRADD2(i, j) \
+__asm__( \
+ " mulld r16,%6,%7 \n\t" \
+ " mulhdu r17,%6,%7 \n\t" \
+ " addc %0,%0,r16 \n\t" \
+ " adde %1,%1,r17 \n\t" \
+ " addze %2,%2 \n\t" \
+ " addc %0,%0,r16 \n\t" \
+ " adde %1,%1,r17 \n\t" \
+ " addze %2,%2 \n\t" \
+:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r16", "r17","cc");
#define SQRADDSC(i, j) \
-__asm__( \
+__asm__( \
" mulld %0,%6,%7 \n\t" \
" mulhdu %1,%6,%7 \n\t" \
" xor %2,%2,%2 \n\t" \
:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
-#define SQRADDAC(i, j) \
-__asm__( \
- " mulld 16,%6,%7 \n\t" \
- " addc %0,%0,16 \n\t" \
- " mulhdu 16,%6,%7 \n\t" \
- " adde %1,%1,16 \n\t" \
- " addze %2,%2 \n\t" \
-:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"16", "cc");
+#define SQRADDAC(i, j) \
+__asm__( \
+ " mulld r16,%6,%7 \n\t" \
+ " addc %0,%0,r16 \n\t" \
+ " mulhdu r16,%6,%7 \n\t" \
+ " adde %1,%1,r16 \n\t" \
+ " addze %2,%2 \n\t" \
+:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r16", "cc");
#define SQRADDDB \
-__asm__( \
+__asm__( \
" addc %0,%0,%3 \n\t" \
" adde %1,%1,%4 \n\t" \
" adde %2,%2,%5 \n\t" \
@@ -1094,7 +1124,7 @@ __asm__( \
/* multiplies point i and j, updates carry "c1" and digit c2 */
#define SQRADD(i, j) \
-__asm__( \
+__asm__( \
" mulu.d r2,%6,%6 \n\t" \
" add %0,%0,r2 \n\t" \
" adc %1,%1,r3 \n\t" \
@@ -1103,7 +1133,7 @@ __asm__( \
/* for squaring some of the terms are doubled... */
#define SQRADD2(i, j) \
-__asm__( \
+__asm__( \
" mulu.d r2,%6,%7 \n\t" \
" add %0,%0,r2 \n\t" \
" adc %1,%1,r3 \n\t" \
@@ -1114,7 +1144,7 @@ __asm__( \
:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r2", "r3");
#define SQRADDSC(i, j) \
-__asm__( \
+__asm__( \
" mulu.d r2,%6,%7 \n\t" \
" mov %0,r2 \n\t" \
" mov %1,r3 \n\t" \
@@ -1122,7 +1152,7 @@ __asm__( \
:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "r2", "r3");
#define SQRADDAC(i, j) \
-__asm__( \
+__asm__( \
" mulu.d r2,%6,%7 \n\t" \
" add %0,%0,r2 \n\t" \
" adc %1,%1,r3 \n\t" \
@@ -1130,7 +1160,7 @@ __asm__( \
:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r2", "r3");
#define SQRADDDB \
-__asm__( \
+__asm__( \
" add %0,%0,%3 \n\t" \
" adc %1,%1,%4 \n\t" \
" adc %2,%2,%5 \n\t" \
@@ -1139,6 +1169,112 @@ __asm__( \
" adc %2,%2,%5 \n\t" \
:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc");
+#elif defined(TFM_MIPS)
+
+/* MIPS */
+#define COMBA_START
+
+#define CLEAR_CARRY \
+ c0 = c1 = c2 = 0;
+
+#define COMBA_STORE(x) \
+ x = c0;
+
+#define COMBA_STORE2(x) \
+ x = c1;
+
+#define CARRY_FORWARD \
+ do { c0 = c1; c1 = c2; c2 = 0; } while (0);
+
+#define COMBA_FINI
+
+/* multiplies point i and j, updates carry "c1" and digit c2 */
+#define SQRADD(i, j) \
+__asm__( \
+ " multu %6,%6 \n\t" \
+ " mflo $12 \n\t" \
+ " mfhi $13 \n\t" \
+ " addu %0,%0,$12 \n\t" \
+ " sltu $12,%0,$12 \n\t" \
+ " addu %1,%1,$13 \n\t" \
+ " sltu $13,%1,$13 \n\t" \
+ " addu %1,%1,$12 \n\t" \
+ " sltu $12,%1,$12 \n\t" \
+ " addu %2,%2,$13 \n\t" \
+ " addu %2,%2,$12 \n\t" \
+:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"$12","$13");
+
+/* for squaring some of the terms are doubled... */
+#define SQRADD2(i, j) \
+__asm__( \
+ " multu %6,%7 \n\t" \
+ " mflo $12 \n\t" \
+ " mfhi $13 \n\t" \
+ \
+ " addu %0,%0,$12 \n\t" \
+ " sltu $14,%0,$12 \n\t" \
+ " addu %1,%1,$13 \n\t" \
+ " sltu $15,%1,$13 \n\t" \
+ " addu %1,%1,$14 \n\t" \
+ " sltu $14,%1,$14 \n\t" \
+ " addu %2,%2,$15 \n\t" \
+ " addu %2,%2,$14 \n\t" \
+ \
+ " addu %0,%0,$12 \n\t" \
+ " sltu $14,%0,$12 \n\t" \
+ " addu %1,%1,$13 \n\t" \
+ " sltu $15,%1,$13 \n\t" \
+ " addu %1,%1,$14 \n\t" \
+ " sltu $14,%1,$14 \n\t" \
+ " addu %2,%2,$15 \n\t" \
+ " addu %2,%2,$14 \n\t" \
+:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"$12", "$13", "$14", "$15");
+
+#define SQRADDSC(i, j) \
+__asm__( \
+ " multu %6,%7 \n\t" \
+ " mflo %0 \n\t" \
+ " mfhi %1 \n\t" \
+ " xor %2,%2,%2 \n\t" \
+:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc");
+
+#define SQRADDAC(i, j) \
+__asm__( \
+ " multu %6,%7 \n\t" \
+ " mflo $12 \n\t" \
+ " mfhi $13 \n\t" \
+ " addu %0,%0,$12 \n\t" \
+ " sltu $12,%0,$12 \n\t" \
+ " addu %1,%1,$13 \n\t" \
+ " sltu $13,%1,$13 \n\t" \
+ " addu %1,%1,$12 \n\t" \
+ " sltu $12,%1,$12 \n\t" \
+ " addu %2,%2,$13 \n\t" \
+ " addu %2,%2,$12 \n\t" \
+:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"$12", "$13", "$14");
+
+#define SQRADDDB \
+__asm__( \
+ " addu %0,%0,%3 \n\t" \
+ " sltu $10,%0,%3 \n\t" \
+ " addu %1,%1,$10 \n\t" \
+ " sltu $10,%1,$10 \n\t" \
+ " addu %1,%1,%4 \n\t" \
+ " sltu $11,%1,%4 \n\t" \
+ " addu %2,%2,$10 \n\t" \
+ " addu %2,%2,$11 \n\t" \
+ " addu %2,%2,%5 \n\t" \
+ \
+ " addu %0,%0,%3 \n\t" \
+ " sltu $10,%0,%3 \n\t" \
+ " addu %1,%1,$10 \n\t" \
+ " sltu $10,%1,$10 \n\t" \
+ " addu %1,%1,%4 \n\t" \
+ " sltu $11,%1,%4 \n\t" \
+ " addu %2,%2,$10 \n\t" \
+ " addu %2,%2,$11 \n\t" \
+ " addu %2,%2,%5 \n\t" \
+:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "$10", "$11");
#else
@@ -1169,7 +1305,7 @@ __asm__( \
t = c1 + (t >> DIGIT_BIT); c1 = (fp_digit)t; \
c2 +=(fp_digit) (t >> DIGIT_BIT); \
} while (0);
-
+
/* for squaring some of the terms are doubled... */
#define SQRADD2(i, j) \
@@ -1177,10 +1313,10 @@ __asm__( \
t = ((fp_word)i) * ((fp_word)j); \
tt = (fp_word)c0 + t; c0 = (fp_digit)tt; \
tt = (fp_word)c1 + (tt >> DIGIT_BIT); c1 = (fp_digit)tt; \
- c2 +=(fp_digit)( tt >> DIGIT_BIT); \
+ c2 +=(fp_digit)(tt >> DIGIT_BIT); \
tt = (fp_word)c0 + t; c0 = (fp_digit)tt; \
tt = (fp_word)c1 + (tt >> DIGIT_BIT); c1 = (fp_digit)tt; \
- c2 +=(fp_digit) (tt >> DIGIT_BIT); \
+ c2 +=(fp_digit)(tt >> DIGIT_BIT); \
} while (0);
#define SQRADDSC(i, j) \
@@ -1210,46 +1346,46 @@ __asm__( \
#include "fp_sqr_comba_small_set.i"
#endif
-#if defined(TFM_SQR3)
+#if defined(TFM_SQR3) && FP_SIZE >= 6
#include "fp_sqr_comba_3.i"
#endif
-#if defined(TFM_SQR4)
+#if defined(TFM_SQR4) && FP_SIZE >= 8
#include "fp_sqr_comba_4.i"
#endif
-#if defined(TFM_SQR6)
+#if defined(TFM_SQR6) && FP_SIZE >= 12
#include "fp_sqr_comba_6.i"
#endif
-#if defined(TFM_SQR7)
+#if defined(TFM_SQR7) && FP_SIZE >= 14
#include "fp_sqr_comba_7.i"
#endif
-#if defined(TFM_SQR8)
+#if defined(TFM_SQR8) && FP_SIZE >= 16
#include "fp_sqr_comba_8.i"
#endif
-#if defined(TFM_SQR9)
+#if defined(TFM_SQR9) && FP_SIZE >= 18
#include "fp_sqr_comba_9.i"
#endif
-#if defined(TFM_SQR12)
+#if defined(TFM_SQR12) && FP_SIZE >= 24
#include "fp_sqr_comba_12.i"
#endif
-#if defined(TFM_SQR17)
+#if defined(TFM_SQR17) && FP_SIZE >= 34
#include "fp_sqr_comba_17.i"
#endif
-#if defined(TFM_SQR20)
+#if defined(TFM_SQR20) && FP_SIZE >= 40
#include "fp_sqr_comba_20.i"
#endif
-#if defined(TFM_SQR24)
+#if defined(TFM_SQR24) && FP_SIZE >= 48
#include "fp_sqr_comba_24.i"
#endif
-#if defined(TFM_SQR28)
+#if defined(TFM_SQR28) && FP_SIZE >= 56
#include "fp_sqr_comba_28.i"
#endif
-#if defined(TFM_SQR32)
+#if defined(TFM_SQR32) && FP_SIZE >= 64
#include "fp_sqr_comba_32.i"
#endif
-#if defined(TFM_SQR48)
+#if defined(TFM_SQR48) && FP_SIZE >= 96
#include "fp_sqr_comba_48.i"
#endif
-#if defined(TFM_SQR64)
+#if defined(TFM_SQR64) && FP_SIZE >= 128
#include "fp_sqr_comba_64.i"
#endif
/* end fp_sqr_comba.c asm */
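The new FP_SIZE guards encode the invariant that squaring an N-digit
input produces up to 2N digits: each unrolled fp_sqr_comba_N.i is now
compiled only when the fixed fp_int digit array can hold the result,
e.g.:

    #if defined(TFM_SQR8) && FP_SIZE >= 16  /* 8-digit input, 16-digit square */
        #include "fp_sqr_comba_8.i"
    #endif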
@@ -1283,7 +1419,7 @@ __asm__( \
/* this should multiply i and j */
#define MULADD(i, j) \
-__asm__( \
+__asm__( \
"movl %6,%%eax \n\t" \
"mull %7 \n\t" \
"addl %%eax,%0 \n\t" \
@@ -1318,7 +1454,7 @@ __asm__( \
/* this should multiply i and j */
#define MULADD(i, j) \
-__asm__ ( \
+__asm__ ( \
"movq %6,%%rax \n\t" \
"mulq %7 \n\t" \
"addq %%rax,%0 \n\t" \
@@ -1328,61 +1464,54 @@ __asm__ ( \
#if defined(HAVE_INTEL_MULX)
-#define MULADD_MULX(b0, c0, c1, rdx)\
- __asm__ volatile ( \
- "movq %3, %%rdx\n\t" \
- "mulx %2,%%r9, %%r8 \n\t" \
- "adoxq %%r9,%0 \n\t" \
- "adcxq %%r8,%1 \n\t" \
- :"+r"(c0),"+r"(c1):"r"(b0), "r"(rdx):"%r8","%r9","%r10","%rdx"\
+#define MULADD_BODY(a,b,c) \
+ __asm__ volatile( \
+ "movq %[a0],%%rdx\n\t" \
+ "xorq %%rcx, %%rcx\n\t" \
+ "movq 0(%[cp]),%%r8\n\t" \
+ "movq 8(%[cp]),%%r9\n\t" \
+ "movq 16(%[cp]),%%r10\n\t" \
+ "movq 24(%[cp]),%%r11\n\t" \
+ "movq 32(%[cp]),%%r12\n\t" \
+ "movq 40(%[cp]),%%r13\n\t" \
+ \
+ "mulx (%[bp]),%%rax, %%rbx\n\t" \
+ "adoxq %%rax, %%r8\n\t" \
+ "mulx 8(%[bp]),%%rax, %%rcx\n\t" \
+ "adcxq %%rbx, %%r9\n\t" \
+ "adoxq %%rax, %%r9\n\t" \
+ "mulx 16(%[bp]),%%rax, %%rbx\n\t" \
+ "adcxq %%rcx, %%r10\n\t" \
+ "adoxq %%rax, %%r10\n\t" \
+ "mulx 24(%[bp]),%%rax, %%rcx\n\t" \
+ "adcxq %%rbx, %%r11\n\t" \
+ "adoxq %%rax, %%r11\n\t" \
+ "adcxq %%rcx, %%r12\n\t" \
+ "mov $0, %%rdx\n\t" \
+ "adox %%rdx, %%r12\n\t" \
+ "adcx %%rdx, %%r13\n\t" \
+ \
+ "movq %%r8, 0(%[cp])\n\t" \
+ "movq %%r9, 8(%[cp])\n\t" \
+ "movq %%r10, 16(%[cp])\n\t" \
+ "movq %%r11, 24(%[cp])\n\t" \
+ "movq %%r12, 32(%[cp])\n\t" \
+ "movq %%r13, 40(%[cp])\n\t" \
+ : \
+ : [a0] "r" (a->dp[ix]), [bp] "r" (&(b->dp[iy])), \
+ [cp] "r" (&(c->dp[iz])) \
+ : "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", \
+ "%rdx", "%rax", "%rcx", "%rbx" \
)
-
-#define MULADD_MULX_ADD_CARRY(c0, c1)\
- __asm__ volatile(\
- "mov $0, %%r10\n\t"\
- "movq %1, %%r8\n\t"\
- "adox %%r10, %0\n\t"\
- "adcx %%r10, %1\n\t"\
- :"+r"(c0),"+r"(c1)::"%r8","%r9","%r10","%rdx") ;
-
-#define MULADD_SET_A(a0)\
- __asm__ volatile("add $0, %%r8\n\t" \
- "movq %0,%%rdx\n\t" \
- ::"r"(a0):"%r8","%r9","%r10","%rdx") ;
-
-#define MULADD_BODY(a,b,c)\
- { word64 rdx = a->dp[ix] ; \
- cp = &(c->dp[iz]) ; \
- c0 = cp[0] ; c1 = cp[1]; \
- MULADD_SET_A(rdx) ; \
- MULADD_MULX(b0, c0, c1, rdx) ;\
- cp[0]=c0; c0=cp[2]; \
- MULADD_MULX(b1, c1, c0, rdx) ;\
- cp[1]=c1; c1=cp[3]; \
- MULADD_MULX(b2, c0, c1, rdx) ;\
- cp[2]=c0; c0=cp[4]; \
- MULADD_MULX(b3, c1, c0, rdx) ;\
- cp[3]=c1; c1=cp[5]; \
- MULADD_MULX_ADD_CARRY(c0, c1);\
- cp[4]=c0; cp[5]=c1; \
+#define TFM_INTEL_MUL_COMBA(a, b, c) \
+ for (iz=0; iz<pa; iz++) c->dp[iz] = 0; \
+ for (ix=0; ix<a->used; ix++) { \
+ for (iy=0; iy<b->used; iy+=4) { \
+ iz = ix + iy; \
+ MULADD_BODY(a, b, c); \
+ } \
}
-
-#define TFM_INTEL_MUL_COMBA(a, b, c)\
- for(ix=0; ix<pa; ix++)c->dp[ix]=0 ; \
- for(iy=0; (iy<b->used); iy+=4) { \
- fp_digit *bp ; \
- bp = &(b->dp[iy+0]) ; \
- fp_digit b0 = bp[0] , b1= bp[1], \
- b2= bp[2], b3= bp[3]; \
- ix=0, iz=iy; \
- while(ix<a->used) { \
- fp_digit c0, c1; \
- fp_digit *cp ; \
- MULADD_BODY(a,b,c); \
- ix++ ; iz++ ; \
- } \
-};
#endif
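TFM_INTEL_MUL_COMBA now zeroes the whole product once up front and walks
b four digits at a time, with MULADD_BODY doing the widening multiply-
accumulate via MULX/ADX. A hedged portable reference for the product it
computes, assuming unsigned __int128 and 64-bit digits:

    static void mul_comba_ref(const unsigned long *a, int a_used,
                              const unsigned long *b, int b_used,
                              unsigned long *c /* a_used+b_used digits */) {
        int ix, iy;
        for (ix = 0; ix < a_used + b_used; ix++) c[ix] = 0;
        for (ix = 0; ix < a_used; ix++) {
            unsigned long cy = 0;
            for (iy = 0; iy < b_used; iy++) {
                unsigned __int128 t = (unsigned __int128)a[ix] * b[iy]
                                    + c[ix + iy] + cy;
                c[ix + iy] = (unsigned long)t;  /* column digit */
                cy = (unsigned long)(t >> 64);  /* ripple carry */
            }
            c[ix + b_used] += cy;               /* row's final carry */
        }
    }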
#elif defined(TFM_SSE2)
@@ -1413,7 +1542,7 @@ __asm__ ( \
/* this should multiply i and j */
#define MULADD(i, j) \
-__asm__( \
+__asm__( \
"movd %6,%%mm0 \n\t" \
"movd %7,%%mm1 \n\t" \
"pmuludq %%mm1,%%mm0\n\t" \
@@ -1428,7 +1557,7 @@ __asm__( \
#elif defined(TFM_ARM)
/* ARM code */
-#define COMBA_START
+#define COMBA_START
#define COMBA_CLEAR \
c0 = c1 = c2 = 0;
@@ -1445,7 +1574,7 @@ __asm__( \
#define COMBA_FINI
#define MULADD(i, j) \
-__asm__( \
+__asm__( \
" UMULL r0,r1,%6,%7 \n\t" \
" ADDS %0,%0,r0 \n\t" \
" ADCS %1,%1,r1 \n\t" \
@@ -1469,11 +1598,11 @@ __asm__( \
#define COMBA_STORE2(x) \
x = c1;
-#define COMBA_FINI
-
+#define COMBA_FINI
+
/* untested: will mulhwu change the flags? Docs say no */
-#define MULADD(i, j) \
-__asm__( \
+#define MULADD(i, j) \
+__asm__( \
" mullw 16,%6,%7 \n\t" \
" addc %0,%0,16 \n\t" \
" mulhwu 16,%6,%7 \n\t" \
@@ -1498,17 +1627,17 @@ __asm__( \
#define COMBA_STORE2(x) \
x = c1;
-#define COMBA_FINI
-
-/* untested: will mulhwu change the flags? Docs say no */
+#define COMBA_FINI
+
+/* untested: will mulhdu change the flags? Docs say no */
#define MULADD(i, j) \
-____asm__( \
- " mulld 16,%6,%7 \n\t" \
- " addc %0,%0,16 \n\t" \
- " mulhdu 16,%6,%7 \n\t" \
- " adde %1,%1,16 \n\t" \
- " addze %2,%2 \n\t" \
-:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16");
+____asm__( \
+ " mulld r16,%6,%7 \n\t" \
+ " addc %0,%0,16 \n\t" \
+ " mulhdu r16,%6,%7 \n\t" \
+ " adde %1,%1,16 \n\t" \
+ " addze %2,%2 \n\t" \
+:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r16");
#elif defined(TFM_AVR32)
@@ -1528,16 +1657,50 @@ ____asm__( \
#define COMBA_STORE2(x) \
x = c1;
-#define COMBA_FINI
-
+#define COMBA_FINI
+
#define MULADD(i, j) \
-____asm__( \
+____asm__( \
" mulu.d r2,%6,%7 \n\t"\
" add %0,r2 \n\t"\
" adc %1,%1,r3 \n\t"\
" acr %2 \n\t"\
:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r2","r3");
+#elif defined(TFM_MIPS)
+
+/* MIPS */
+#define COMBA_START
+
+#define COMBA_CLEAR \
+ c0 = c1 = c2 = 0;
+
+#define COMBA_FORWARD \
+ do { c0 = c1; c1 = c2; c2 = 0; } while (0);
+
+#define COMBA_STORE(x) \
+ x = c0;
+
+#define COMBA_STORE2(x) \
+ x = c1;
+
+#define COMBA_FINI
+
+#define MULADD(i, j) \
+__asm__( \
+ " multu %6,%7 \n\t" \
+ " mflo $12 \n\t" \
+ " mfhi $13 \n\t" \
+ " addu %0,%0,$12 \n\t" \
+ " sltu $12,%0,$12 \n\t" \
+ " addu %1,%1,$13 \n\t" \
+ " sltu $13,%1,$13 \n\t" \
+ " addu %1,%1,$12 \n\t" \
+ " sltu $12,%1,$12 \n\t" \
+ " addu %2,%2,$13 \n\t" \
+ " addu %2,%2,$12 \n\t" \
+:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"$12","$13");
+
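As in the squaring section, the MIPS MULADD splits the 64-bit multu
product with mflo/mfhi and repairs carries with sltu. The lo/hi split in
portable terms:

    #include <stdint.h>

    static void mul32_split(uint32_t i, uint32_t j,
                            uint32_t *lo, uint32_t *hi) {
        uint64_t t = (uint64_t)i * j;  /* multu */
        *lo = (uint32_t)t;             /* mflo  */
        *hi = (uint32_t)(t >> 32);     /* mfhi  */
    }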
#else
/* ISO C code */
@@ -1555,13 +1718,15 @@ ____asm__( \
#define COMBA_STORE2(x) \
x = c1;
-#define COMBA_FINI
-
+#define COMBA_FINI
+
#define MULADD(i, j) \
- do { fp_word t; \
- t = (fp_word)c0 + ((fp_word)i) * ((fp_word)j); c0 = (fp_digit)t; \
- t = (fp_word)c1 + (t >> DIGIT_BIT); \
- c1 = (fp_digit)t; c2 += (fp_digit)(t >> DIGIT_BIT); \
+ do { fp_word t; \
+ t = (fp_word)c0 + ((fp_word)i) * ((fp_word)j); \
+ c0 = (fp_digit)t; \
+ t = (fp_word)c1 + (t >> DIGIT_BIT); \
+ c1 = (fp_digit)t; \
+ c2 += (fp_digit)(t >> DIGIT_BIT); \
} while (0);
#endif
@@ -1571,46 +1736,46 @@ ____asm__( \
#include "fp_mul_comba_small_set.i"
#endif
-#if defined(TFM_MUL3)
+#if defined(TFM_MUL3) && FP_SIZE >= 6
#include "fp_mul_comba_3.i"
#endif
-#if defined(TFM_MUL4)
+#if defined(TFM_MUL4) && FP_SIZE >= 8
#include "fp_mul_comba_4.i"
#endif
-#if defined(TFM_MUL6)
+#if defined(TFM_MUL6) && FP_SIZE >= 12
#include "fp_mul_comba_6.i"
#endif
-#if defined(TFM_MUL7)
+#if defined(TFM_MUL7) && FP_SIZE >= 14
#include "fp_mul_comba_7.i"
#endif
-#if defined(TFM_MUL8)
+#if defined(TFM_MUL8) && FP_SIZE >= 16
#include "fp_mul_comba_8.i"
#endif
-#if defined(TFM_MUL9)
+#if defined(TFM_MUL9) && FP_SIZE >= 18
#include "fp_mul_comba_9.i"
#endif
-#if defined(TFM_MUL12)
+#if defined(TFM_MUL12) && FP_SIZE >= 24
#include "fp_mul_comba_12.i"
#endif
-#if defined(TFM_MUL17)
+#if defined(TFM_MUL17) && FP_SIZE >= 34
#include "fp_mul_comba_17.i"
#endif
-#if defined(TFM_MUL20)
+#if defined(TFM_MUL20) && FP_SIZE >= 40
#include "fp_mul_comba_20.i"
#endif
-#if defined(TFM_MUL24)
+#if defined(TFM_MUL24) && FP_SIZE >= 48
#include "fp_mul_comba_24.i"
#endif
-#if defined(TFM_MUL28)
+#if defined(TFM_MUL28) && FP_SIZE >= 56
#include "fp_mul_comba_28.i"
#endif
-#if defined(TFM_MUL32)
+#if defined(TFM_MUL32) && FP_SIZE >= 64
#include "fp_mul_comba_32.i"
#endif
-#if defined(TFM_MUL48)
+#if defined(TFM_MUL48) && FP_SIZE >= 96
#include "fp_mul_comba_48.i"
#endif
-#if defined(TFM_MUL64)
+#if defined(TFM_MUL64) && FP_SIZE >= 128
#include "fp_mul_comba_64.i"
#endif