author     Nicolas Boichat <drinkcat@chromium.org>   2018-05-24 14:33:06 +0800
committer  chrome-bot <chrome-bot@chromium.org>      2018-05-29 06:02:19 -0700
commit     ecd0d1b5767c829f4c73a79a9eb6abae343284fb (patch)
tree       72be322444b05435927078bf9ac16f810c35d06e /core/cortex-m0
parent     cc7889bfaec9243ff35b6a366f6f2c7c65c33a13 (diff)
rsa: Further optimization of multiplications for Cortex-M0
In RSA, we often need to actually compute (a*b)+c+d: provide
assembly-optimized functions for that.

With -O3 and a 3072-bit exponent, this lowers verification time from
104 ms to 88 ms on STM32F072 @ 48 MHz.

BRANCH=poppy
BUG=b:35647963
BUG=b:77608104
TEST=On staff, flash, verification successful
TEST=make test-rsa, make test-rsa3
TEST=make BOARD=hammer test-utils test-rsa3, test on board

Change-Id: I80e8a7258d091e4f6adea11797729ac657dfd85d
Signed-off-by: Nicolas Boichat <drinkcat@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/1071411
Reviewed-by: Vincent Palatin <vpalatin@chromium.org>
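For reference, a minimal C sketch of what the new routine computes (the name mulaa32_ref is assumed here for illustration and is not part of the change). Note that a*b + c + d always fits in 64 bits, since (2^32-1)^2 + 2*(2^32-1) == 2^64 - 1:

#include <stdint.h>

/* Reference semantics of the multiply-accumulate-accumulate helper. */
static uint64_t mulaa32_ref(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	return (uint64_t)a * b + c + d;
}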
Diffstat (limited to 'core/cortex-m0')
-rw-r--r--  core/cortex-m0/mula.S  37
1 file changed, 36 insertions(+), 1 deletion(-)
diff --git a/core/cortex-m0/mula.S b/core/cortex-m0/mula.S
index fc0a6f3ee0..02e617c328 100644
--- a/core/cortex-m0/mula.S
+++ b/core/cortex-m0/mula.S
@@ -2,7 +2,7 @@
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*
- * Cortex-M0 multiply-accumulate functions
+ * Cortex-M0 multiply-accumulate[-accumulate] functions
*/
.syntax unified
@@ -44,3 +44,38 @@ mula32:
pop {r4, r5}
bx lr
+@ uint64_t mulaa32(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
+@
+@ Multiply a (r0) and b (r1), add c (r2), add d (r3) and return the result in
+@ r1:r0
+ .thumb_func
+ .section .text.mulaa32
+ .global mulaa32
+mulaa32:
+
+ push {r4, r5, r6}
+ uxth r5, r0 /* r5 = a.lo16 */
+ uxth r6, r1 /* r6 = b.lo16 */
+ uxth r4, r2 /* r4 = c.lo16 */
+ muls r5, r6 /* r5 = a.lo16 * b.lo16 */
+ adds r5, r4 /* r5 = a.lo16 * b.lo16 + c.lo16 == r.lo32 */
+ lsrs r4, r0, 16 /* r4 = a.hi16 */
+ lsrs r2, r2, 16 /* r2 = c.hi16 */
+ muls r6, r4 /* r6 = a.hi16 * b.lo16 */
+ adds r6, r2 /* r6 = a.hi16 * b.lo16 + c.hi16 == r.mid32.1 */
+ uxth r2, r0 /* r2 = a.lo16 */
+ lsrs r1, r1, 16 /* r1 = b.hi16 */
+ muls r2, r1 /* r2 = a.lo16 * b.hi16 == r.mid32.2 */
+ muls r1, r4 /* r1 = b.hi16 * a.hi16 == r.hi32 */
+ movs r4, 0 /* r4 = 0 */
+ adds r6, r2 /* r6 = (r.mid32.1 + r.mid32.2).lo32 == r.mid.lo32 */
+ adcs r4, r4 /* r4 = (r.mid32.1 + r.mid32.2).hi32 == r.mid.hi32 */
+ lsls r0, r6, 16 /* r0 = r.mid.lo32.lo16 << 16 == r.mid.inpos.lo32 */
+ lsrs r6, r6, 16 /* r6 = r.mid.lo32.hi16 >> 16 */
+ lsls r4, r4, 16 /* r4 = r.mid.hi.lo16 << 16 */
+ adds r0, r3 /* r0 = r.mid.inposition.lo32 + d */
+ adcs r4, r6 /* r4 = r6 + r4 + carry = r.mid.inpos.hi32 */
+ adds r0, r5 /* r0 = r.lo32 */
+ adcs r1, r4 /* r1 = r.hi32 */
+ pop {r4, r5, r6}
+ bx lr
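For clarity, here is an illustrative C version of the 16x16-bit schoolbook decomposition the assembly above implements: Cortex-M0's MULS only produces the low 32 bits of a product, so the 64-bit result is assembled from four 16x16 partial products, with c folded into the low and middle partials and d added at the end. The name mulaa32_c is assumed for this sketch; it is not part of the commit:

#include <stdint.h>

static uint64_t mulaa32_c(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	uint32_t a_lo = a & 0xffff, a_hi = a >> 16;
	uint32_t b_lo = b & 0xffff, b_hi = b >> 16;

	/* lo16 * lo16 + lo16 cannot overflow 32 bits, so c's halves
	 * can be folded into these partials with no extra carries. */
	uint32_t lo   = a_lo * b_lo + (c & 0xffff);
	uint32_t mid1 = a_hi * b_lo + (c >> 16);
	uint32_t mid2 = a_lo * b_hi;
	uint32_t hi   = a_hi * b_hi;

	/* The two middle partials sum to at most a 33-bit value; the
	 * assembly captures that extra bit with "movs r4, 0; adcs r4, r4". */
	uint64_t mid = (uint64_t)mid1 + mid2;

	return ((uint64_t)hi << 32) + (mid << 16) + lo + d;
}

The assembly adds d into the shifted middle word first and then folds in the low and high words with add-with-carry, but the end result is the same 64-bit value returned in r1:r0.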