summaryrefslogtreecommitdiff
path: root/ecc-mod-inv.c
diff options
context:
space:
mode:
authorNiels Möller <nisse@lysator.liu.se>2014-09-22 20:58:12 +0200
committerNiels Möller <nisse@lysator.liu.se>2014-09-22 21:06:22 +0200
commitb524402cb9a4a8d2580eb47f0cf459e620b582b3 (patch)
treee3a98eb0fbd76e891fb6036618dc351a1b2ccfb5 /ecc-mod-inv.c
parentfeec2348921af323cf7d6d12488bc83d8b83252b (diff)
downloadnettle-b524402cb9a4a8d2580eb47f0cf459e620b582b3.tar.gz
Added invert function pointer to struct ecc_modulo.
Updated and renamed sec_modinv -> ecc_mod_inv, and deleted the ecc_modp_inv and ecc_modq_inv wrapper functions.
Diffstat (limited to 'ecc-mod-inv.c')
-rw-r--r--ecc-mod-inv.c170
1 file changed, 170 insertions(+), 0 deletions(-)
diff --git a/ecc-mod-inv.c b/ecc-mod-inv.c
new file mode 100644
index 00000000..22e533f2
--- /dev/null
+++ b/ecc-mod-inv.c
@@ -0,0 +1,170 @@
+/* ecc-mod-inv.c
+
+ Copyright (C) 2013, 2014 Niels Möller
+
+ This file is part of GNU Nettle.
+
+ GNU Nettle is free software: you can redistribute it and/or
+ modify it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+ or
+
+ * the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your
+ option) any later version.
+
+ or both in parallel, as here.
+
+ GNU Nettle is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received copies of the GNU General Public License and
+ the GNU Lesser General Public License along with this program. If
+ not, see http://www.gnu.org/licenses/.
+*/
+
+/* Development of Nettle's ECC support was funded by the .SE Internet Fund. */
+
+#if HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <assert.h>
+
+#include "ecc-internal.h"
+
+static void
+cnd_neg (int cnd, mp_limb_t *rp, const mp_limb_t *ap, mp_size_t n)
+{
+ mp_limb_t cy = (cnd != 0);
+ mp_limb_t mask = -cy;
+ mp_size_t i;
+
+ for (i = 0; i < n; i++)
+ {
+ mp_limb_t r = (ap[i] ^ mask) + cy;
+ cy = r < cy;
+ rp[i] = r;
+ }
+}
+
+/* Compute a^{-1} mod m, with running time depending only on the size.
+ Returns zero if a == 0 (mod m), to be consistent with a^{phi(m)-1}.
+ Also needs (m+1)/2, and m must be odd. */
+
+/* FIXME: Could use mpn_sec_invert (in GMP-6), but with a bit more
+ scratch need since it doesn't precompute (m+1)/2. */
+void
+ecc_mod_inv (const struct ecc_modulo *m,
+ mp_limb_t *vp, mp_limb_t *ap,
+ mp_limb_t *scratch)
+{
+#define bp scratch
+#define dp (scratch + n)
+#define up (scratch + 2*n)
+
+ mp_size_t n = m->size;
+ /* Avoid the mp_bitcnt_t type for compatibility with older GMP
+ versions. */
+ unsigned i;
+
+ /* Maintain
+
+ a = u * orig_a (mod m)
+ b = v * orig_a (mod m)
+
+ and b odd at all times. Initially,
+
+ a = a_orig, u = 1
+ b = m, v = 0
+ */
+
+ assert (ap != vp);
+
+ up[0] = 1;
+ mpn_zero (up+1, n - 1);
+ mpn_copyi (bp, m->m, n);
+ mpn_zero (vp, n);
+
+ for (i = m->bit_size + GMP_NUMB_BITS * n; i-- > 0; )
+ {
+ mp_limb_t odd, swap, cy;
+
+ /* Always maintain b odd. The logic of the iteration is as
+ follows. For a, b:
+
+ odd = a & 1
+ a -= odd * b
+ if (underflow from a-b)
+ {
+ b += a, assigns old a
+ a = B^n-a
+ }
+
+ a /= 2
+
+ For u, v:
+
+ if (underflow from a - b)
+ swap u, v
+ u -= odd * v
+ if (underflow from u - v)
+ u += m
+
+ u /= 2
+ if (a one bit was shifted out)
+ u += (m+1)/2
+
+ As long as a > 0, the quantity
+
+ (bitsize of a) + (bitsize of b)
+
+ is reduced by at least one bit per iteration, hence after
+ (bit_size of orig_a) + (bit_size of m) - 1 iterations we
+ surely have a = 0. Then b = gcd(orig_a, m) and if b = 1 then
+ also v = orig_a^{-1} (mod m)
+ */
+
+ assert (bp[0] & 1);
+ odd = ap[0] & 1;
+
+ /* Which variant is fastest depends on the speed of the various
+ cnd_* functions. Assembly implementation would help. */
+#if 1
+ swap = cnd_sub_n (odd, ap, bp, n);
+ cnd_add_n (swap, bp, ap, n);
+ cnd_neg (swap, ap, ap, n);
+#else
+ swap = odd & mpn_sub_n (dp, ap, bp, n);
+ cnd_copy (swap, bp, ap, n);
+ cnd_neg (swap, dp, dp, n);
+ cnd_copy (odd, ap, dp, n);
+#endif
+
+#if 1
+ cnd_swap (swap, up, vp, n);
+ cy = cnd_sub_n (odd, up, vp, n);
+ cy -= cnd_add_n (cy, up, m->m, n);
+#else
+ cy = cnd_sub_n (odd, up, vp, n);
+ cnd_add_n (swap, vp, up, n);
+ cnd_neg (swap, up, up, n);
+ cnd_add_n (cy ^ swap, up, m->p, n);
+#endif
+ cy = mpn_rshift (ap, ap, n, 1);
+ assert (cy == 0);
+ cy = mpn_rshift (up, up, n, 1);
+ cy = cnd_add_n (cy, up, m->mp1h, n);
+ assert (cy == 0);
+ }
+ assert ( (ap[0] | ap[n-1]) == 0);
+#undef bp
+#undef dp
+#undef up
+}