 rsa-sign-tr.c | 252 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 rsa.h         |   9 +++++++++
 2 files changed, 261 insertions(+), 0 deletions(-)
diff --git a/rsa-sign-tr.c b/rsa-sign-tr.c
index 8542cae2..a51e3ea4 100644
--- a/rsa-sign-tr.c
+++ b/rsa-sign-tr.c
@@ -36,8 +36,15 @@
# include "config.h"
#endif
+#include <assert.h>
+
+#include "gmp-glue.h"
#include "rsa.h"
+#include "rsa-internal.h"
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#if NETTLE_USE_MINI_GMP
/* Blinds m, by computing c = m r^e (mod n), for a random r. Also
returns the inverse (ri), for use by rsa_unblind. */
static void
@@ -118,3 +125,248 @@ rsa_compute_root_tr(const struct rsa_public_key *pub,
return res;
}
+
+int
+rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
+ const struct rsa_private_key *key,
+ void *random_ctx, nettle_random_func *random,
+ mp_limb_t *x, const mp_limb_t *m, size_t mn)
+{
+ mpz_t mz;
+ mpz_t xz;
+ int res;
+
+ mpz_init(mz);
+ mpz_init(xz);
+
+ mpn_copyi(mpz_limbs_write(mz, mn), m, mn);
+ mpz_limbs_finish(mz, mn);
+
+ res = rsa_compute_root_tr(pub, key, random_ctx, random, xz, mz);
+
+ if (res)
+ mpz_limbs_copy(x, xz, mpz_size(pub->n));
+
+ mpz_clear(mz);
+ mpz_clear(xz);
+ return res;
+}
+#else
+/* Blinds m, by computing c = m r^e (mod n), for a random r. Also
+   returns the inverse (ri), for use by rsa_sec_unblind. */
+static void
+rsa_sec_blind (const struct rsa_public_key *pub,
+ void *random_ctx, nettle_random_func *random,
+ mp_limb_t *c, mp_limb_t *ri, const mp_limb_t *m,
+ mp_size_t mn)
+{
+ const mp_limb_t *ep = mpz_limbs_read (pub->e);
+ const mp_limb_t *np = mpz_limbs_read (pub->n);
+ mp_bitcnt_t ebn = mpz_sizeinbase (pub->e, 2);
+ mp_size_t nn = mpz_size (pub->n);
+ size_t itch;
+ size_t i2;
+ mp_limb_t *scratch;
+ TMP_GMP_DECL (tp, mp_limb_t);
+ TMP_GMP_DECL (rp, mp_limb_t);
+ TMP_GMP_DECL (r, uint8_t);
+
+ TMP_GMP_ALLOC (rp, nn);
+ TMP_GMP_ALLOC (r, nn * sizeof(mp_limb_t));
+
+ /* c = m*(r^e) mod n */
+ itch = mpn_sec_powm_itch(nn, ebn, nn);
+ i2 = mpn_sec_mul_itch(nn, mn);
+ itch = MAX(itch, i2);
+ i2 = mpn_sec_div_r_itch(nn + mn, nn);
+ itch = MAX(itch, i2);
+ i2 = mpn_sec_invert_itch(nn);
+ itch = MAX(itch, i2);
+
+ TMP_GMP_ALLOC (tp, nn + mn + itch);
+ scratch = tp + nn + mn;
+
+ /* ri = r^(-1) */
+ do
+ {
+ random(random_ctx, nn * sizeof(mp_limb_t), (uint8_t *)r);
+ mpn_set_base256(rp, nn, r, nn * sizeof(mp_limb_t));
+ mpn_copyi(tp, rp, nn);
+ /* invert r */
+ }
+ while (!mpn_sec_invert (ri, tp, np, nn, 2 * nn * GMP_NUMB_BITS, scratch));
+
+ mpn_sec_powm (c, rp, nn, ep, ebn, np, nn, scratch);
+ /* normally mn == nn, but m can be smaller in some cases */
+ mpn_sec_mul (tp, c, nn, m, mn, scratch);
+ mpn_sec_div_r (tp, nn + mn, np, nn, scratch);
+ mpn_copyi(c, tp, nn);
+
+ TMP_GMP_FREE (r);
+ TMP_GMP_FREE (rp);
+ TMP_GMP_FREE (tp);
+}
+
+/* Unblinds c, computing x = c ri (mod n). */
+static void
+rsa_sec_unblind (const struct rsa_public_key *pub,
+ mp_limb_t *x, mp_limb_t *ri, const mp_limb_t *c)
+{
+ const mp_limb_t *np = mpz_limbs_read (pub->n);
+ mp_size_t nn = mpz_size (pub->n);
+
+ size_t itch;
+ size_t i2;
+ mp_limb_t *scratch;
+ TMP_GMP_DECL(tp, mp_limb_t);
+
+ itch = mpn_sec_mul_itch(nn, nn);
+ i2 = mpn_sec_div_r_itch(nn + nn, nn);
+ itch = MAX(itch, i2);
+
+ TMP_GMP_ALLOC (tp, nn + nn + itch);
+ scratch = tp + nn + nn;
+
+ mpn_sec_mul (tp, c, nn, ri, nn, scratch);
+ mpn_sec_div_r (tp, nn + nn, np, nn, scratch);
+ mpn_copyi(x, tp, nn);
+
+ TMP_GMP_FREE (tp);
+}
+
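+/* Side-channel silent comparison of two limb arrays: the XOR of every
+   limb pair is accumulated, so the running time does not depend on
+   where (or whether) the operands differ. */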
+static int
+sec_equal(const mp_limb_t *a, const mp_limb_t *b, size_t limbs)
+{
+ volatile mp_limb_t z = 0;
+
+ for (size_t i = 0; i < limbs; i++)
+ {
+ z |= (a[i] ^ b[i]);
+ }
+
+ return z == 0;
+}
+
+static int
+rsa_sec_check_root(const struct rsa_public_key *pub,
+ const mp_limb_t *x, const mp_limb_t *m)
+{
+ mp_size_t nn = mpz_size (pub->n);
+ mp_size_t ebn = mpz_sizeinbase (pub->e, 2);
+ const mp_limb_t *np = mpz_limbs_read (pub->n);
+ const mp_limb_t *ep = mpz_limbs_read (pub->e);
+ int ret;
+
+ mp_size_t itch;
+
+ mp_limb_t *scratch;
+ TMP_GMP_DECL(tp, mp_limb_t);
+
+ itch = mpn_sec_powm_itch (nn, ebn, nn);
+ TMP_GMP_ALLOC (tp, nn + itch);
+ scratch = tp + nn;
+
+ mpn_sec_powm(tp, x, nn, ep, ebn, np, nn, scratch);
+ ret = sec_equal(tp, m, nn);
+
+ TMP_GMP_FREE (tp);
+ return ret;
+}
+
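+/* When cnd is 1, zeros the n-limb area at rp; when cnd is 0, leaves
+   it unchanged. Implemented with a mask instead of a branch, so the
+   same memory accesses happen in both cases. */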
+static void
+cnd_mpn_zero (int cnd, volatile mp_ptr rp, mp_size_t n)
+{
+ volatile mp_limb_t c;
+
+ while (--n >= 0)
+ {
+ c = rp[n];
+ c &= ((mp_limb_t)cnd - 1);
+ rp[n] = c;
+ }
+}
+
+/* Checks for any errors in the RSA computation. That avoids attacks
+ * which rely on hardware faults, or on bugs in the software MPI
+ * implementation.
+ * This version is side-channel silent even in case of error;
+ * the destination buffer is always overwritten. */
+int
+rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
+ const struct rsa_private_key *key,
+ void *random_ctx, nettle_random_func *random,
+ mp_limb_t *x, const mp_limb_t *m, size_t mn)
+{
+ TMP_GMP_DECL (c, mp_limb_t);
+ TMP_GMP_DECL (ri, mp_limb_t);
+ TMP_GMP_DECL (scratch, mp_limb_t);
+ size_t key_limb_size;
+ int ret;
+
+ key_limb_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);
+
+ /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the
+ key is invalid and rejected by rsa_private_key_prepare. However,
+ some applications, notably gnutls, don't use this function, and
+ we don't want an invalid key to lead to a crash down inside
+ mpz_powm_sec. So do an additional check here. */
+ if (mpz_even_p (pub->n) || mpz_even_p (key->p) || mpz_even_p (key->q))
+ {
+ mpn_zero(x, key_limb_size);
+ return 0;
+ }
+
+ assert(mpz_size(pub->n) == key_limb_size);
+ assert(mn <= key_limb_size);
+
+ TMP_GMP_ALLOC (c, key_limb_size);
+ TMP_GMP_ALLOC (ri, key_limb_size);
+ TMP_GMP_ALLOC (scratch, _rsa_sec_compute_root_itch(key));
+
+ rsa_sec_blind (pub, random_ctx, random, x, ri, m, mn);
+
+ _rsa_sec_compute_root(key, c, x, scratch);
+
+ ret = rsa_sec_check_root(pub, c, x);
+
+ rsa_sec_unblind(pub, x, ri, c);
+
+ cnd_mpn_zero(1 - ret, x, key_limb_size);
+
+ TMP_GMP_FREE (scratch);
+ TMP_GMP_FREE (ri);
+ TMP_GMP_FREE (c);
+ return ret;
+}
+
+/* Checks for any errors in the RSA computation. That avoids attacks
+ * which rely on hardware faults, or on bugs in the software MPI
+ * implementation.
+ * This version is maintained for API compatibility. It is not
+ * completely side-channel silent: there are conditionals in the
+ * buffer copying, in both the success and the error paths.
+ */
+int
+rsa_compute_root_tr(const struct rsa_public_key *pub,
+ const struct rsa_private_key *key,
+ void *random_ctx, nettle_random_func *random,
+ mpz_t x, const mpz_t m)
+{
+ TMP_GMP_DECL (l, mp_limb_t);
+ int res;
+
+ mp_size_t l_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size);
+ TMP_GMP_ALLOC (l, l_size);
+
+ res = rsa_sec_compute_root_tr (pub, key, random_ctx, random, l,
+ mpz_limbs_read(m), mpz_size(m));
+ if (res) {
+ mp_limb_t *xp = mpz_limbs_write (x, l_size);
+ mpn_copyi (xp, l, l_size);
+ mpz_limbs_finish (x, l_size);
+ }
+
+ TMP_GMP_FREE (l);
+ return res;
+}
+#endif
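
The blinding in rsa_sec_blind and rsa_sec_unblind relies on the identity
(m r^e)^d = m^d r (mod n), which holds because e d = 1 (mod lambda(n));
multiplying by ri = r^-1 therefore recovers m^d, while the exponentiation
only ever sees the randomized value m r^e. Below is a toy round trip of
that identity, using plain (non-constant-time) GMP mpz calls and a
textbook key, purely to check the arithmetic; it is not the constant-time
mpn path of the patch above.

/* Toy demonstration of the blinding identity:
   (m * r^e)^d * r^-1 == m^d (mod n).  Textbook key n = 3233 = 61*53,
   e = 17, d = 2753; nothing here is side-channel silent. */
#include <assert.h>
#include <gmp.h>

int
main (void)
{
  mpz_t n, e, d, m, r, ri, c, x, expect;
  int ok;

  mpz_inits (n, e, d, m, r, ri, c, x, expect, NULL);
  mpz_set_ui (n, 3233);         /* 61 * 53 */
  mpz_set_ui (e, 17);
  mpz_set_ui (d, 2753);         /* e d == 1 (mod lcm(60, 52)) */
  mpz_set_ui (m, 65);
  mpz_set_ui (r, 1234);         /* blinding factor, coprime to n */

  ok = mpz_invert (ri, r, n);   /* ri = r^-1 mod n */
  assert (ok);

  /* Blind: c = m r^e mod n. */
  mpz_powm (c, r, e, n);
  mpz_mul (c, c, m);
  mpz_mod (c, c, n);

  /* Private-key operation on the blinded value: x = c^d mod n. */
  mpz_powm (x, c, d, n);

  /* Unblind: x = x ri mod n, which equals m^d mod n. */
  mpz_mul (x, x, ri);
  mpz_mod (x, x, n);

  mpz_powm (expect, m, d, n);
  assert (mpz_cmp (x, expect) == 0);

  mpz_clears (n, e, d, m, r, ri, c, x, expect, NULL);
  return ok ? 0 : 1;
}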
diff --git a/rsa.h b/rsa.h
index 2143fcd2..1be7dbad 100644
--- a/rsa.h
+++ b/rsa.h
@@ -90,6 +90,7 @@ extern "C" {
#define rsa_decrypt_tr nettle_rsa_decrypt_tr
#define rsa_compute_root nettle_rsa_compute_root
#define rsa_compute_root_tr nettle_rsa_compute_root_tr
+#define rsa_sec_compute_root_tr _nettle_rsa_sec_compute_root_tr
#define rsa_generate_keypair nettle_rsa_generate_keypair
#define rsa_keypair_to_sexp nettle_rsa_keypair_to_sexp
#define rsa_keypair_from_sexp_alist nettle_rsa_keypair_from_sexp_alist
@@ -436,6 +437,14 @@ rsa_compute_root_tr(const struct rsa_public_key *pub,
void *random_ctx, nettle_random_func *random,
mpz_t x, const mpz_t m);
+/* Side-channel silent variant, which uses RSA blinding and checks the
+ * result of the CRT computation against the public key. */
+int
+rsa_sec_compute_root_tr(const struct rsa_public_key *pub,
+ const struct rsa_private_key *key,
+ void *random_ctx, nettle_random_func *random,
+ mp_limb_t *x, const mp_limb_t *m, size_t mn);
+
/* Key generation */
/* Note that the key structs must be initialized first. */
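
For applications, the entry point remains the public rsa_compute_root_tr
declared above; rsa_sec_compute_root_tr is the internal limb-level
workhorse behind it. The following is a minimal sketch of a blinded
private-key operation followed by a public-key check, assuming a freshly
generated keypair and a yarrow RNG; the all-zero seed and the constant
message representative are placeholders for illustration only, never for
real code.

/* Sketch: blinded private-key operation s = m^d mod n via
   rsa_compute_root_tr, then verification with the public key. */
#include <assert.h>
#include <string.h>
#include <nettle/rsa.h>
#include <nettle/yarrow.h>

int
main (void)
{
  struct rsa_public_key pub;
  struct rsa_private_key key;
  struct yarrow256_ctx rng;
  uint8_t seed[YARROW256_SEED_FILE_SIZE];
  mpz_t m, s, v;
  int ok;

  memset (seed, 0, sizeof (seed));      /* toy seed; use real entropy */
  yarrow256_init (&rng, 0, NULL);
  yarrow256_seed (&rng, sizeof (seed), seed);

  rsa_public_key_init (&pub);
  rsa_private_key_init (&key);
  mpz_set_ui (pub.e, 65537);
  ok = rsa_generate_keypair (&pub, &key, &rng,
                             (nettle_random_func *) yarrow256_random,
                             NULL, NULL, 2048, 0);
  assert (ok);

  mpz_init_set_ui (m, 0x123456);        /* stand-in for an encoded digest */
  mpz_init (s);
  mpz_init (v);

  /* Randomized (blinded) private-key operation; returns 0 on an
     invalid key or a detected fault. */
  ok = rsa_compute_root_tr (&pub, &key, &rng,
                            (nettle_random_func *) yarrow256_random, s, m);
  assert (ok);

  /* Check with the public key: s^e mod n must give m back. */
  mpz_powm (v, s, pub.e, pub.n);
  assert (mpz_cmp (v, m) == 0);

  mpz_clears (m, s, v, NULL);
  rsa_public_key_clear (&pub);
  rsa_private_key_clear (&key);
  return 0;
}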