From 6487ef7eeee95457361bda72b13aed8723b2d6e7 Mon Sep 17 00:00:00 2001
From: Niels Möller
Date: Tue, 27 Nov 2018 08:21:02 +0100
Subject: Rewrote _rsa_sec_compute_root, for clarity.

Use new local helper functions, with their own itch functions.
---
 rsa-sec-compute-root.c | 165 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 110 insertions(+), 55 deletions(-)

diff --git a/rsa-sec-compute-root.c b/rsa-sec-compute-root.c
index 8383c911..98b6c2a5 100644
--- a/rsa-sec-compute-root.c
+++ b/rsa-sec-compute-root.c
@@ -45,45 +45,107 @@
 #if !NETTLE_USE_MINI_GMP
 
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
 
+/* Like mpn_sec_mul_itch, monotonically increasing in operand sizes. */
+static mp_size_t
+sec_mul_itch (mp_size_t an, mp_size_t bn)
+{
+  if (an >= bn)
+    return mpn_sec_mul_itch (an, bn);
+  else
+    return mpn_sec_mul_itch (bn, an);
+}
+
+/* Writes an + bn limbs to the rp area. */
+static void
+sec_mul (mp_limb_t *rp,
+         const mp_limb_t *ap, mp_size_t an,
+         const mp_limb_t *bp, mp_size_t bn, mp_limb_t *scratch)
+{
+  if (an >= bn)
+    mpn_sec_mul (rp, ap, an, bp, bn, scratch);
+  else
+    mpn_sec_mul (rp, bp, bn, ap, an, scratch);
+}
+
+static mp_size_t
+sec_mod_mul_itch (mp_size_t an, mp_size_t bn, mp_size_t mn)
+{
+  mp_size_t mul_itch = sec_mul_itch (an, bn);
+  mp_size_t mod_itch = mpn_sec_div_r_itch (an + bn, mn);
+  return MAX(mul_itch, mod_itch);
+}
+
+/* Sets r <-- a b % m. Needs space for an + bn limbs at rp. It is
+   required that an + bn >= mn. */
+static void
+sec_mod_mul (mp_limb_t *rp,
+             const mp_limb_t *ap, mp_size_t an,
+             const mp_limb_t *bp, mp_size_t bn,
+             const mp_limb_t *mp, mp_size_t mn,
+             mp_limb_t *scratch)
+{
+  assert (an + bn >= mn);
+  sec_mul (rp, ap, an, bp, bn, scratch);
+  mpn_sec_div_r (rp, an + bn, mp, mn, scratch);
+}
+
+static mp_size_t
+sec_powm_itch (mp_size_t bn, mp_size_t en, mp_size_t mn)
+{
+  mp_size_t mod_itch = bn + mpn_sec_div_r_itch (bn, mn);
+  mp_size_t pow_itch = mn + mpn_sec_powm_itch (mn, en * GMP_NUMB_BITS, mn);
+  return MAX (mod_itch, pow_itch);
+}
+
+/* Sets r <-- b ^ e % m. Performs an initial reduction b mod m, and
+   requires bn >= mn. */
+static void
+sec_powm (mp_limb_t *rp,
+          const mp_limb_t *bp, mp_size_t bn,
+          const mp_limb_t *ep, mp_size_t en,
+          const mp_limb_t *mp, mp_size_t mn, mp_limb_t *scratch)
+{
+  assert (bn >= mn);
+  assert (en <= mn);
+  mpn_copyi (scratch, bp, bn);
+  mpn_sec_div_r (scratch, bn, mp, mn, scratch + bn);
+  mpn_sec_powm (rp, scratch, mn, ep, en * GMP_NUMB_BITS, mp, mn,
+                scratch + mn);
+}
+
 mp_size_t
 _rsa_sec_compute_root_itch (const struct rsa_private_key *key)
 {
   mp_size_t nn = NETTLE_OCTET_SIZE_TO_LIMB_SIZE (key->size);
   mp_size_t pn = mpz_size (key->p);
   mp_size_t qn = mpz_size (key->q);
+  mp_size_t an = mpz_size (key->a);
+  mp_size_t bn = mpz_size (key->b);
   mp_size_t cn = mpz_size (key->c);
 
-  mp_size_t itch;
-  mp_size_t i2;
-
-  itch = nn;	/* Sufficient for mpn_sec_add_1 */
-  i2 = mpn_sec_div_r_itch (nn, qn);
-  itch = MAX (itch, i2);
-  i2 = mpn_sec_div_r_itch (nn, pn);
-  itch = MAX (itch, i2);
-  i2 = mpn_sec_powm_itch (qn, mpz_size (key->b) * GMP_NUMB_BITS, qn);
-  itch = MAX (itch, i2);
-  i2 = mpn_sec_powm_itch (pn, mpz_size (key->a) * GMP_NUMB_BITS, pn);
-  itch = MAX (itch, i2);
-  i2 = mpn_sec_div_r_itch (qn, pn);
-  itch = MAX (itch, i2);
-  i2 = mpn_sec_mul_itch (pn, cn);
-  itch = MAX (itch, i2);
-  if (qn > pn)
-    i2 = mpn_sec_mul_itch (qn, pn);
-  else
-    i2 = mpn_sec_mul_itch (pn, qn);
-  itch = MAX (itch, i2);
-  i2 = mpn_sec_div_r_itch (pn + cn, pn);
-  itch = MAX (itch, i2);
-  itch += MAX (nn + 1, MAX (pn +cn, qn +cn)) + pn + qn;
-  return itch;
+  mp_size_t powm_p_itch = sec_powm_itch (nn, an, pn);
+  mp_size_t powm_q_itch = sec_powm_itch (nn, bn, qn);
+  mp_size_t mod_mul_itch = cn + MAX(pn, qn)
+    + sec_mod_mul_itch (MAX(pn, qn), cn, pn);
+
+  mp_size_t mul_itch = sec_mul_itch (qn, pn);
+  mp_size_t add_1_itch = mpn_sec_add_1_itch (nn - qn);
+
+  /* pn + qn for the product q * r_mod_p' */
+  mp_size_t itch = pn + qn + MAX (mul_itch, add_1_itch);
+
+  itch = MAX (itch, powm_p_itch);
+  itch = MAX (itch, powm_q_itch);
+  itch = MAX (itch, mod_mul_itch);
+
+  /* pn + qn for the r_mod_p and r_mod_q temporaries. */
+  return pn + qn + itch;
 }
 
 void
 _rsa_sec_compute_root (const struct rsa_private_key *key,
-                       mp_limb_t *rp, const mp_limb_t *mp,
-                       mp_limb_t *scratch)
+		       mp_limb_t *rp, const mp_limb_t *mp,
+		       mp_limb_t *scratch)
 {
   mp_size_t nn = NETTLE_OCTET_SIZE_TO_LIMB_SIZE (key->size);
 
@@ -92,49 +154,42 @@ _rsa_sec_compute_root (const struct rsa_private_key *key,
   const mp_limb_t *pp = mpz_limbs_read (key->p);
   const mp_limb_t *qp = mpz_limbs_read (key->q);
 
-  mp_size_t cn = mpz_size (key->c);
   mp_size_t pn = mpz_size (key->p);
   mp_size_t qn = mpz_size (key->q);
-  mp_size_t tn = nn + 1;
+  mp_size_t an = mpz_size (key->a);
+  mp_size_t bn = mpz_size (key->b);
+  mp_size_t cn = mpz_size (key->c);
 
-  mp_limb_t *r_mod_p = scratch + MAX (tn, MAX (pn + cn, qn + cn));
-  mp_limb_t *r_mod_q = r_mod_p + pn;
-  mp_limb_t *sp = r_mod_q + qn;
+  mp_limb_t *r_mod_p = scratch;
+  mp_limb_t *r_mod_q = scratch + pn;
+  mp_limb_t *scratch_out = r_mod_q + qn;
   mp_limb_t cy;
 
-  assert (pn + qn <= tn);
   assert (pn <= nn);
   assert (qn <= nn);
+  assert (an <= pn);
+  assert (bn <= qn);
   assert (cn <= pn);
 
-  /* Compute r_mod_q = m^d % q = (m%q)^b % q */
-  mpn_copyi (scratch, mp, nn);
-  mpn_sec_div_r (scratch, nn, qp, qn, sp);
-  mpn_sec_powm (r_mod_q, scratch, qn, mpz_limbs_read (key->b),
-                mpz_size (key->b) * GMP_NUMB_BITS, qp, qn, sp);
-
   /* Compute r_mod_p = m^d % p = (m%p)^a % p */
-  mpn_copyi (scratch, mp, nn);
-  mpn_sec_div_r (scratch, nn, pp, pn, sp);
-  mpn_sec_powm (r_mod_p, scratch, pn, mpz_limbs_read (key->a),
-                mpz_size (key->a) * GMP_NUMB_BITS, pp, pn, sp);
+  sec_powm (r_mod_p, mp, nn, mpz_limbs_read (key->a), an, pp, pn, scratch_out);
+  /* Compute r_mod_q = m^d % q = (m%q)^b % q */
+  sec_powm (r_mod_q, mp, nn, mpz_limbs_read (key->b), bn, qp, qn, scratch_out);
 
   /* Set r_mod_p' = r_mod_p * c % p - r_mod_q * c % p . */
-  mpn_sec_mul (scratch, r_mod_p, pn, mpz_limbs_read (key->c), cn, sp);
-  mpn_sec_div_r (scratch, pn + cn, pp, pn, sp);
-  mpn_copyi (r_mod_p, scratch, pn);
-  mpn_sec_mul (scratch, r_mod_q, qn, mpz_limbs_read (key->c), cn, sp);
-  mpn_sec_div_r (scratch, qn + cn, pp, pn, sp);
-  cy = mpn_sub_n (r_mod_p, r_mod_p, scratch, pn);
+  sec_mod_mul (scratch_out, r_mod_p, pn, mpz_limbs_read (key->c), cn, pp, pn,
+               scratch_out + cn + pn);
+  mpn_copyi (r_mod_p, scratch_out, pn);
+
+  sec_mod_mul (scratch_out, r_mod_q, qn, mpz_limbs_read (key->c), cn, pp, pn,
+               scratch_out + cn + qn);
+  cy = mpn_sub_n (r_mod_p, r_mod_p, scratch_out, pn);
   cnd_add_n (cy, r_mod_p, pp, pn);
 
   /* Finally, compute x = r_mod_q + q r_mod_p' */
-  if (qn > pn)
-    mpn_sec_mul (scratch, qp, qn, r_mod_p, pn, sp);
-  else
-    mpn_sec_mul (scratch, r_mod_p, pn, qp, qn, sp);
+  sec_mul (scratch_out, qp, qn, r_mod_p, pn, scratch_out + pn + qn);
 
-  cy = mpn_add_n (rp, scratch, r_mod_q, qn);
-  mpn_sec_add_1 (rp + qn, scratch + qn, nn - qn, cy, sp);
+  cy = mpn_add_n (rp, scratch_out, r_mod_q, qn);
+  mpn_sec_add_1 (rp + qn, scratch_out + qn, nn - qn, cy, scratch_out + pn + qn);
 }
 
 #endif
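
For reference, the arithmetic that _rsa_sec_compute_root carries out is the standard CRT recombination: with a = d mod (p-1), b = d mod (q-1) and c = q^-1 mod p, it computes r_mod_p = m^a mod p and r_mod_q = m^b mod q, and then the root x = r_mod_q + q * (c * (r_mod_p - r_mod_q) mod p). The sketch below restates that computation with GMP's ordinary mpz interface. It is illustrative only: it runs in variable time, unlike the mpn_sec_* code in the patch, and the function name crt_root_sketch is made up for this note.

#include <gmp.h>

/* Illustrative, variable-time restatement of the CRT recombination
   performed by _rsa_sec_compute_root.  Inputs: the message
   representative m, the primes p and q, a = d mod (p-1),
   b = d mod (q-1), c = q^{-1} mod p.  Output: x = m^d mod (p*q). */
static void
crt_root_sketch (mpz_t x, const mpz_t m,
                 const mpz_t p, const mpz_t q,
                 const mpz_t a, const mpz_t b, const mpz_t c)
{
  mpz_t r_mod_p, r_mod_q, t;
  mpz_inits (r_mod_p, r_mod_q, t, NULL);

  mpz_powm (r_mod_p, m, a, p);   /* r_mod_p = m^a mod p */
  mpz_powm (r_mod_q, m, b, q);   /* r_mod_q = m^b mod q */

  /* r_mod_p' = c * (r_mod_p - r_mod_q) mod p; the subtraction may go
     negative, but mpz_mod returns a non-negative remainder. */
  mpz_sub (t, r_mod_p, r_mod_q);
  mpz_mul (t, t, c);
  mpz_mod (t, t, p);

  /* x = r_mod_q + q * r_mod_p' */
  mpz_mul (t, t, q);
  mpz_add (x, r_mod_q, t);

  mpz_clears (r_mod_p, r_mod_q, t, NULL);
}

The pairing of each new helper with its own *_itch function follows the convention of GMP's mpn_sec_* family: the caller first asks the corresponding _itch function how many scratch limbs an operation needs, then passes a scratch area of at least that size, which is what _rsa_sec_compute_root_itch totals up for the whole computation.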