diff options
author | Jussi Kivilinna <jussi.kivilinna@iki.fi> | 2021-06-28 21:01:32 +0300 |
---|---|---|
committer | Jussi Kivilinna <jussi.kivilinna@iki.fi> | 2021-06-30 17:57:30 +0300 |
commit | fc92c609dfdbcf59a09ca3aaf53a1c1b8408c351 (patch) | |
tree | 71e4c149f795f1750dec9b9b73836c404b8bcbdb /mpi/ec-inline.h | |
parent | 6dfab8cfb94ccb485a15b13df3c499cbb06fddf2 (diff) | |
download | libgcrypt-fc92c609dfdbcf59a09ca3aaf53a1c1b8408c351.tar.gz |
ec-nist: fix 'mod p' carry adjustment and output masking
* mpi/ec-inline.h (MASK_AND64, LIMB_OR64): New.
[__x86_64__]: Use "rme" operand type instead of "g" to fix use
of large 32-bit constants.
* mpi/ec-nist.c (_gcry_mpi_ec_nist192_mod, _gcry_mpi_ec_nist224_mod)
(_gcry_mpi_ec_nist256_mod, _gcry_mpi_ec_nist384_mod): At end,
check if 's[]' is negative instead of the result of the last addition, for
output masks; Use 'p_mult' table entry for P instead of 'ctx->p'.
(_gcry_mpi_ec_nist256_mod): Handle corner case where 2*P needs to be
added after carry based subtraction.
* tests/t-mpi-point.c (check_ec_mul_reduction): New.
(main): Call 'check_ec_mul_reduction'.
--
GnuPG-bug-id: T5510
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
Diffstat (limited to 'mpi/ec-inline.h')
-rw-r--r-- | mpi/ec-inline.h | 66 |
1 files changed, 42 insertions, 24 deletions
diff --git a/mpi/ec-inline.h b/mpi/ec-inline.h index 25c3b40d..a07826e3 100644 --- a/mpi/ec-inline.h +++ b/mpi/ec-inline.h @@ -40,6 +40,8 @@ #define HI32_LIMB64(v) (u32)((mpi_limb_t)(v) >> (BITS_PER_MPI_LIMB - 32)) #define LO32_LIMB64(v) ((u32)(v)) #define LIMB64_C(hi, lo) (((mpi_limb_t)(u32)(hi) << 32) | (u32)(lo)) +#define MASK_AND64(mask, val) ((mask) & (val)) +#define LIMB_OR64(val1, val2) ((val1) | (val2)) #define STORE64_COND(x, pos, mask1, val1, mask2, val2) \ ((x)[(pos)] = ((mask1) & (val1)) | ((mask2) & (val2))) @@ -73,9 +75,9 @@ LIMB64_HILO(u32 hi, u32 lo) : "0" ((mpi_limb_t)(B2)), \ "1" ((mpi_limb_t)(B1)), \ "2" ((mpi_limb_t)(B0)), \ - "g" ((mpi_limb_t)(C2)), \ - "g" ((mpi_limb_t)(C1)), \ - "g" ((mpi_limb_t)(C0)) \ + "rme" ((mpi_limb_t)(C2)), \ + "rme" ((mpi_limb_t)(C1)), \ + "rme" ((mpi_limb_t)(C0)) \ : "cc") #define SUB3_LIMB64(A3, A2, A1, A0, B2, B1, B0, C2, C1, C0) \ @@ -88,9 +90,9 @@ LIMB64_HILO(u32 hi, u32 lo) : "0" ((mpi_limb_t)(B2)), \ "1" ((mpi_limb_t)(B1)), \ "2" ((mpi_limb_t)(B0)), \ - "g" ((mpi_limb_t)(C2)), \ - "g" ((mpi_limb_t)(C1)), \ - "g" ((mpi_limb_t)(C0)) \ + "rme" ((mpi_limb_t)(C2)), \ + "rme" ((mpi_limb_t)(C1)), \ + "rme" ((mpi_limb_t)(C0)) \ : "cc") #define ADD4_LIMB64(A3, A2, A1, A0, B3, B2, B1, B0, C3, C2, C1, C0) \ @@ -106,10 +108,10 @@ LIMB64_HILO(u32 hi, u32 lo) "1" ((mpi_limb_t)(B2)), \ "2" ((mpi_limb_t)(B1)), \ "3" ((mpi_limb_t)(B0)), \ - "g" ((mpi_limb_t)(C3)), \ - "g" ((mpi_limb_t)(C2)), \ - "g" ((mpi_limb_t)(C1)), \ - "g" ((mpi_limb_t)(C0)) \ + "rme" ((mpi_limb_t)(C3)), \ + "rme" ((mpi_limb_t)(C2)), \ + "rme" ((mpi_limb_t)(C1)), \ + "rme" ((mpi_limb_t)(C0)) \ : "cc") #define SUB4_LIMB64(A3, A2, A1, A0, B3, B2, B1, B0, C3, C2, C1, C0) \ @@ -125,10 +127,10 @@ LIMB64_HILO(u32 hi, u32 lo) "1" ((mpi_limb_t)(B2)), \ "2" ((mpi_limb_t)(B1)), \ "3" ((mpi_limb_t)(B0)), \ - "g" ((mpi_limb_t)(C3)), \ - "g" ((mpi_limb_t)(C2)), \ - "g" ((mpi_limb_t)(C1)), \ - "g" ((mpi_limb_t)(C0)) \ + "rme" ((mpi_limb_t)(C3)), \ + "rme" 
((mpi_limb_t)(C2)), \ + "rme" ((mpi_limb_t)(C1)), \ + "rme" ((mpi_limb_t)(C0)) \ : "cc") #define ADD5_LIMB64(A4, A3, A2, A1, A0, B4, B3, B2, B1, B0, \ @@ -148,11 +150,11 @@ LIMB64_HILO(u32 hi, u32 lo) "2" ((mpi_limb_t)(B2)), \ "3" ((mpi_limb_t)(B1)), \ "4" ((mpi_limb_t)(B0)), \ - "g" ((mpi_limb_t)(C4)), \ - "g" ((mpi_limb_t)(C3)), \ - "g" ((mpi_limb_t)(C2)), \ - "g" ((mpi_limb_t)(C1)), \ - "g" ((mpi_limb_t)(C0)) \ + "rme" ((mpi_limb_t)(C4)), \ + "rme" ((mpi_limb_t)(C3)), \ + "rme" ((mpi_limb_t)(C2)), \ + "rme" ((mpi_limb_t)(C1)), \ + "rme" ((mpi_limb_t)(C0)) \ : "cc") #define SUB5_LIMB64(A4, A3, A2, A1, A0, B4, B3, B2, B1, B0, \ @@ -172,11 +174,11 @@ LIMB64_HILO(u32 hi, u32 lo) "2" ((mpi_limb_t)(B2)), \ "3" ((mpi_limb_t)(B1)), \ "4" ((mpi_limb_t)(B0)), \ - "g" ((mpi_limb_t)(C4)), \ - "g" ((mpi_limb_t)(C3)), \ - "g" ((mpi_limb_t)(C2)), \ - "g" ((mpi_limb_t)(C1)), \ - "g" ((mpi_limb_t)(C0)) \ + "rme" ((mpi_limb_t)(C4)), \ + "rme" ((mpi_limb_t)(C3)), \ + "rme" ((mpi_limb_t)(C2)), \ + "rme" ((mpi_limb_t)(C1)), \ + "rme" ((mpi_limb_t)(C0)) \ : "cc") #endif /* __x86_64__ */ @@ -593,6 +595,22 @@ STORE64(mpi_ptr_t x, unsigned int pos, mpi_limb64_t v) x[pos * 2 + 1] = v.hi; } +static inline mpi_limb64_t +MASK_AND64(mpi_limb_t mask, mpi_limb64_t val) +{ + val.lo &= mask; + val.hi &= mask; + return val; +} + +static inline mpi_limb64_t +LIMB_OR64(mpi_limb64_t val1, mpi_limb64_t val2) +{ + val1.lo |= val2.lo; + val1.hi |= val2.hi; + return val1; +} + static inline void STORE64_COND(mpi_ptr_t x, unsigned int pos, mpi_limb_t mask1, mpi_limb64_t val1, mpi_limb_t mask2, mpi_limb64_t val2) |