Diffstat (limited to 'Modules/_decimal/libmpdec')
-rw-r--r--  Modules/_decimal/libmpdec/README.txt | 90
-rw-r--r--  Modules/_decimal/libmpdec/basearith.c | 635
-rw-r--r--  Modules/_decimal/libmpdec/basearith.h | 213
-rw-r--r--  Modules/_decimal/libmpdec/bits.h | 192
-rw-r--r--  Modules/_decimal/libmpdec/constants.c | 132
-rw-r--r--  Modules/_decimal/libmpdec/constants.h | 83
-rw-r--r--  Modules/_decimal/libmpdec/context.c | 286
-rw-r--r--  Modules/_decimal/libmpdec/convolute.c | 174
-rw-r--r--  Modules/_decimal/libmpdec/convolute.h | 43
-rw-r--r--  Modules/_decimal/libmpdec/crt.c | 179
-rw-r--r--  Modules/_decimal/libmpdec/crt.h | 40
-rw-r--r--  Modules/_decimal/libmpdec/difradix2.c | 173
-rw-r--r--  Modules/_decimal/libmpdec/difradix2.h | 41
-rw-r--r--  Modules/_decimal/libmpdec/fnt.c | 81
-rw-r--r--  Modules/_decimal/libmpdec/fnt.h | 42
-rw-r--r--  Modules/_decimal/libmpdec/fourstep.c | 257
-rw-r--r--  Modules/_decimal/libmpdec/fourstep.h | 41
-rw-r--r--  Modules/_decimal/libmpdec/io.c | 1575
-rw-r--r--  Modules/_decimal/libmpdec/io.h | 59
-rw-r--r--  Modules/_decimal/libmpdec/literature/REFERENCES.txt | 51
-rw-r--r--  Modules/_decimal/libmpdec/literature/bignum.txt | 83
-rw-r--r--  Modules/_decimal/libmpdec/literature/fnt.py | 208
-rw-r--r--  Modules/_decimal/libmpdec/literature/matrix-transform.txt | 256
-rw-r--r--  Modules/_decimal/libmpdec/literature/mulmod-64.txt | 127
-rw-r--r--  Modules/_decimal/libmpdec/literature/mulmod-ppro.txt | 269
-rw-r--r--  Modules/_decimal/libmpdec/literature/six-step.txt | 63
-rw-r--r--  Modules/_decimal/libmpdec/literature/umodarith.lisp | 692
-rw-r--r--  Modules/_decimal/libmpdec/memory.c | 292
-rw-r--r--  Modules/_decimal/libmpdec/memory.h | 44
-rw-r--r--  Modules/_decimal/libmpdec/mpdecimal.c | 7623
-rw-r--r--  Modules/_decimal/libmpdec/mpdecimal.h | 800
-rw-r--r--  Modules/_decimal/libmpdec/numbertheory.c | 132
-rw-r--r--  Modules/_decimal/libmpdec/numbertheory.h | 71
-rw-r--r--  Modules/_decimal/libmpdec/sixstep.c | 214
-rw-r--r--  Modules/_decimal/libmpdec/sixstep.h | 41
-rw-r--r--  Modules/_decimal/libmpdec/transpose.c | 276
-rw-r--r--  Modules/_decimal/libmpdec/transpose.h | 55
-rw-r--r--  Modules/_decimal/libmpdec/typearith.h | 669
-rw-r--r--  Modules/_decimal/libmpdec/umodarith.h | 650
-rw-r--r--  Modules/_decimal/libmpdec/vccompat.h | 62
-rw-r--r--  Modules/_decimal/libmpdec/vcdiv64.asm | 48
-rw-r--r--  Modules/_decimal/libmpdec/vcstdint.h | 232
42 files changed, 17294 insertions, 0 deletions
diff --git a/Modules/_decimal/libmpdec/README.txt b/Modules/_decimal/libmpdec/README.txt
new file mode 100644
index 0000000000..ad8f88c80c
--- /dev/null
+++ b/Modules/_decimal/libmpdec/README.txt
@@ -0,0 +1,90 @@
+
+
+libmpdec
+========
+
+libmpdec is a fast C/C++ library for correctly-rounded arbitrary precision
+decimal floating point arithmetic. It is a complete implementation of
+Mike Cowlishaw/IBM's General Decimal Arithmetic Specification.
+
+
+Files required for the Python _decimal module
+=============================================
+
+ Core files for small and medium precision arithmetic
+ ----------------------------------------------------
+
+ basearith.{c,h} -> Core arithmetic in base 10**9 or 10**19.
+ bits.h -> Portable detection of least/most significant one-bit.
+ constants.{c,h} -> Constants that are used in multiple files.
+ context.c -> Context functions.
+ io.{c,h} -> Conversions between mpd_t and ASCII strings,
+ mpd_t formatting (allows UTF-8 fill character).
+ memory.{c,h} -> Allocation handlers with overflow detection
+ and functions for switching between static
+ and dynamic mpd_t.
+ mpdecimal.{c,h} -> All (quiet) functions of the specification.
+ typearith.h -> Fast primitives for double word multiplication,
+ division etc.
+
+ Visual Studio only:
+ ~~~~~~~~~~~~~~~~~~~
+ vccompat.h -> snprintf <==> sprintf_s and similar things.
+ vcstdint.h -> stdint.h (included in VS 2010 but not in VS 2008).
+ vcdiv64.asm -> Double word division used in typearith.h. VS 2008 does
+ not allow inline asm for x64. Also, it does not provide
+ an intrinsic for double word division.
+
+ Files for bignum arithmetic:
+ ----------------------------
+
+ The following files implement the Fast Number Theoretic Transform
+ used for multiplying coefficients with more than 1024 words (see
+ mpdecimal.c: _mpd_fntmul()).
+
+ umodarith.h -> Fast low level routines for unsigned modular arithmetic.
+ numbertheory.{c,h} -> Routines for setting up the Number Theoretic Transform.
+ difradix2.{c,h} -> Decimation in frequency transform, used as the
+ "base case" by the following three files:
+
+ fnt.{c,h} -> Transform arrays up to 4096 words.
+ sixstep.{c,h} -> Transform larger arrays of length 2**n.
+ fourstep.{c,h} -> Transform larger arrays of length 3 * 2**n.
+
+ convolute.{c,h} -> Fast convolution using one of the three transform
+ functions.
+ transpose.{c,h} -> Transpositions needed for the sixstep algorithm.
+ crt.{c,h} -> Chinese Remainder Theorem: use information from three
+ transforms modulo three different primes to get the
+ final result.
+
+
+Pointers to literature, proofs and more
+=======================================
+
+ literature/
+ -----------
+
+ REFERENCES.txt -> List of relevant papers.
+ bignum.txt -> Explanation of the Fast Number Theoretic Transform (FNT).
+ fnt.py -> Verify constants used in the FNT; Python demo for the
+ O(N**2) discrete transform.
+
+ matrix-transform.txt -> Proof for the Matrix Fourier Transform used in
+ fourstep.c.
+ six-step.txt -> Show that the algorithm used in sixstep.c is
+ a variant of the Matrix Fourier Transform.
+ mulmod-64.txt -> Proof for the mulmod64 algorithm from
+ umodarith.h.
+ mulmod-ppro.txt -> Proof for the x87 FPU modular multiplication
+ from umodarith.h.
+ umodarith.lisp -> ACL2 proofs for many functions from umodarith.h.
+
+
+Library Author
+==============
+
+ Stefan Krah <skrah@bytereef.org>
+
+
+
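
A minimal usage sketch of the public API that these files provide, assuming the documented entry points in mpdecimal.h (mpd_maxcontext, mpd_new, mpd_set_string, mpd_add, mpd_to_sci, mpd_del and mpd_free) behave as described; this is an illustration, not part of the shipped sources:

    #include <stdio.h>
    #include "mpdecimal.h"

    int main(void)
    {
        mpd_context_t ctx;
        mpd_t *a, *b, *sum;
        char *s;

        mpd_maxcontext(&ctx);           /* context with maximum precision */

        a = mpd_new(&ctx);
        b = mpd_new(&ctx);
        sum = mpd_new(&ctx);

        mpd_set_string(a, "0.1", &ctx);
        mpd_set_string(b, "0.2", &ctx);
        mpd_add(sum, a, b, &ctx);       /* correctly rounded decimal: 0.3 */

        s = mpd_to_sci(sum, 1);
        printf("%s\n", s);

        mpd_free(s);
        mpd_del(a); mpd_del(b); mpd_del(sum);
        return 0;
    }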
diff --git a/Modules/_decimal/libmpdec/basearith.c b/Modules/_decimal/libmpdec/basearith.c
new file mode 100644
index 0000000000..e9d5024fda
--- /dev/null
+++ b/Modules/_decimal/libmpdec/basearith.c
@@ -0,0 +1,635 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include "constants.h"
+#include "memory.h"
+#include "typearith.h"
+#include "basearith.h"
+
+
+/*********************************************************************/
+/* Calculations in base MPD_RADIX */
+/*********************************************************************/
+
+
+/*
+ * Knuth, TAOCP, Volume 2, 4.3.1:
+ * w := sum of u (len m) and v (len n)
+ * n > 0 and m >= n
+ * The calling function has to handle a possible final carry.
+ */
+mpd_uint_t
+_mpd_baseadd(mpd_uint_t *w, const mpd_uint_t *u, const mpd_uint_t *v,
+ mpd_size_t m, mpd_size_t n)
+{
+ mpd_uint_t s;
+ mpd_uint_t carry = 0;
+ mpd_size_t i;
+
+ assert(n > 0 && m >= n);
+
+ /* add n members of u and v */
+ for (i = 0; i < n; i++) {
+ s = u[i] + (v[i] + carry);
+ carry = (s < u[i]) | (s >= MPD_RADIX);
+ w[i] = carry ? s-MPD_RADIX : s;
+ }
+ /* if there is a carry, propagate it */
+ for (; carry && i < m; i++) {
+ s = u[i] + carry;
+ carry = (s == MPD_RADIX);
+ w[i] = carry ? 0 : s;
+ }
+ /* copy the rest of u */
+ for (; i < m; i++) {
+ w[i] = u[i];
+ }
+
+ return carry;
+}
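
As a self-contained illustration of the same schoolbook addition, here is a sketch that fixes the base at 10**9 and uses plain uint64_t words (the names are made up; the simpler carry test suffices here because two base-10**9 words plus a carry cannot overflow 64 bits):

    #include <stdint.h>
    #include <stddef.h>

    #define RADIX 1000000000ULL            /* 10**9 */

    /* w := u (len m) + v (len n), n > 0 and m >= n; returns the final carry */
    uint64_t
    base_add(uint64_t *w, const uint64_t *u, const uint64_t *v,
             size_t m, size_t n)
    {
        uint64_t carry = 0;
        size_t i;

        for (i = 0; i < n; i++) {          /* add the overlapping words */
            uint64_t s = u[i] + v[i] + carry;
            carry = (s >= RADIX);
            w[i] = carry ? s - RADIX : s;
        }
        for (; i < m; i++) {               /* propagate the carry through u */
            uint64_t s = u[i] + carry;
            carry = (s >= RADIX);
            w[i] = carry ? s - RADIX : s;
        }
        return carry;
    }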
+
+/*
+ * Add the contents of u to w. Carries are propagated further. The caller
+ * has to make sure that w is big enough.
+ */
+void
+_mpd_baseaddto(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n)
+{
+ mpd_uint_t s;
+ mpd_uint_t carry = 0;
+ mpd_size_t i;
+
+ if (n == 0) return;
+
+ /* add n members of u to w */
+ for (i = 0; i < n; i++) {
+ s = w[i] + (u[i] + carry);
+ carry = (s < w[i]) | (s >= MPD_RADIX);
+ w[i] = carry ? s-MPD_RADIX : s;
+ }
+ /* if there is a carry, propagate it */
+ for (; carry; i++) {
+ s = w[i] + carry;
+ carry = (s == MPD_RADIX);
+ w[i] = carry ? 0 : s;
+ }
+}
+
+/*
+ * Add v to w (len m). The calling function has to handle a possible
+ * final carry. Assumption: m > 0.
+ */
+mpd_uint_t
+_mpd_shortadd(mpd_uint_t *w, mpd_size_t m, mpd_uint_t v)
+{
+ mpd_uint_t s;
+ mpd_uint_t carry;
+ mpd_size_t i;
+
+ assert(m > 0);
+
+ /* add v to w */
+ s = w[0] + v;
+ carry = (s < v) | (s >= MPD_RADIX);
+ w[0] = carry ? s-MPD_RADIX : s;
+
+ /* if there is a carry, propagate it */
+ for (i = 1; carry && i < m; i++) {
+ s = w[i] + carry;
+ carry = (s == MPD_RADIX);
+ w[i] = carry ? 0 : s;
+ }
+
+ return carry;
+}
+
+/* Increment u. The calling function has to handle a possible carry. */
+mpd_uint_t
+_mpd_baseincr(mpd_uint_t *u, mpd_size_t n)
+{
+ mpd_uint_t s;
+ mpd_uint_t carry = 1;
+ mpd_size_t i;
+
+ assert(n > 0);
+
+ /* if there is a carry, propagate it */
+ for (i = 0; carry && i < n; i++) {
+ s = u[i] + carry;
+ carry = (s == MPD_RADIX);
+ u[i] = carry ? 0 : s;
+ }
+
+ return carry;
+}
+
+/*
+ * Knuth, TAOCP, Volume 2, 4.3.1:
+ * w := difference of u (len m) and v (len n).
+ * number in u >= number in v;
+ */
+void
+_mpd_basesub(mpd_uint_t *w, const mpd_uint_t *u, const mpd_uint_t *v,
+ mpd_size_t m, mpd_size_t n)
+{
+ mpd_uint_t d;
+ mpd_uint_t borrow = 0;
+ mpd_size_t i;
+
+ assert(m > 0 && n > 0);
+
+ /* subtract n members of v from u */
+ for (i = 0; i < n; i++) {
+ d = u[i] - (v[i] + borrow);
+ borrow = (u[i] < d);
+ w[i] = borrow ? d + MPD_RADIX : d;
+ }
+ /* if there is a borrow, propagate it */
+ for (; borrow && i < m; i++) {
+ d = u[i] - borrow;
+ borrow = (u[i] == 0);
+ w[i] = borrow ? MPD_RADIX-1 : d;
+ }
+ /* copy the rest of u */
+ for (; i < m; i++) {
+ w[i] = u[i];
+ }
+}
+
+/*
+ * Subtract the contents of u from w. w is larger than u. Borrows are
+ * propagated further, but eventually w can absorb the final borrow.
+ */
+void
+_mpd_basesubfrom(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n)
+{
+ mpd_uint_t d;
+ mpd_uint_t borrow = 0;
+ mpd_size_t i;
+
+ if (n == 0) return;
+
+ /* subtract n members of u from w */
+ for (i = 0; i < n; i++) {
+ d = w[i] - (u[i] + borrow);
+ borrow = (w[i] < d);
+ w[i] = borrow ? d + MPD_RADIX : d;
+ }
+ /* if there is a borrow, propagate it */
+ for (; borrow; i++) {
+ d = w[i] - borrow;
+ borrow = (w[i] == 0);
+ w[i] = borrow ? MPD_RADIX-1 : d;
+ }
+}
+
+/* w := product of u (len n) and v (single word) */
+void
+_mpd_shortmul(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n, mpd_uint_t v)
+{
+ mpd_uint_t hi, lo;
+ mpd_uint_t carry = 0;
+ mpd_size_t i;
+
+ assert(n > 0);
+
+ for (i=0; i < n; i++) {
+
+ _mpd_mul_words(&hi, &lo, u[i], v);
+ lo = carry + lo;
+ if (lo < carry) hi++;
+
+ _mpd_div_words_r(&carry, &w[i], hi, lo);
+ }
+ w[i] = carry;
+}
+
+/*
+ * Knuth, TAOCP, Volume 2, 4.3.1:
+ * w := product of u (len m) and v (len n)
+ * w must be initialized to zero
+ */
+void
+_mpd_basemul(mpd_uint_t *w, const mpd_uint_t *u, const mpd_uint_t *v,
+ mpd_size_t m, mpd_size_t n)
+{
+ mpd_uint_t hi, lo;
+ mpd_uint_t carry;
+ mpd_size_t i, j;
+
+ assert(m > 0 && n > 0);
+
+ for (j=0; j < n; j++) {
+ carry = 0;
+ for (i=0; i < m; i++) {
+
+ _mpd_mul_words(&hi, &lo, u[i], v[j]);
+ lo = w[i+j] + lo;
+ if (lo < w[i+j]) hi++;
+ lo = carry + lo;
+ if (lo < carry) hi++;
+
+ _mpd_div_words_r(&carry, &w[i+j], hi, lo);
+ }
+ w[j+m] = carry;
+ }
+}
+
+/*
+ * Knuth, TAOCP Volume 2, 4.3.1, exercise 16:
+ * w := quotient of u (len n) divided by a single word v
+ */
+mpd_uint_t
+_mpd_shortdiv(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n, mpd_uint_t v)
+{
+ mpd_uint_t hi, lo;
+ mpd_uint_t rem = 0;
+ mpd_size_t i;
+
+ assert(n > 0);
+
+ for (i=n-1; i != MPD_SIZE_MAX; i--) {
+
+ _mpd_mul_words(&hi, &lo, rem, MPD_RADIX);
+ lo = u[i] + lo;
+ if (lo < u[i]) hi++;
+
+ _mpd_div_words(&w[i], &rem, hi, lo, v);
+ }
+
+ return rem;
+}
+
+/*
+ * Knuth, TAOCP Volume 2, 4.3.1:
+ * q, r := quotient and remainder of uconst (len nplusm)
+ * divided by vconst (len n)
+ * nplusm >= n
+ *
+ * If r is not NULL, r will contain the remainder. If r is NULL, the
+ * return value indicates if there is a remainder: 1 for true, 0 for
+ * false. A return value of -1 indicates an error.
+ */
+int
+_mpd_basedivmod(mpd_uint_t *q, mpd_uint_t *r,
+ const mpd_uint_t *uconst, const mpd_uint_t *vconst,
+ mpd_size_t nplusm, mpd_size_t n)
+{
+ mpd_uint_t ustatic[MPD_MINALLOC_MAX];
+ mpd_uint_t vstatic[MPD_MINALLOC_MAX];
+ mpd_uint_t *u = ustatic;
+ mpd_uint_t *v = vstatic;
+ mpd_uint_t d, qhat, rhat, w2[2];
+ mpd_uint_t hi, lo, x;
+ mpd_uint_t carry;
+ mpd_size_t i, j, m;
+ int retval = 0;
+
+ assert(n > 1 && nplusm >= n);
+ m = sub_size_t(nplusm, n);
+
+ /* D1: normalize */
+ d = MPD_RADIX / (vconst[n-1] + 1);
+
+ if (nplusm >= MPD_MINALLOC_MAX) {
+ if ((u = mpd_alloc(nplusm+1, sizeof *u)) == NULL) {
+ return -1;
+ }
+ }
+ if (n >= MPD_MINALLOC_MAX) {
+ if ((v = mpd_alloc(n+1, sizeof *v)) == NULL) {
+ mpd_free(u);
+ return -1;
+ }
+ }
+
+ _mpd_shortmul(u, uconst, nplusm, d);
+ _mpd_shortmul(v, vconst, n, d);
+
+ /* D2: loop */
+ for (j=m; j != MPD_SIZE_MAX; j--) {
+
+ /* D3: calculate qhat and rhat */
+ rhat = _mpd_shortdiv(w2, u+j+n-1, 2, v[n-1]);
+ qhat = w2[1] * MPD_RADIX + w2[0];
+
+ while (1) {
+ if (qhat < MPD_RADIX) {
+ _mpd_singlemul(w2, qhat, v[n-2]);
+ if (w2[1] <= rhat) {
+ if (w2[1] != rhat || w2[0] <= u[j+n-2]) {
+ break;
+ }
+ }
+ }
+ qhat -= 1;
+ rhat += v[n-1];
+ if (rhat < v[n-1] || rhat >= MPD_RADIX) {
+ break;
+ }
+ }
+ /* D4: multiply and subtract */
+ carry = 0;
+ for (i=0; i <= n; i++) {
+
+ _mpd_mul_words(&hi, &lo, qhat, v[i]);
+
+ lo = carry + lo;
+ if (lo < carry) hi++;
+
+ _mpd_div_words_r(&hi, &lo, hi, lo);
+
+ x = u[i+j] - lo;
+ carry = (u[i+j] < x);
+ u[i+j] = carry ? x+MPD_RADIX : x;
+ carry += hi;
+ }
+ q[j] = qhat;
+ /* D5: test remainder */
+ if (carry) {
+ q[j] -= 1;
+ /* D6: add back */
+ (void)_mpd_baseadd(u+j, u+j, v, n+1, n);
+ }
+ }
+
+ /* D8: unnormalize */
+ if (r != NULL) {
+ _mpd_shortdiv(r, u, n, d);
+ /* we are not interested in the return value here */
+ retval = 0;
+ }
+ else {
+ retval = !_mpd_isallzero(u, n);
+ }
+
+
+ if (u != ustatic) mpd_free(u);
+ if (v != vstatic) mpd_free(v);
+ return retval;
+}
+
+/*
+ * Left shift of src by 'shift' digits; src may equal dest.
+ *
+ * dest := area of n mpd_uint_t with space for srcdigits+shift digits.
+ * src := coefficient with length m.
+ *
+ * The case splits in the function are non-obvious. The following
+ * equations might help:
+ *
+ * Let msdigits denote the number of digits in the most significant
+ * word of src. Then 1 <= msdigits <= rdigits.
+ *
+ * 1) shift = q * rdigits + r
+ * 2) srcdigits = qsrc * rdigits + msdigits
+ * 3) destdigits = shift + srcdigits
+ * = q * rdigits + r + qsrc * rdigits + msdigits
+ * = q * rdigits + (qsrc * rdigits + (r + msdigits))
+ *
+ * The result has q zero words, followed by the coefficient that
+ * is left-shifted by r. The case r == 0 is trivial. For r > 0, it
+ * is important to keep in mind that we always read m source words,
+ * but write m+1 destination words if r + msdigits > rdigits, m words
+ * otherwise.
+ */
+void
+_mpd_baseshiftl(mpd_uint_t *dest, mpd_uint_t *src, mpd_size_t n, mpd_size_t m,
+ mpd_size_t shift)
+{
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
+ /* spurious uninitialized warnings */
+ mpd_uint_t l=l, lprev=lprev, h=h;
+#else
+ mpd_uint_t l, lprev, h;
+#endif
+ mpd_uint_t q, r;
+ mpd_uint_t ph;
+
+ assert(m > 0 && n >= m);
+
+ _mpd_div_word(&q, &r, (mpd_uint_t)shift, MPD_RDIGITS);
+
+ if (r != 0) {
+
+ ph = mpd_pow10[r];
+
+ --m; --n;
+ _mpd_divmod_pow10(&h, &lprev, src[m--], MPD_RDIGITS-r);
+ if (h != 0) { /* r + msdigits > rdigits <==> h != 0 */
+ dest[n--] = h;
+ }
+ /* write m-1 shifted words */
+ for (; m != MPD_SIZE_MAX; m--,n--) {
+ _mpd_divmod_pow10(&h, &l, src[m], MPD_RDIGITS-r);
+ dest[n] = ph * lprev + h;
+ lprev = l;
+ }
+ /* write least significant word */
+ dest[q] = ph * lprev;
+ }
+ else {
+ while (--m != MPD_SIZE_MAX) {
+ dest[m+q] = src[m];
+ }
+ }
+
+ mpd_uint_zero(dest, q);
+}
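
A tiny worked example of the q/r split described in the comment above, assuming base 10**9 (rdigits == 9); the values and the standalone main() are purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Shift the single-word coefficient 123456789 left by 11 digits. */
        uint64_t src = 123456789ULL;
        unsigned shift = 11, k;
        unsigned q = shift / 9;        /* 1: leading zero words           */
        unsigned r = shift % 9;        /* 2: digits shifted within a word */
        uint64_t pow10_r = 1, pow10_split = 1;

        for (k = 0; k < r; k++) pow10_r *= 10;        /* 10**r     == 100   */
        for (k = 0; k < 9-r; k++) pow10_split *= 10;  /* 10**(9-r) == 10**7 */

        /* Split src at 9-r digits: the high part becomes a new word,
           the low part is shifted up by r digits. */
        printf("q=%u zero words, low word %llu, high word %llu\n",
               q,
               (unsigned long long)(src % pow10_split * pow10_r),
               (unsigned long long)(src / pow10_split));
        /* prints: q=1 zero words, low word 345678900, high word 12 */
        return 0;
    }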
+
+/*
+ * Right shift of src by 'shift' digits; src may equal dest.
+ * Assumption: srcdigits-shift > 0.
+ *
+ * dest := area with space for srcdigits-shift digits.
+ * src := coefficient with length 'slen'.
+ *
+ * The case splits in the function rely on the following equations:
+ *
+ * Let msdigits denote the number of digits in the most significant
+ * word of src. Then 1 <= msdigits <= rdigits.
+ *
+ * 1) shift = q * rdigits + r
+ * 2) srcdigits = qsrc * rdigits + msdigits
+ * 3) destdigits = srcdigits - shift
+ * = qsrc * rdigits + msdigits - (q * rdigits + r)
+ * = (qsrc - q) * rdigits + msdigits - r
+ *
+ * Since destdigits > 0 and 1 <= msdigits <= rdigits:
+ *
+ * 4) qsrc >= q
+ * 5) qsrc == q ==> msdigits > r
+ *
+ * The result has slen-q words if msdigits > r, slen-q-1 words otherwise.
+ */
+mpd_uint_t
+_mpd_baseshiftr(mpd_uint_t *dest, mpd_uint_t *src, mpd_size_t slen,
+ mpd_size_t shift)
+{
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
+ /* spurious uninitialized warnings */
+ mpd_uint_t l=l, h=h, hprev=hprev; /* low, high, previous high */
+#else
+ mpd_uint_t l, h, hprev; /* low, high, previous high */
+#endif
+ mpd_uint_t rnd, rest; /* rounding digit, rest */
+ mpd_uint_t q, r;
+ mpd_size_t i, j;
+ mpd_uint_t ph;
+
+ assert(slen > 0);
+
+ _mpd_div_word(&q, &r, (mpd_uint_t)shift, MPD_RDIGITS);
+
+ rnd = rest = 0;
+ if (r != 0) {
+
+ ph = mpd_pow10[MPD_RDIGITS-r];
+
+ _mpd_divmod_pow10(&hprev, &rest, src[q], r);
+ _mpd_divmod_pow10(&rnd, &rest, rest, r-1);
+
+ if (rest == 0 && q > 0) {
+ rest = !_mpd_isallzero(src, q);
+ }
+ /* write slen-q-1 words */
+ for (j=0,i=q+1; i<slen; i++,j++) {
+ _mpd_divmod_pow10(&h, &l, src[i], r);
+ dest[j] = ph * l + hprev;
+ hprev = h;
+ }
+ /* write most significant word */
+ if (hprev != 0) { /* always the case if slen==q+1 */
+ dest[j] = hprev;
+ }
+ }
+ else {
+ if (q > 0) {
+ _mpd_divmod_pow10(&rnd, &rest, src[q-1], MPD_RDIGITS-1);
+ /* is there any non-zero digit below rnd? */
+ if (rest == 0) rest = !_mpd_isallzero(src, q-1);
+ }
+ for (j = 0; j < slen-q; j++) {
+ dest[j] = src[q+j];
+ }
+ }
+
+ /* 0-4 ==> rnd+rest < 0.5 */
+ /* 5 ==> rnd+rest == 0.5 */
+ /* 6-9 ==> rnd+rest > 0.5 */
+ return (rnd == 0 || rnd == 5) ? rnd + !!rest : rnd;
+}
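
A worked example of the return value (the numbers are illustrative, not from the source): shifting the single-word base 10**9 coefficient 123456789 right by 4 digits yields dest = { 12345 }; the first discarded digit is rnd = 6 and the remaining discarded digits 789 make rest nonzero, so the function returns 6, i.e. "more than half". Had the discarded digits been 5000, rnd would be 5 with rest == 0 and the return value 5, an exact tie.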
+
+
+/*********************************************************************/
+/* Calculations in base b */
+/*********************************************************************/
+
+/*
+ * Add v to w (len m). The calling function has to handle a possible
+ * final carry. Assumption: m > 0.
+ */
+mpd_uint_t
+_mpd_shortadd_b(mpd_uint_t *w, mpd_size_t m, mpd_uint_t v, mpd_uint_t b)
+{
+ mpd_uint_t s;
+ mpd_uint_t carry;
+ mpd_size_t i;
+
+ assert(m > 0);
+
+ /* add v to w */
+ s = w[0] + v;
+ carry = (s < v) | (s >= b);
+ w[0] = carry ? s-b : s;
+
+ /* if there is a carry, propagate it */
+ for (i = 1; carry && i < m; i++) {
+ s = w[i] + carry;
+ carry = (s == b);
+ w[i] = carry ? 0 : s;
+ }
+
+ return carry;
+}
+
+/* w := product of u (len n) and v (single word) */
+void
+_mpd_shortmul_b(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n,
+ mpd_uint_t v, mpd_uint_t b)
+{
+ mpd_uint_t hi, lo;
+ mpd_uint_t carry = 0;
+ mpd_size_t i;
+
+ assert(n > 0);
+
+ for (i=0; i < n; i++) {
+
+ _mpd_mul_words(&hi, &lo, u[i], v);
+ lo = carry + lo;
+ if (lo < carry) hi++;
+
+ _mpd_div_words(&carry, &w[i], hi, lo, b);
+ }
+ w[i] = carry;
+}
+
+/*
+ * Knuth, TAOCP Volume 2, 4.3.1, exercise 16:
+ * w := quotient of u (len n) divided by a single word v
+ */
+mpd_uint_t
+_mpd_shortdiv_b(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n,
+ mpd_uint_t v, mpd_uint_t b)
+{
+ mpd_uint_t hi, lo;
+ mpd_uint_t rem = 0;
+ mpd_size_t i;
+
+ assert(n > 0);
+
+ for (i=n-1; i != MPD_SIZE_MAX; i--) {
+
+ _mpd_mul_words(&hi, &lo, rem, b);
+ lo = u[i] + lo;
+ if (lo < u[i]) hi++;
+
+ _mpd_div_words(&w[i], &rem, hi, lo, v);
+ }
+
+ return rem;
+}
+
+
+
diff --git a/Modules/_decimal/libmpdec/basearith.h b/Modules/_decimal/libmpdec/basearith.h
new file mode 100644
index 0000000000..94de862b6c
--- /dev/null
+++ b/Modules/_decimal/libmpdec/basearith.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef BASEARITH_H
+#define BASEARITH_H
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include "typearith.h"
+
+
+mpd_uint_t _mpd_baseadd(mpd_uint_t *w, const mpd_uint_t *u, const mpd_uint_t *v,
+ mpd_size_t m, mpd_size_t n);
+void _mpd_baseaddto(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n);
+mpd_uint_t _mpd_shortadd(mpd_uint_t *w, mpd_size_t m, mpd_uint_t v);
+mpd_uint_t _mpd_shortadd_b(mpd_uint_t *w, mpd_size_t m, mpd_uint_t v,
+ mpd_uint_t b);
+mpd_uint_t _mpd_baseincr(mpd_uint_t *u, mpd_size_t n);
+void _mpd_basesub(mpd_uint_t *w, const mpd_uint_t *u, const mpd_uint_t *v,
+ mpd_size_t m, mpd_size_t n);
+void _mpd_basesubfrom(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n);
+void _mpd_basemul(mpd_uint_t *w, const mpd_uint_t *u, const mpd_uint_t *v,
+ mpd_size_t m, mpd_size_t n);
+void _mpd_shortmul(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n,
+ mpd_uint_t v);
+void _mpd_shortmul_b(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n,
+ mpd_uint_t v, mpd_uint_t b);
+mpd_uint_t _mpd_shortdiv(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n,
+ mpd_uint_t v);
+mpd_uint_t _mpd_shortdiv_b(mpd_uint_t *w, const mpd_uint_t *u, mpd_size_t n,
+ mpd_uint_t v, mpd_uint_t b);
+int _mpd_basedivmod(mpd_uint_t *q, mpd_uint_t *r, const mpd_uint_t *uconst,
+ const mpd_uint_t *vconst, mpd_size_t nplusm, mpd_size_t n);
+void _mpd_baseshiftl(mpd_uint_t *dest, mpd_uint_t *src, mpd_size_t n,
+ mpd_size_t m, mpd_size_t shift);
+mpd_uint_t _mpd_baseshiftr(mpd_uint_t *dest, mpd_uint_t *src, mpd_size_t slen,
+ mpd_size_t shift);
+
+
+
+#ifdef CONFIG_64
+extern const mpd_uint_t mprime_rdx;
+
+/*
+ * Algorithm from: Division by Invariant Integers using Multiplication,
+ * T. Granlund and P. L. Montgomery, Proceedings of the SIGPLAN '94
+ * Conference on Programming Language Design and Implementation.
+ *
+ * http://gmplib.org/~tege/divcnst-pldi94.pdf
+ *
+ * Variables from the paper and their translations (See section 8):
+ *
+ * N := 64
+ * d := MPD_RADIX
+ * l := 64
+ * m' := floor((2**(64+64) - 1)/MPD_RADIX) - 2**64
+ *
+ * Since N-l == 0:
+ *
+ * dnorm := d
+ * n2 := hi
+ * n10 := lo
+ *
+ * ACL2 proof: mpd-div-words-r-correct
+ */
+static inline void
+_mpd_div_words_r(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t hi, mpd_uint_t lo)
+{
+ mpd_uint_t n_adj, h, l, t;
+ mpd_uint_t n1_neg;
+
+ /* n1_neg = if lo >= 2**63 then MPD_UINT_MAX else 0 */
+ n1_neg = (lo & (1ULL<<63)) ? MPD_UINT_MAX : 0;
+ /* n_adj = if lo >= 2**63 then lo+MPD_RADIX else lo */
+ n_adj = lo + (n1_neg & MPD_RADIX);
+
+ /* (h, l) = if lo >= 2**63 then m'*(hi+1) else m'*hi */
+ _mpd_mul_words(&h, &l, mprime_rdx, hi-n1_neg);
+ l = l + n_adj;
+ if (l < n_adj) h++;
+ t = h + hi;
+ /* At this point t == qest, with q == qest or q == qest+1:
+ * 1) 0 <= 2**64*hi + lo - qest*MPD_RADIX < 2*MPD_RADIX
+ */
+
+ /* t = 2**64-1 - qest = 2**64 - (qest+1) */
+ t = MPD_UINT_MAX - t;
+
+ /* (h, l) = 2**64*MPD_RADIX - (qest+1)*MPD_RADIX */
+ _mpd_mul_words(&h, &l, t, MPD_RADIX);
+ l = l + lo;
+ if (l < lo) h++;
+ h += hi;
+ h -= MPD_RADIX;
+ /* (h, l) = 2**64*hi + lo - (qest+1)*MPD_RADIX (mod 2**128)
+ * Case q == qest+1:
+ * a) h == 0, l == r
+ * b) q := h - t == qest+1
+ * c) r := l
+ * Case q == qest:
+ * a) h == MPD_UINT_MAX, l == 2**64-(MPD_RADIX-r)
+ * b) q := h - t == qest
+ * c) r := l + MPD_RADIX = r
+ */
+
+ *q = (h - t);
+ *r = l + (MPD_RADIX & h);
+}
+#else
+static inline void
+_mpd_div_words_r(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t hi, mpd_uint_t lo)
+{
+ _mpd_div_words(q, r, hi, lo, MPD_RADIX);
+}
+#endif
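
A small sketch that recomputes m' and checks it against mprime_rdx from constants.c; it assumes a 64-bit build (MPD_RADIX == 10**19) and a compiler that provides unsigned __int128, such as gcc or clang:

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    int main(void)
    {
        const uint64_t radix = 10000000000000000000ULL;      /* 10**19     */
        unsigned __int128 all_ones = ~(unsigned __int128)0;  /* 2**128 - 1 */
        uint64_t mprime =
            (uint64_t)(all_ones / radix - ((unsigned __int128)1 << 64));

        assert(mprime == 15581492618384294730ULL);           /* mprime_rdx */
        printf("m' = %llu\n", (unsigned long long)mprime);
        return 0;
    }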
+
+
+/* Multiply two single base MPD_RADIX words, store result in array w[2]. */
+static inline void
+_mpd_singlemul(mpd_uint_t w[2], mpd_uint_t u, mpd_uint_t v)
+{
+ mpd_uint_t hi, lo;
+
+ _mpd_mul_words(&hi, &lo, u, v);
+ _mpd_div_words_r(&w[1], &w[0], hi, lo);
+}
+
+/* Multiply u (len 2) and v (len m, 1 <= m <= 2). */
+static inline void
+_mpd_mul_2_le2(mpd_uint_t w[4], mpd_uint_t u[2], mpd_uint_t v[2], mpd_ssize_t m)
+{
+ mpd_uint_t hi, lo;
+
+ _mpd_mul_words(&hi, &lo, u[0], v[0]);
+ _mpd_div_words_r(&w[1], &w[0], hi, lo);
+
+ _mpd_mul_words(&hi, &lo, u[1], v[0]);
+ lo = w[1] + lo;
+ if (lo < w[1]) hi++;
+ _mpd_div_words_r(&w[2], &w[1], hi, lo);
+ if (m == 1) return;
+
+ _mpd_mul_words(&hi, &lo, u[0], v[1]);
+ lo = w[1] + lo;
+ if (lo < w[1]) hi++;
+ _mpd_div_words_r(&w[3], &w[1], hi, lo);
+
+ _mpd_mul_words(&hi, &lo, u[1], v[1]);
+ lo = w[2] + lo;
+ if (lo < w[2]) hi++;
+ lo = w[3] + lo;
+ if (lo < w[3]) hi++;
+ _mpd_div_words_r(&w[3], &w[2], hi, lo);
+}
+
+
+/*
+ * Test if all words from data[len-1] to data[0] are zero. If len is 0, nothing
+ * is tested and the coefficient is regarded as "all zero".
+ */
+static inline int
+_mpd_isallzero(const mpd_uint_t *data, mpd_ssize_t len)
+{
+ while (--len >= 0) {
+ if (data[len] != 0) return 0;
+ }
+ return 1;
+}
+
+/*
+ * Test if all full words from data[len-1] to data[0] are MPD_RADIX-1
+ * (all nines). Return true if len == 0.
+ */
+static inline int
+_mpd_isallnine(const mpd_uint_t *data, mpd_ssize_t len)
+{
+ while (--len >= 0) {
+ if (data[len] != MPD_RADIX-1) return 0;
+ }
+ return 1;
+}
+
+
+#endif /* BASEARITH_H */
+
+
+
diff --git a/Modules/_decimal/libmpdec/bits.h b/Modules/_decimal/libmpdec/bits.h
new file mode 100644
index 0000000000..949ec944ca
--- /dev/null
+++ b/Modules/_decimal/libmpdec/bits.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef BITS_H
+#define BITS_H
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+
+
+/* Check if n is a power of 2. */
+static inline int
+ispower2(mpd_size_t n)
+{
+ return n != 0 && (n & (n-1)) == 0;
+}
+
+#if defined(ANSI)
+/*
+ * Return the most significant bit position of n from 0 to 31 (63).
+ * Assumptions: n != 0.
+ */
+static inline int
+mpd_bsr(mpd_size_t n)
+{
+ int pos = 0;
+ mpd_size_t tmp;
+
+#ifdef CONFIG_64
+ tmp = n >> 32;
+ if (tmp != 0) { n = tmp; pos += 32; }
+#endif
+ tmp = n >> 16;
+ if (tmp != 0) { n = tmp; pos += 16; }
+ tmp = n >> 8;
+ if (tmp != 0) { n = tmp; pos += 8; }
+ tmp = n >> 4;
+ if (tmp != 0) { n = tmp; pos += 4; }
+ tmp = n >> 2;
+ if (tmp != 0) { n = tmp; pos += 2; }
+ tmp = n >> 1;
+ if (tmp != 0) { n = tmp; pos += 1; }
+
+ return pos + (int)n - 1;
+}
+
+/*
+ * Return the least significant bit position of n from 0 to 31 (63).
+ * Assumptions: n != 0.
+ */
+static inline int
+mpd_bsf(mpd_size_t n)
+{
+ int pos;
+
+#ifdef CONFIG_64
+ pos = 63;
+ if (n & 0x00000000FFFFFFFFULL) { pos -= 32; } else { n >>= 32; }
+ if (n & 0x000000000000FFFFULL) { pos -= 16; } else { n >>= 16; }
+ if (n & 0x00000000000000FFULL) { pos -= 8; } else { n >>= 8; }
+ if (n & 0x000000000000000FULL) { pos -= 4; } else { n >>= 4; }
+ if (n & 0x0000000000000003ULL) { pos -= 2; } else { n >>= 2; }
+ if (n & 0x0000000000000001ULL) { pos -= 1; }
+#else
+ pos = 31;
+ if (n & 0x000000000000FFFFUL) { pos -= 16; } else { n >>= 16; }
+ if (n & 0x00000000000000FFUL) { pos -= 8; } else { n >>= 8; }
+ if (n & 0x000000000000000FUL) { pos -= 4; } else { n >>= 4; }
+ if (n & 0x0000000000000003UL) { pos -= 2; } else { n >>= 2; }
+ if (n & 0x0000000000000001UL) { pos -= 1; }
+#endif
+ return pos;
+}
+/* END ANSI */
+
+#elif defined(ASM)
+/*
+ * Bit scan reverse. Assumptions: a != 0.
+ */
+static inline int
+mpd_bsr(mpd_size_t a)
+{
+ mpd_size_t retval;
+
+ __asm__ (
+#ifdef CONFIG_64
+ "bsrq %1, %0\n\t"
+#else
+ "bsr %1, %0\n\t"
+#endif
+ :"=r" (retval)
+ :"r" (a)
+ :"cc"
+ );
+
+ return (int)retval;
+}
+
+/*
+ * Bit scan forward. Assumptions: a != 0.
+ */
+static inline int
+mpd_bsf(mpd_size_t a)
+{
+ mpd_size_t retval;
+
+ __asm__ (
+#ifdef CONFIG_64
+ "bsfq %1, %0\n\t"
+#else
+ "bsf %1, %0\n\t"
+#endif
+ :"=r" (retval)
+ :"r" (a)
+ :"cc"
+ );
+
+ return (int)retval;
+}
+/* END ASM */
+
+#elif defined(MASM)
+#include <intrin.h>
+/*
+ * Bit scan reverse. Assumptions: a != 0.
+ */
+static inline int __cdecl
+mpd_bsr(mpd_size_t a)
+{
+ unsigned long retval;
+
+#ifdef CONFIG_64
+ _BitScanReverse64(&retval, a);
+#else
+ _BitScanReverse(&retval, a);
+#endif
+
+ return (int)retval;
+}
+
+/*
+ * Bit scan forward. Assumptions: a != 0.
+ */
+static inline int __cdecl
+mpd_bsf(mpd_size_t a)
+{
+ unsigned long retval;
+
+#ifdef CONFIG_64
+ _BitScanForward64(&retval, a);
+#else
+ _BitScanForward(&retval, a);
+#endif
+
+ return (int)retval;
+}
+/* END MASM (_MSC_VER) */
+#else
+ #error "missing preprocessor definitions"
+#endif /* BSR/BSF */
+
+
+#endif /* BITS_H */
+
+
+
diff --git a/Modules/_decimal/libmpdec/constants.c b/Modules/_decimal/libmpdec/constants.c
new file mode 100644
index 0000000000..92f5891b56
--- /dev/null
+++ b/Modules/_decimal/libmpdec/constants.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include "constants.h"
+
+
+#if defined(CONFIG_64)
+
+ /* number-theory.c */
+ const mpd_uint_t mpd_moduli[3] = {
+ 18446744069414584321ULL, 18446744056529682433ULL, 18446742974197923841ULL
+ };
+ const mpd_uint_t mpd_roots[3] = {7ULL, 10ULL, 19ULL};
+
+ /* crt.c */
+ const mpd_uint_t INV_P1_MOD_P2 = 18446744055098026669ULL;
+ const mpd_uint_t INV_P1P2_MOD_P3 = 287064143708160ULL;
+ const mpd_uint_t LH_P1P2 = 18446744052234715137ULL; /* (P1*P2) % 2^64 */
+ const mpd_uint_t UH_P1P2 = 18446744052234715141ULL; /* (P1*P2) / 2^64 */
+
+ /* transpose.c */
+ const mpd_size_t mpd_bits[64] = {
+ 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384,
+ 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608,
+ 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824,
+ 2147483648ULL, 4294967296ULL, 8589934592ULL, 17179869184ULL, 34359738368ULL,
+ 68719476736ULL, 137438953472ULL, 274877906944ULL, 549755813888ULL,
+ 1099511627776ULL, 2199023255552ULL, 4398046511104ULL, 8796093022208ULL,
+ 17592186044416ULL, 35184372088832ULL, 70368744177664ULL, 140737488355328ULL,
+ 281474976710656ULL, 562949953421312ULL, 1125899906842624ULL,
+ 2251799813685248ULL, 4503599627370496ULL, 9007199254740992ULL,
+ 18014398509481984ULL, 36028797018963968ULL, 72057594037927936ULL,
+ 144115188075855872ULL, 288230376151711744ULL, 576460752303423488ULL,
+ 1152921504606846976ULL, 2305843009213693952ULL, 4611686018427387904ULL,
+ 9223372036854775808ULL
+ };
+
+ /* mpdecimal.c */
+ const mpd_uint_t mpd_pow10[MPD_RDIGITS+1] = {
+ 1,10,100,1000,10000,100000,1000000,10000000,100000000,1000000000,
+ 10000000000ULL,100000000000ULL,1000000000000ULL,10000000000000ULL,
+ 100000000000000ULL,1000000000000000ULL,10000000000000000ULL,
+ 100000000000000000ULL,1000000000000000000ULL,10000000000000000000ULL
+ };
+
+ /* magic number for constant division by MPD_RADIX */
+ const mpd_uint_t mprime_rdx = 15581492618384294730ULL;
+
+#elif defined(CONFIG_32)
+
+ /* number-theory.c */
+ const mpd_uint_t mpd_moduli[3] = {2113929217UL, 2013265921UL, 1811939329UL};
+ const mpd_uint_t mpd_roots[3] = {5UL, 31UL, 13UL};
+
+ /* PentiumPro modular multiplication: These constants have to be loaded as
+ * 80 bit long doubles, which are not supported by certain compilers. */
+ const uint32_t mpd_invmoduli[3][3] = {
+ {4293885170U, 2181570688U, 16352U}, /* ((long double) 1 / 2113929217UL) */
+ {1698898177U, 2290649223U, 16352U}, /* ((long double) 1 / 2013265921UL) */
+ {2716021846U, 2545165803U, 16352U} /* ((long double) 1 / 1811939329UL) */
+ };
+
+ const float MPD_TWO63 = 9223372036854775808.0; /* 2^63 */
+
+ /* crt.c */
+ const mpd_uint_t INV_P1_MOD_P2 = 2013265901UL;
+ const mpd_uint_t INV_P1P2_MOD_P3 = 54UL;
+ const mpd_uint_t LH_P1P2 = 4127195137UL; /* (P1*P2) % 2^32 */
+ const mpd_uint_t UH_P1P2 = 990904320UL; /* (P1*P2) / 2^32 */
+
+ /* transpose.c */
+ const mpd_size_t mpd_bits[32] = {
+ 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384,
+ 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608,
+ 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824,
+ 2147483648UL
+ };
+
+ /* mpdecimal.c */
+ const mpd_uint_t mpd_pow10[MPD_RDIGITS+1] = {
+ 1,10,100,1000,10000,100000,1000000,10000000,100000000,1000000000
+ };
+
+#else
+ #error "CONFIG_64 or CONFIG_32 must be defined."
+#endif
+
+const char *mpd_round_string[MPD_ROUND_GUARD] = {
+ "ROUND_UP", /* round away from 0 */
+ "ROUND_DOWN", /* round toward 0 (truncate) */
+ "ROUND_CEILING", /* round toward +infinity */
+ "ROUND_FLOOR", /* round toward -infinity */
+ "ROUND_HALF_UP", /* 0.5 is rounded up */
+ "ROUND_HALF_DOWN", /* 0.5 is rounded down */
+ "ROUND_HALF_EVEN", /* 0.5 is rounded to even */
+ "ROUND_05UP", /* round zero or five away from 0 */
+ "ROUND_TRUNC", /* truncate, but set infinity */
+};
+
+const char *mpd_clamp_string[MPD_CLAMP_GUARD] = {
+ "CLAMP_DEFAULT",
+ "CLAMP_IEEE_754"
+};
+
+
diff --git a/Modules/_decimal/libmpdec/constants.h b/Modules/_decimal/libmpdec/constants.h
new file mode 100644
index 0000000000..2d63d7e3c8
--- /dev/null
+++ b/Modules/_decimal/libmpdec/constants.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef CONSTANTS_H
+#define CONSTANTS_H
+
+
+#include "mpdecimal.h"
+
+
+/* choice of optimized functions */
+#if defined(CONFIG_64)
+/* x64 */
+ #define MULMOD(a, b) x64_mulmod(a, b, umod)
+ #define MULMOD2C(a0, a1, w) x64_mulmod2c(a0, a1, w, umod)
+ #define MULMOD2(a0, b0, a1, b1) x64_mulmod2(a0, b0, a1, b1, umod)
+ #define POWMOD(base, exp) x64_powmod(base, exp, umod)
+ #define SETMODULUS(modnum) std_setmodulus(modnum, &umod)
+ #define SIZE3_NTT(x0, x1, x2, w3table) std_size3_ntt(x0, x1, x2, w3table, umod)
+#elif defined(PPRO)
+/* PentiumPro (or later) gcc inline asm */
+ #define MULMOD(a, b) ppro_mulmod(a, b, &dmod, dinvmod)
+ #define MULMOD2C(a0, a1, w) ppro_mulmod2c(a0, a1, w, &dmod, dinvmod)
+ #define MULMOD2(a0, b0, a1, b1) ppro_mulmod2(a0, b0, a1, b1, &dmod, dinvmod)
+ #define POWMOD(base, exp) ppro_powmod(base, exp, &dmod, dinvmod)
+ #define SETMODULUS(modnum) ppro_setmodulus(modnum, &umod, &dmod, dinvmod)
+ #define SIZE3_NTT(x0, x1, x2, w3table) ppro_size3_ntt(x0, x1, x2, w3table, umod, &dmod, dinvmod)
+#else
+ /* ANSI C99 */
+ #define MULMOD(a, b) std_mulmod(a, b, umod)
+ #define MULMOD2C(a0, a1, w) std_mulmod2c(a0, a1, w, umod)
+ #define MULMOD2(a0, b0, a1, b1) std_mulmod2(a0, b0, a1, b1, umod)
+ #define POWMOD(base, exp) std_powmod(base, exp, umod)
+ #define SETMODULUS(modnum) std_setmodulus(modnum, &umod)
+ #define SIZE3_NTT(x0, x1, x2, w3table) std_size3_ntt(x0, x1, x2, w3table, umod)
+#endif
+
+/* PentiumPro (or later) gcc inline asm */
+extern const float MPD_TWO63;
+extern const uint32_t mpd_invmoduli[3][3];
+
+enum {P1, P2, P3};
+
+extern const mpd_uint_t mpd_moduli[];
+extern const mpd_uint_t mpd_roots[];
+extern const mpd_size_t mpd_bits[];
+extern const mpd_uint_t mpd_pow10[];
+
+extern const mpd_uint_t INV_P1_MOD_P2;
+extern const mpd_uint_t INV_P1P2_MOD_P3;
+extern const mpd_uint_t LH_P1P2;
+extern const mpd_uint_t UH_P1P2;
+
+
+#endif /* CONSTANTS_H */
+
+
+
diff --git a/Modules/_decimal/libmpdec/context.c b/Modules/_decimal/libmpdec/context.c
new file mode 100644
index 0000000000..159f88c339
--- /dev/null
+++ b/Modules/_decimal/libmpdec/context.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+
+
+void
+mpd_dflt_traphandler(mpd_context_t *ctx UNUSED)
+{
+ raise(SIGFPE);
+}
+
+void (* mpd_traphandler)(mpd_context_t *) = mpd_dflt_traphandler;
+
+
+/* Set guaranteed minimum number of coefficient words. The function may
+ be used once at program start. Setting MPD_MINALLOC to out-of-bounds
+ values is a catastrophic error, so in that case the function exits rather
+ than relying on the user to check a return value. */
+void
+mpd_setminalloc(mpd_ssize_t n)
+{
+ static int minalloc_is_set = 0;
+
+ if (minalloc_is_set) {
+ mpd_err_warn("mpd_setminalloc: ignoring request to set "
+ "MPD_MINALLOC a second time\n");
+ return;
+ }
+ if (n < MPD_MINALLOC_MIN || n > MPD_MINALLOC_MAX) {
+ mpd_err_fatal("illegal value for MPD_MINALLOC"); /* GCOV_NOT_REACHED */
+ }
+ MPD_MINALLOC = n;
+ minalloc_is_set = 1;
+}
+
+void
+mpd_init(mpd_context_t *ctx, mpd_ssize_t prec)
+{
+ mpd_ssize_t ideal_minalloc;
+
+ mpd_defaultcontext(ctx);
+
+ if (!mpd_qsetprec(ctx, prec)) {
+ mpd_addstatus_raise(ctx, MPD_Invalid_context);
+ return;
+ }
+
+ ideal_minalloc = 2 * ((prec+MPD_RDIGITS-1) / MPD_RDIGITS);
+ if (ideal_minalloc < MPD_MINALLOC_MIN) ideal_minalloc = MPD_MINALLOC_MIN;
+ if (ideal_minalloc > MPD_MINALLOC_MAX) ideal_minalloc = MPD_MINALLOC_MAX;
+
+ mpd_setminalloc(ideal_minalloc);
+}
+
+void
+mpd_maxcontext(mpd_context_t *ctx)
+{
+ ctx->prec=MPD_MAX_PREC;
+ ctx->emax=MPD_MAX_EMAX;
+ ctx->emin=MPD_MIN_EMIN;
+ ctx->round=MPD_ROUND_HALF_EVEN;
+ ctx->traps=MPD_Traps;
+ ctx->status=0;
+ ctx->newtrap=0;
+ ctx->clamp=0;
+ ctx->allcr=1;
+}
+
+void
+mpd_defaultcontext(mpd_context_t *ctx)
+{
+ ctx->prec=2*MPD_RDIGITS;
+ ctx->emax=MPD_MAX_EMAX;
+ ctx->emin=MPD_MIN_EMIN;
+ ctx->round=MPD_ROUND_HALF_UP;
+ ctx->traps=MPD_Traps;
+ ctx->status=0;
+ ctx->newtrap=0;
+ ctx->clamp=0;
+ ctx->allcr=1;
+}
+
+void
+mpd_basiccontext(mpd_context_t *ctx)
+{
+ ctx->prec=9;
+ ctx->emax=MPD_MAX_EMAX;
+ ctx->emin=MPD_MIN_EMIN;
+ ctx->round=MPD_ROUND_HALF_UP;
+ ctx->traps=MPD_Traps|MPD_Clamped;
+ ctx->status=0;
+ ctx->newtrap=0;
+ ctx->clamp=0;
+ ctx->allcr=1;
+}
+
+int
+mpd_ieee_context(mpd_context_t *ctx, int bits)
+{
+ if (bits <= 0 || bits > MPD_IEEE_CONTEXT_MAX_BITS || bits % 32) {
+ return -1;
+ }
+
+ ctx->prec = 9 * (bits/32) - 2;
+ ctx->emax = 3 * ((mpd_ssize_t)1<<(bits/16+3));
+ ctx->emin = 1 - ctx->emax;
+ ctx->round=MPD_ROUND_HALF_EVEN;
+ ctx->traps=0;
+ ctx->status=0;
+ ctx->newtrap=0;
+ ctx->clamp=1;
+ ctx->allcr=1;
+
+ return 0;
+}
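
For illustration, plugging the standard interchange widths into the formulas above: bits = 64 gives prec = 9*2 - 2 = 16 and emax = 3*2**7 = 384 (IEEE 754-2008 decimal64); bits = 128 gives prec = 9*4 - 2 = 34 and emax = 3*2**11 = 6144 (decimal128); emin is 1 - emax in both cases.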
+
+mpd_ssize_t
+mpd_getprec(const mpd_context_t *ctx)
+{
+ return ctx->prec;
+}
+
+mpd_ssize_t
+mpd_getemax(const mpd_context_t *ctx)
+{
+ return ctx->emax;
+}
+
+mpd_ssize_t
+mpd_getemin(const mpd_context_t *ctx)
+{
+ return ctx->emin;
+}
+
+int
+mpd_getround(const mpd_context_t *ctx)
+{
+ return ctx->round;
+}
+
+uint32_t
+mpd_gettraps(const mpd_context_t *ctx)
+{
+ return ctx->traps;
+}
+
+uint32_t
+mpd_getstatus(const mpd_context_t *ctx)
+{
+ return ctx->status;
+}
+
+int
+mpd_getclamp(const mpd_context_t *ctx)
+{
+ return ctx->clamp;
+}
+
+int
+mpd_getcr(const mpd_context_t *ctx)
+{
+ return ctx->allcr;
+}
+
+
+int
+mpd_qsetprec(mpd_context_t *ctx, mpd_ssize_t prec)
+{
+ if (prec <= 0 || prec > MPD_MAX_PREC) {
+ return 0;
+ }
+ ctx->prec = prec;
+ return 1;
+}
+
+int
+mpd_qsetemax(mpd_context_t *ctx, mpd_ssize_t emax)
+{
+ if (emax < 0 || emax > MPD_MAX_EMAX) {
+ return 0;
+ }
+ ctx->emax = emax;
+ return 1;
+}
+
+int
+mpd_qsetemin(mpd_context_t *ctx, mpd_ssize_t emin)
+{
+ if (emin > 0 || emin < MPD_MIN_EMIN) {
+ return 0;
+ }
+ ctx->emin = emin;
+ return 1;
+}
+
+int
+mpd_qsetround(mpd_context_t *ctx, int round)
+{
+ if (!(0 <= round && round < MPD_ROUND_GUARD)) {
+ return 0;
+ }
+ ctx->round = round;
+ return 1;
+}
+
+int
+mpd_qsettraps(mpd_context_t *ctx, uint32_t traps)
+{
+ if (traps > MPD_Max_status) {
+ return 0;
+ }
+ ctx->traps = traps;
+ return 1;
+}
+
+int
+mpd_qsetstatus(mpd_context_t *ctx, uint32_t flags)
+{
+ if (flags > MPD_Max_status) {
+ return 0;
+ }
+ ctx->status = flags;
+ return 1;
+}
+
+int
+mpd_qsetclamp(mpd_context_t *ctx, int c)
+{
+ if (c != 0 && c != 1) {
+ return 0;
+ }
+ ctx->clamp = c;
+ return 1;
+}
+
+int
+mpd_qsetcr(mpd_context_t *ctx, int c)
+{
+ if (c != 0 && c != 1) {
+ return 0;
+ }
+ ctx->allcr = c;
+ return 1;
+}
+
+
+void
+mpd_addstatus_raise(mpd_context_t *ctx, uint32_t flags)
+{
+ ctx->status |= flags;
+ if (flags&ctx->traps) {
+ ctx->newtrap = (flags&ctx->traps);
+ mpd_traphandler(ctx);
+ }
+}
+
+
diff --git a/Modules/_decimal/libmpdec/convolute.c b/Modules/_decimal/libmpdec/convolute.c
new file mode 100644
index 0000000000..b5fe131b07
--- /dev/null
+++ b/Modules/_decimal/libmpdec/convolute.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include "bits.h"
+#include "constants.h"
+#include "fnt.h"
+#include "fourstep.h"
+#include "numbertheory.h"
+#include "sixstep.h"
+#include "umodarith.h"
+#include "convolute.h"
+
+
+/* Bignum: Fast convolution using the Number Theoretic Transform. Used for
+ the multiplication of very large coefficients. */
+
+
+/* Convolute the data in c1 and c2. Result is in c1. */
+int
+fnt_convolute(mpd_uint_t *c1, mpd_uint_t *c2, mpd_size_t n, int modnum)
+{
+ int (*fnt)(mpd_uint_t *, mpd_size_t, int);
+ int (*inv_fnt)(mpd_uint_t *, mpd_size_t, int);
+#ifdef PPRO
+ double dmod;
+ uint32_t dinvmod[3];
+#endif
+ mpd_uint_t n_inv, umod;
+ mpd_size_t i;
+
+
+ SETMODULUS(modnum);
+ n_inv = POWMOD(n, (umod-2));
+
+ if (ispower2(n)) {
+ if (n > SIX_STEP_THRESHOLD) {
+ fnt = six_step_fnt;
+ inv_fnt = inv_six_step_fnt;
+ }
+ else {
+ fnt = std_fnt;
+ inv_fnt = std_inv_fnt;
+ }
+ }
+ else {
+ fnt = four_step_fnt;
+ inv_fnt = inv_four_step_fnt;
+ }
+
+ if (!fnt(c1, n, modnum)) {
+ return 0;
+ }
+ if (!fnt(c2, n, modnum)) {
+ return 0;
+ }
+ for (i = 0; i < n-1; i += 2) {
+ mpd_uint_t x0 = c1[i];
+ mpd_uint_t y0 = c2[i];
+ mpd_uint_t x1 = c1[i+1];
+ mpd_uint_t y1 = c2[i+1];
+ MULMOD2(&x0, y0, &x1, y1);
+ c1[i] = x0;
+ c1[i+1] = x1;
+ }
+
+ if (!inv_fnt(c1, n, modnum)) {
+ return 0;
+ }
+ for (i = 0; i < n-3; i += 4) {
+ mpd_uint_t x0 = c1[i];
+ mpd_uint_t x1 = c1[i+1];
+ mpd_uint_t x2 = c1[i+2];
+ mpd_uint_t x3 = c1[i+3];
+ MULMOD2C(&x0, &x1, n_inv);
+ MULMOD2C(&x2, &x3, n_inv);
+ c1[i] = x0;
+ c1[i+1] = x1;
+ c1[i+2] = x2;
+ c1[i+3] = x3;
+ }
+
+ return 1;
+}
+
+/* Autoconvolute the data in c1. Result is in c1. */
+int
+fnt_autoconvolute(mpd_uint_t *c1, mpd_size_t n, int modnum)
+{
+ int (*fnt)(mpd_uint_t *, mpd_size_t, int);
+ int (*inv_fnt)(mpd_uint_t *, mpd_size_t, int);
+#ifdef PPRO
+ double dmod;
+ uint32_t dinvmod[3];
+#endif
+ mpd_uint_t n_inv, umod;
+ mpd_size_t i;
+
+
+ SETMODULUS(modnum);
+ n_inv = POWMOD(n, (umod-2));
+
+ if (ispower2(n)) {
+ if (n > SIX_STEP_THRESHOLD) {
+ fnt = six_step_fnt;
+ inv_fnt = inv_six_step_fnt;
+ }
+ else {
+ fnt = std_fnt;
+ inv_fnt = std_inv_fnt;
+ }
+ }
+ else {
+ fnt = four_step_fnt;
+ inv_fnt = inv_four_step_fnt;
+ }
+
+ if (!fnt(c1, n, modnum)) {
+ return 0;
+ }
+ for (i = 0; i < n-1; i += 2) {
+ mpd_uint_t x0 = c1[i];
+ mpd_uint_t x1 = c1[i+1];
+ MULMOD2(&x0, x0, &x1, x1);
+ c1[i] = x0;
+ c1[i+1] = x1;
+ }
+
+ if (!inv_fnt(c1, n, modnum)) {
+ return 0;
+ }
+ for (i = 0; i < n-3; i += 4) {
+ mpd_uint_t x0 = c1[i];
+ mpd_uint_t x1 = c1[i+1];
+ mpd_uint_t x2 = c1[i+2];
+ mpd_uint_t x3 = c1[i+3];
+ MULMOD2C(&x0, &x1, n_inv);
+ MULMOD2C(&x2, &x3, n_inv);
+ c1[i] = x0;
+ c1[i+1] = x1;
+ c1[i+2] = x2;
+ c1[i+3] = x3;
+ }
+
+ return 1;
+}
+
+
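
For comparison, a self-contained sketch of the plain O(m*n) convolution that the transforms above replace; the function name is made up and the uint64_t accumulators would overflow for realistic coefficient lengths, so treat it purely as an illustration of what the convolution computes:

    #include <stdint.h>
    #include <stddef.h>

    /* w must have room for m+n-1 words; u and v hold coefficients,
       least significant word first. */
    void
    naive_convolute(uint64_t *w, const uint64_t *u, size_t m,
                    const uint64_t *v, size_t n)
    {
        size_t i, j;

        for (i = 0; i < m+n-1; i++) {
            w[i] = 0;
        }
        for (i = 0; i < m; i++) {
            for (j = 0; j < n; j++) {
                w[i+j] += u[i] * v[j];   /* digit-product sums; carries are
                                            normalized in a later pass */
            }
        }
    }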
diff --git a/Modules/_decimal/libmpdec/convolute.h b/Modules/_decimal/libmpdec/convolute.h
new file mode 100644
index 0000000000..2f8d6d831b
--- /dev/null
+++ b/Modules/_decimal/libmpdec/convolute.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef CONVOLUTE_H
+#define CONVOLUTE_H
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+
+#define SIX_STEP_THRESHOLD 4096
+
+
+int fnt_convolute(mpd_uint_t *c1, mpd_uint_t *c2, mpd_size_t n, int modnum);
+int fnt_autoconvolute(mpd_uint_t *c1, mpd_size_t n, int modnum);
+
+
+#endif
diff --git a/Modules/_decimal/libmpdec/crt.c b/Modules/_decimal/libmpdec/crt.c
new file mode 100644
index 0000000000..c71c4ee8f8
--- /dev/null
+++ b/Modules/_decimal/libmpdec/crt.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include <assert.h>
+#include "numbertheory.h"
+#include "umodarith.h"
+#include "crt.h"
+
+
+/* Bignum: Chinese Remainder Theorem, extends the maximum transform length. */
+
+
+/* Multiply P1P2 by v, store result in w. */
+static inline void
+_crt_mulP1P2_3(mpd_uint_t w[3], mpd_uint_t v)
+{
+ mpd_uint_t hi1, hi2, lo;
+
+ _mpd_mul_words(&hi1, &lo, LH_P1P2, v);
+ w[0] = lo;
+
+ _mpd_mul_words(&hi2, &lo, UH_P1P2, v);
+ lo = hi1 + lo;
+ if (lo < hi1) hi2++;
+
+ w[1] = lo;
+ w[2] = hi2;
+}
+
+/* Add 3 words from v to w. The result is known to fit in w. */
+static inline void
+_crt_add3(mpd_uint_t w[3], mpd_uint_t v[3])
+{
+ mpd_uint_t carry;
+ mpd_uint_t s;
+
+ s = w[0] + v[0];
+ carry = (s < w[0]);
+ w[0] = s;
+
+ s = w[1] + (v[1] + carry);
+ carry = (s < w[1]);
+ w[1] = s;
+
+ w[2] = w[2] + (v[2] + carry);
+}
+
+/* Divide 3 words in u by v, store result in w, return remainder. */
+static inline mpd_uint_t
+_crt_div3(mpd_uint_t *w, const mpd_uint_t *u, mpd_uint_t v)
+{
+ mpd_uint_t r1 = u[2];
+ mpd_uint_t r2;
+
+ if (r1 < v) {
+ w[2] = 0;
+ }
+ else {
+ _mpd_div_word(&w[2], &r1, u[2], v); /* GCOV_NOT_REACHED */
+ }
+
+ _mpd_div_words(&w[1], &r2, r1, u[1], v);
+ _mpd_div_words(&w[0], &r1, r2, u[0], v);
+
+ return r1;
+}
+
+
+/*
+ * Chinese Remainder Theorem:
+ * Algorithm from Joerg Arndt, "Matters Computational",
+ * Chapter 37.4.1 [http://www.jjj.de/fxt/]
+ *
+ * See also Knuth, TAOCP, Volume 2, 4.3.2, exercise 7.
+ */
+
+/*
+ * CRT with carry: x1, x2, x3 contain numbers modulo p1, p2, p3. For each
+ * triple of members of the arrays, find the unique z modulo p1*p2*p3, with
+ * zmax = p1*p2*p3 - 1.
+ *
+ * In each iteration of the loop, split z into result[i] = z % MPD_RADIX
+ * and carry = z / MPD_RADIX. Let N be the size of carry[] and cmax the
+ * maximum carry.
+ *
+ * Limits for the 32-bit build:
+ *
+ * N = 2**96
+ * cmax = 7711435591312380274
+ *
+ * Limits for the 64-bit build:
+ *
+ * N = 2**192
+ * cmax = 627710135393475385904124401220046371710
+ *
+ * The following statements hold for both versions:
+ *
+ * 1) cmax + zmax < N, so the addition does not overflow.
+ *
+ * 2) (cmax + zmax) / MPD_RADIX == cmax.
+ *
+ * 3) If c <= cmax, then c_next = (c + zmax) / MPD_RADIX <= cmax.
+ */
+void
+crt3(mpd_uint_t *x1, mpd_uint_t *x2, mpd_uint_t *x3, mpd_size_t rsize)
+{
+ mpd_uint_t p1 = mpd_moduli[P1];
+ mpd_uint_t umod;
+#ifdef PPRO
+ double dmod;
+ uint32_t dinvmod[3];
+#endif
+ mpd_uint_t a1, a2, a3;
+ mpd_uint_t s;
+ mpd_uint_t z[3], t[3];
+ mpd_uint_t carry[3] = {0,0,0};
+ mpd_uint_t hi, lo;
+ mpd_size_t i;
+
+ for (i = 0; i < rsize; i++) {
+
+ a1 = x1[i];
+ a2 = x2[i];
+ a3 = x3[i];
+
+ SETMODULUS(P2);
+ s = ext_submod(a2, a1, umod);
+ s = MULMOD(s, INV_P1_MOD_P2);
+
+ _mpd_mul_words(&hi, &lo, s, p1);
+ lo = lo + a1;
+ if (lo < a1) hi++;
+
+ SETMODULUS(P3);
+ s = dw_submod(a3, hi, lo, umod);
+ s = MULMOD(s, INV_P1P2_MOD_P3);
+
+ z[0] = lo;
+ z[1] = hi;
+ z[2] = 0;
+
+ _crt_mulP1P2_3(t, s);
+ _crt_add3(z, t);
+ _crt_add3(carry, z);
+
+ x1[i] = _crt_div3(carry, carry, MPD_RADIX);
+ }
+
+ assert(carry[0] == 0 && carry[1] == 0 && carry[2] == 0);
+}
+
+
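The recombination in crt3() is incremental: the second residue lifts a1 to the unique value modulo p1*p2 (one double-word multiply-add), and the third residue lifts that to the unique z modulo p1*p2*p3, which _crt_div3() then splits into base-MPD_RADIX digits. The following standalone sketch performs the same two lifting steps with toy single-word moduli (7, 11, 13) and a brute-force modular inverse in place of the library's P1/P2/P3 and precomputed INV_* constants:

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    /* Toy moduli; the library uses three large word-sized primes. */
    #define Q1 7
    #define Q2 11
    #define Q3 13

    /* x**(-1) mod m by exhaustive search -- fine for toy moduli. */
    static uint64_t inv_mod(uint64_t x, uint64_t m)
    {
        for (uint64_t i = 1; i < m; i++)
            if ((x * i) % m == 1) return i;
        return 0; /* not reached for coprime inputs */
    }

    int main(void)
    {
        uint64_t z = 1000;                 /* value to reconstruct */
        uint64_t a1 = z % Q1, a2 = z % Q2, a3 = z % Q3;

        /* Step 1: lift a1 to the residue modulo Q1*Q2. */
        uint64_t s = ((a2 + Q2 - a1 % Q2) * inv_mod(Q1, Q2)) % Q2;
        uint64_t z12 = a1 + s * Q1;        /* z12 == z (mod Q1*Q2) */

        /* Step 2: fold in a3 to get the residue modulo Q1*Q2*Q3. */
        uint64_t t = ((a3 + Q3 - z12 % Q3) * inv_mod((Q1 * Q2) % Q3, Q3)) % Q3;
        uint64_t z123 = z12 + t * (Q1 * Q2);

        assert(z123 == z % (Q1 * Q2 * Q3));
        printf("reconstructed %llu from residues (%llu, %llu, %llu)\n",
               (unsigned long long)z123, (unsigned long long)a1,
               (unsigned long long)a2, (unsigned long long)a3);
        return 0;
    }

With z = 1000 the sketch recovers 1000 from the residues (6, 10, 12); crt3() does the same once per coefficient word, carrying the quotient of the division by MPD_RADIX into the next iteration.
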
diff --git a/Modules/_decimal/libmpdec/crt.h b/Modules/_decimal/libmpdec/crt.h
new file mode 100644
index 0000000000..0e03e5d077
--- /dev/null
+++ b/Modules/_decimal/libmpdec/crt.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef CRT_H
+#define CRT_H
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+
+
+void crt3(mpd_uint_t *x1, mpd_uint_t *x2, mpd_uint_t *x3, mpd_size_t nmemb);
+
+
+#endif
diff --git a/Modules/_decimal/libmpdec/difradix2.c b/Modules/_decimal/libmpdec/difradix2.c
new file mode 100644
index 0000000000..4ebb0b54b0
--- /dev/null
+++ b/Modules/_decimal/libmpdec/difradix2.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include <assert.h>
+#include "bits.h"
+#include "numbertheory.h"
+#include "umodarith.h"
+#include "difradix2.h"
+
+
+/* Bignum: The actual transform routine (decimation in frequency). */
+
+
+/*
+ * Generate index pairs (x, bitreverse(x)) and carry out the permutation.
+ * n must be a power of two.
+ * Algorithm due to Brent/Lehmann, see Joerg Arndt, "Matters Computational",
+ * Chapter 1.14.4. [http://www.jjj.de/fxt/]
+ */
+static inline void
+bitreverse_permute(mpd_uint_t a[], mpd_size_t n)
+{
+ mpd_size_t x = 0;
+ mpd_size_t r = 0;
+ mpd_uint_t t;
+
+ do { /* Invariant: r = bitreverse(x) */
+ if (r > x) {
+ t = a[x];
+ a[x] = a[r];
+ a[r] = t;
+ }
+ /* Flip trailing consecutive 1 bits and the first zero bit
+ * that absorbs a possible carry. */
+ x += 1;
+ /* Mirror the operation on r: Flip n_trailing_zeros(x)+1
+ high bits of r. */
+ r ^= (n - (n >> (mpd_bsf(x)+1)));
+ /* The loop invariant is preserved. */
+ } while (x < n);
+}
+
+
+/* Fast Number Theoretic Transform, decimation in frequency. */
+void
+fnt_dif2(mpd_uint_t a[], mpd_size_t n, struct fnt_params *tparams)
+{
+ mpd_uint_t *wtable = tparams->wtable;
+ mpd_uint_t umod;
+#ifdef PPRO
+ double dmod;
+ uint32_t dinvmod[3];
+#endif
+ mpd_uint_t u0, u1, v0, v1;
+ mpd_uint_t w, w0, w1, wstep;
+ mpd_size_t m, mhalf;
+ mpd_size_t j, r;
+
+
+ assert(ispower2(n));
+ assert(n >= 4);
+
+ SETMODULUS(tparams->modnum);
+
+ /* m == n */
+ mhalf = n / 2;
+ for (j = 0; j < mhalf; j += 2) {
+
+ w0 = wtable[j];
+ w1 = wtable[j+1];
+
+ u0 = a[j];
+ v0 = a[j+mhalf];
+
+ u1 = a[j+1];
+ v1 = a[j+1+mhalf];
+
+ a[j] = addmod(u0, v0, umod);
+ v0 = submod(u0, v0, umod);
+
+ a[j+1] = addmod(u1, v1, umod);
+ v1 = submod(u1, v1, umod);
+
+ MULMOD2(&v0, w0, &v1, w1);
+
+ a[j+mhalf] = v0;
+ a[j+1+mhalf] = v1;
+
+ }
+
+ wstep = 2;
+ for (m = n/2; m >= 2; m>>=1, wstep<<=1) {
+
+ mhalf = m / 2;
+
+ /* j == 0 */
+ for (r = 0; r < n; r += 2*m) {
+
+ u0 = a[r];
+ v0 = a[r+mhalf];
+
+ u1 = a[m+r];
+ v1 = a[m+r+mhalf];
+
+ a[r] = addmod(u0, v0, umod);
+ v0 = submod(u0, v0, umod);
+
+ a[m+r] = addmod(u1, v1, umod);
+ v1 = submod(u1, v1, umod);
+
+ a[r+mhalf] = v0;
+ a[m+r+mhalf] = v1;
+ }
+
+ for (j = 1; j < mhalf; j++) {
+
+ w = wtable[j*wstep];
+
+ for (r = 0; r < n; r += 2*m) {
+
+ u0 = a[r+j];
+ v0 = a[r+j+mhalf];
+
+ u1 = a[m+r+j];
+ v1 = a[m+r+j+mhalf];
+
+ a[r+j] = addmod(u0, v0, umod);
+ v0 = submod(u0, v0, umod);
+
+ a[m+r+j] = addmod(u1, v1, umod);
+ v1 = submod(u1, v1, umod);
+
+ MULMOD2C(&v0, &v1, w);
+
+ a[r+j+mhalf] = v0;
+ a[m+r+j+mhalf] = v1;
+ }
+
+ }
+
+ }
+
+ bitreverse_permute(a, n);
+}
+
+
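The permutation relies on the Brent/Lehmann observation quoted above: incrementing x flips a run of trailing one-bits plus the zero that absorbs the carry, so the bit-reversed counter r can be kept in step by flipping the mirrored run of high bits. A small self-contained check of that update rule against a naive bit reversal, with a portable ctz() standing in for mpd_bsf():

    #include <stdio.h>
    #include <assert.h>

    /* Naive reversal of the log2(n) low bits of x. */
    static unsigned naive_bitreverse(unsigned x, unsigned n)
    {
        unsigned r = 0;
        for (unsigned m = n >> 1; m; m >>= 1) {
            r = (r << 1) | (x & 1);
            x >>= 1;
        }
        return r;
    }

    /* Count trailing zero bits (x > 0); stand-in for mpd_bsf(). */
    static unsigned ctz(unsigned x)
    {
        unsigned k = 0;
        while ((x & 1) == 0) { x >>= 1; k++; }
        return k;
    }

    int main(void)
    {
        const unsigned n = 16;  /* must be a power of two */
        unsigned x = 0, r = 0;

        do {
            /* Invariant: r == bitreverse(x) over log2(n) bits. */
            assert(r == naive_bitreverse(x, n));
            x += 1;
            /* Mirror the increment: flip ctz(x)+1 high bits of r. */
            r ^= (n - (n >> (ctz(x) + 1)));
        } while (x < n);

        puts("invariant r == bitreverse(x) holds for all indices");
        return 0;
    }
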
diff --git a/Modules/_decimal/libmpdec/difradix2.h b/Modules/_decimal/libmpdec/difradix2.h
new file mode 100644
index 0000000000..759442a2bc
--- /dev/null
+++ b/Modules/_decimal/libmpdec/difradix2.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef DIF_RADIX2_H
+#define DIF_RADIX2_H
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include "numbertheory.h"
+
+
+void fnt_dif2(mpd_uint_t a[], mpd_size_t n, struct fnt_params *tparams);
+
+
+#endif
diff --git a/Modules/_decimal/libmpdec/fnt.c b/Modules/_decimal/libmpdec/fnt.c
new file mode 100644
index 0000000000..93116539b9
--- /dev/null
+++ b/Modules/_decimal/libmpdec/fnt.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include "bits.h"
+#include "difradix2.h"
+#include "numbertheory.h"
+#include "fnt.h"
+
+
+/* Bignum: Fast transform for medium-sized coefficients. */
+
+
+/* forward transform, sign = -1 */
+int
+std_fnt(mpd_uint_t *a, mpd_size_t n, int modnum)
+{
+ struct fnt_params *tparams;
+
+ assert(ispower2(n));
+ assert(n >= 4);
+ assert(n <= 3*MPD_MAXTRANSFORM_2N);
+
+ if ((tparams = _mpd_init_fnt_params(n, -1, modnum)) == NULL) {
+ return 0;
+ }
+ fnt_dif2(a, n, tparams);
+
+ mpd_free(tparams);
+ return 1;
+}
+
+/* reverse transform, sign = 1 */
+int
+std_inv_fnt(mpd_uint_t *a, mpd_size_t n, int modnum)
+{
+ struct fnt_params *tparams;
+
+ assert(ispower2(n));
+ assert(n >= 4);
+ assert(n <= 3*MPD_MAXTRANSFORM_2N);
+
+ if ((tparams = _mpd_init_fnt_params(n, 1, modnum)) == NULL) {
+ return 0;
+ }
+ fnt_dif2(a, n, tparams);
+
+ mpd_free(tparams);
+ return 1;
+}
+
+
+
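std_fnt() and std_inv_fnt() only set up the per-modulus transform parameters; the reason a forward and an inverse transform exist as a pair is the convolution theorem that fnt_convolute() (declared in convolute.h) builds on: transform both operands, multiply pointwise, transform back and scale by 1/n. A definition-level O(n**2) sketch over the toy prime 257, rather than the library's word-sized moduli and radix-2 routine, showing that this recipe reproduces the cyclic convolution:

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    #define P 257u  /* toy prime: 257 - 1 = 2**8, so a length-4 root of unity exists */
    #define N 4u

    static uint32_t powmod(uint32_t b, uint32_t e)
    {
        uint32_t r = 1;
        for (; e; e >>= 1, b = (b * b) % P)
            if (e & 1) r = (r * b) % P;
        return r;
    }

    /* Transform by definition: out[k] = sum_j a[j] * w**(j*k)  (mod P). */
    static void ntt(const uint32_t *a, uint32_t *out, uint32_t w)
    {
        for (uint32_t k = 0; k < N; k++) {
            uint32_t s = 0;
            for (uint32_t j = 0; j < N; j++)
                s = (s + a[j] * powmod(w, j * k)) % P;
            out[k] = s;
        }
    }

    int main(void)
    {
        uint32_t a[N] = {1, 2, 3, 4}, b[N] = {5, 6, 7, 8};
        uint32_t w = powmod(3, (P - 1) / N);  /* primitive N-th root of unity */
        uint32_t winv = powmod(w, P - 2);     /* kernel of the inverse transform */
        uint32_t ninv = powmod(N, P - 2);     /* 1/N mod P */
        uint32_t fa[N], fb[N], fc[N], c[N];

        ntt(a, fa, w);                        /* forward transforms */
        ntt(b, fb, w);
        for (uint32_t k = 0; k < N; k++)      /* pointwise product */
            fc[k] = (fa[k] * fb[k]) % P;
        ntt(fc, c, winv);                     /* inverse transform ... */
        for (uint32_t k = 0; k < N; k++)
            c[k] = (c[k] * ninv) % P;         /* ... scaled by 1/N */

        for (uint32_t k = 0; k < N; k++) {    /* compare with the direct result */
            uint32_t s = 0;
            for (uint32_t j = 0; j < N; j++)
                s = (s + a[j] * b[(k + N - j) % N]) % P;
            assert(c[k] == s);
        }
        puts("transform, pointwise multiply, inverse == cyclic convolution");
        return 0;
    }
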
diff --git a/Modules/_decimal/libmpdec/fnt.h b/Modules/_decimal/libmpdec/fnt.h
new file mode 100644
index 0000000000..2d701b6c8a
--- /dev/null
+++ b/Modules/_decimal/libmpdec/fnt.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef FNT_H
+#define FNT_H
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+
+
+int std_fnt(mpd_uint_t a[], mpd_size_t n, int modnum);
+int std_inv_fnt(mpd_uint_t a[], mpd_size_t n, int modnum);
+
+
+#endif
+
diff --git a/Modules/_decimal/libmpdec/fourstep.c b/Modules/_decimal/libmpdec/fourstep.c
new file mode 100644
index 0000000000..aa32c0d5cf
--- /dev/null
+++ b/Modules/_decimal/libmpdec/fourstep.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <assert.h>
+#include "numbertheory.h"
+#include "sixstep.h"
+#include "transpose.h"
+#include "umodarith.h"
+#include "fourstep.h"
+
+
+/* Bignum: Cache efficient Matrix Fourier Transform for arrays of the
+ form 3 * 2**n (See literature/matrix-transform.txt). */
+
+
+#ifndef PPRO
+static inline void
+std_size3_ntt(mpd_uint_t *x1, mpd_uint_t *x2, mpd_uint_t *x3,
+ mpd_uint_t w3table[3], mpd_uint_t umod)
+{
+ mpd_uint_t r1, r2;
+ mpd_uint_t w;
+ mpd_uint_t s, tmp;
+
+
+ /* k = 0 -> w = 1 */
+ s = *x1;
+ s = addmod(s, *x2, umod);
+ s = addmod(s, *x3, umod);
+
+ r1 = s;
+
+ /* k = 1 */
+ s = *x1;
+
+ w = w3table[1];
+ tmp = MULMOD(*x2, w);
+ s = addmod(s, tmp, umod);
+
+ w = w3table[2];
+ tmp = MULMOD(*x3, w);
+ s = addmod(s, tmp, umod);
+
+ r2 = s;
+
+ /* k = 2 */
+ s = *x1;
+
+ w = w3table[2];
+ tmp = MULMOD(*x2, w);
+ s = addmod(s, tmp, umod);
+
+ w = w3table[1];
+ tmp = MULMOD(*x3, w);
+ s = addmod(s, tmp, umod);
+
+ *x3 = s;
+ *x2 = r2;
+ *x1 = r1;
+}
+#else /* PPRO */
+static inline void
+ppro_size3_ntt(mpd_uint_t *x1, mpd_uint_t *x2, mpd_uint_t *x3, mpd_uint_t w3table[3],
+ mpd_uint_t umod, double *dmod, uint32_t dinvmod[3])
+{
+ mpd_uint_t r1, r2;
+ mpd_uint_t w;
+ mpd_uint_t s, tmp;
+
+
+ /* k = 0 -> w = 1 */
+ s = *x1;
+ s = addmod(s, *x2, umod);
+ s = addmod(s, *x3, umod);
+
+ r1 = s;
+
+ /* k = 1 */
+ s = *x1;
+
+ w = w3table[1];
+ tmp = ppro_mulmod(*x2, w, dmod, dinvmod);
+ s = addmod(s, tmp, umod);
+
+ w = w3table[2];
+ tmp = ppro_mulmod(*x3, w, dmod, dinvmod);
+ s = addmod(s, tmp, umod);
+
+ r2 = s;
+
+ /* k = 2 */
+ s = *x1;
+
+ w = w3table[2];
+ tmp = ppro_mulmod(*x2, w, dmod, dinvmod);
+ s = addmod(s, tmp, umod);
+
+ w = w3table[1];
+ tmp = ppro_mulmod(*x3, w, dmod, dinvmod);
+ s = addmod(s, tmp, umod);
+
+ *x3 = s;
+ *x2 = r2;
+ *x1 = r1;
+}
+#endif
+
+
+/* forward transform, sign = -1; transform length = 3 * 2**n */
+int
+four_step_fnt(mpd_uint_t *a, mpd_size_t n, int modnum)
+{
+ mpd_size_t R = 3; /* number of rows */
+ mpd_size_t C = n / 3; /* number of columns */
+ mpd_uint_t w3table[3];
+ mpd_uint_t kernel, w0, w1, wstep;
+ mpd_uint_t *s, *p0, *p1, *p2;
+ mpd_uint_t umod;
+#ifdef PPRO
+ double dmod;
+ uint32_t dinvmod[3];
+#endif
+ mpd_size_t i, k;
+
+
+ assert(n >= 48);
+ assert(n <= 3*MPD_MAXTRANSFORM_2N);
+
+
+ /* Length R transform on the columns. */
+ SETMODULUS(modnum);
+ _mpd_init_w3table(w3table, -1, modnum);
+ for (p0=a, p1=p0+C, p2=p0+2*C; p0<a+C; p0++,p1++,p2++) {
+
+ SIZE3_NTT(p0, p1, p2, w3table);
+ }
+
+ /* Multiply each matrix element (addressed by i*C+k) by r**(i*k). */
+ kernel = _mpd_getkernel(n, -1, modnum);
+ for (i = 1; i < R; i++) {
+ w0 = 1; /* r**(i*0): initial value for k=0 */
+ w1 = POWMOD(kernel, i); /* r**(i*1): initial value for k=1 */
+ wstep = MULMOD(w1, w1); /* r**(2*i) */
+ for (k = 0; k < C-1; k += 2) {
+ mpd_uint_t x0 = a[i*C+k];
+ mpd_uint_t x1 = a[i*C+k+1];
+ MULMOD2(&x0, w0, &x1, w1);
+ MULMOD2C(&w0, &w1, wstep); /* r**(i*(k+2)) = r**(i*k) * r**(2*i) */
+ a[i*C+k] = x0;
+ a[i*C+k+1] = x1;
+ }
+ }
+
+ /* Length C transform on the rows. */
+ for (s = a; s < a+n; s += C) {
+ if (!six_step_fnt(s, C, modnum)) {
+ return 0;
+ }
+ }
+
+#if 0
+ /* An unordered transform is sufficient for convolution. */
+ /* Transpose the matrix. */
+ transpose_3xpow2(a, R, C);
+#endif
+
+ return 1;
+}
+
+/* backward transform, sign = 1; transform length = 3 * 2**n */
+int
+inv_four_step_fnt(mpd_uint_t *a, mpd_size_t n, int modnum)
+{
+ mpd_size_t R = 3; /* number of rows */
+ mpd_size_t C = n / 3; /* number of columns */
+ mpd_uint_t w3table[3];
+ mpd_uint_t kernel, w0, w1, wstep;
+ mpd_uint_t *s, *p0, *p1, *p2;
+ mpd_uint_t umod;
+#ifdef PPRO
+ double dmod;
+ uint32_t dinvmod[3];
+#endif
+ mpd_size_t i, k;
+
+
+ assert(n >= 48);
+ assert(n <= 3*MPD_MAXTRANSFORM_2N);
+
+
+#if 0
+ /* An unordered transform is sufficient for convolution. */
+ /* Transpose the matrix, producing an R*C matrix. */
+ transpose_3xpow2(a, C, R);
+#endif
+
+ /* Length C transform on the rows. */
+ for (s = a; s < a+n; s += C) {
+ if (!inv_six_step_fnt(s, C, modnum)) {
+ return 0;
+ }
+ }
+
+ /* Multiply each matrix element (addressed by i*C+k) by r**(i*k). */
+ SETMODULUS(modnum);
+ kernel = _mpd_getkernel(n, 1, modnum);
+ for (i = 1; i < R; i++) {
+ w0 = 1;
+ w1 = POWMOD(kernel, i);
+ wstep = MULMOD(w1, w1);
+ for (k = 0; k < C; k += 2) {
+ mpd_uint_t x0 = a[i*C+k];
+ mpd_uint_t x1 = a[i*C+k+1];
+ MULMOD2(&x0, w0, &x1, w1);
+ MULMOD2C(&w0, &w1, wstep);
+ a[i*C+k] = x0;
+ a[i*C+k+1] = x1;
+ }
+ }
+
+ /* Length R transform on the columns. */
+ _mpd_init_w3table(w3table, 1, modnum);
+ for (p0=a, p1=p0+C, p2=p0+2*C; p0<a+C; p0++,p1++,p2++) {
+
+ SIZE3_NTT(p0, p1, p2, w3table);
+ }
+
+ return 1;
+}
+
+
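four_step_fnt() implements the decomposition described in literature/matrix-transform.txt: view the length n = 3 * 2**k array as a 3 x (n/3) matrix stored row by row, transform the columns (length 3), multiply entry (i, k) by r**(i*k), transform the rows (length n/3), and transpose only if an ordered result were needed; for convolution the transposed order is acceptable, hence the #if 0 blocks. Below is a self-contained check of that identity with a definition-level transform over the toy prime 13 and n = 12, instead of the library's moduli and radix-2 row transforms (the sign convention of the kernel does not matter here because the sketch compares against its own reference transform):

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    #define P 13u        /* toy prime: 13 - 1 = 12, so a 12th root of unity exists */
    #define R 3u
    #define C 4u
    #define N (R * C)

    static uint32_t powmod(uint32_t b, uint32_t e)
    {
        uint32_t r = 1;
        b %= P;
        for (; e; e >>= 1, b = (b * b) % P)
            if (e & 1) r = (r * b) % P;
        return r;
    }

    /* Definition-level transform: out[m] = sum_j a[j*stride] * w**(j*m). */
    static void dft(const uint32_t *a, uint32_t *out, uint32_t len,
                    uint32_t stride, uint32_t w)
    {
        for (uint32_t m = 0; m < len; m++) {
            uint32_t s = 0;
            for (uint32_t j = 0; j < len; j++)
                s = (s + a[j * stride] * powmod(w, j * m)) % P;
            out[m] = s;
        }
    }

    int main(void)
    {
        uint32_t x[N], a[N], ref[N], col[R], row[C];
        uint32_t w = 2;              /* primitive 12th root of unity mod 13 */
        uint32_t wR = powmod(w, C);  /* length-R kernel for the columns */
        uint32_t wC = powmod(w, R);  /* length-C kernel for the rows */

        for (uint32_t j = 0; j < N; j++) { x[j] = (j * j + 1) % P; a[j] = x[j]; }

        dft(x, ref, N, 1, w);        /* reference: one length-N transform */

        /* Step 1: length-R transform of every column k (elements a[i*C+k]). */
        for (uint32_t k = 0; k < C; k++) {
            dft(a + k, col, R, C, wR);
            for (uint32_t i = 0; i < R; i++) a[i*C + k] = col[i];
        }
        /* Step 2: twiddle -- multiply element (i, k) by w**(i*k). */
        for (uint32_t i = 0; i < R; i++)
            for (uint32_t k = 0; k < C; k++)
                a[i*C + k] = (a[i*C + k] * powmod(w, i * k)) % P;
        /* Step 3: length-C transform of every row i. */
        for (uint32_t i = 0; i < R; i++) {
            dft(a + i*C, row, C, 1, wC);
            for (uint32_t k = 0; k < C; k++) a[i*C + k] = row[k];
        }

        /* Without the final transpose the result is in "transposed" order:
         * a[i*C + k] holds the output of the full transform at index k*R + i. */
        for (uint32_t i = 0; i < R; i++)
            for (uint32_t k = 0; k < C; k++)
                assert(a[i*C + k] == ref[k*R + i]);

        puts("column transforms + twiddles + row transforms == full transform");
        return 0;
    }
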
diff --git a/Modules/_decimal/libmpdec/fourstep.h b/Modules/_decimal/libmpdec/fourstep.h
new file mode 100644
index 0000000000..61d9d6a71a
--- /dev/null
+++ b/Modules/_decimal/libmpdec/fourstep.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef FOUR_STEP_H
+#define FOUR_STEP_H
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+
+
+int four_step_fnt(mpd_uint_t *a, mpd_size_t n, int modnum);
+int inv_four_step_fnt(mpd_uint_t *a, mpd_size_t n, int modnum);
+
+
+#endif
diff --git a/Modules/_decimal/libmpdec/io.c b/Modules/_decimal/libmpdec/io.c
new file mode 100644
index 0000000000..2648135bb6
--- /dev/null
+++ b/Modules/_decimal/libmpdec/io.c
@@ -0,0 +1,1575 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <limits.h>
+#include <assert.h>
+#include <errno.h>
+#include <locale.h>
+#include "bits.h"
+#include "constants.h"
+#include "memory.h"
+#include "typearith.h"
+#include "io.h"
+
+
+/* This file contains functions for decimal <-> string conversions, including
+ PEP-3101 formatting for numeric types. */
+
+
+/*
+ * Work around the behavior of tolower() and strcasecmp() in certain
+ * locales. For example, in tr_TR.utf8:
+ *
+ * tolower((unsigned char)'I') == 'I'
+ *
+ * u is the exact uppercase version of l; n is strlen(l) or strlen(l)+1
+ */
+static inline int
+_mpd_strneq(const char *s, const char *l, const char *u, size_t n)
+{
+ while (--n != SIZE_MAX) {
+ if (*s != *l && *s != *u) {
+ return 0;
+ }
+ s++; u++; l++;
+ }
+
+ return 1;
+}
+
+static mpd_ssize_t
+strtoexp(const char *s)
+{
+ char *end;
+ mpd_ssize_t retval;
+
+ errno = 0;
+ retval = mpd_strtossize(s, &end, 10);
+ if (errno == 0 && !(*s != '\0' && *end == '\0'))
+ errno = EINVAL;
+
+ return retval;
+}
+
+/*
+ * Scan 'len' words. The most significant word contains 'r' digits,
+ * the remaining words are full words. Skip dpoint. The string 's' must
+ * consist of digits and an optional single decimal point at 'dpoint'.
+ */
+static void
+string_to_coeff(mpd_uint_t *data, const char *s, const char *dpoint, int r,
+ size_t len)
+{
+ int j;
+
+ if (r > 0) {
+ data[--len] = 0;
+ for (j = 0; j < r; j++, s++) {
+ if (s == dpoint) s++;
+ data[len] = 10 * data[len] + (*s - '0');
+ }
+ }
+
+ while (--len != SIZE_MAX) {
+ data[len] = 0;
+ for (j = 0; j < MPD_RDIGITS; j++, s++) {
+ if (s == dpoint) s++;
+ data[len] = 10 * data[len] + (*s - '0');
+ }
+ }
+}
+
+/*
+ * Partially verify a numeric string of the form:
+ *
+ * [cdigits][.][cdigits][eE][+-][edigits]
+ *
+ * If successful, return a pointer to the location of the first
+ * relevant coefficient digit. This digit is either non-zero or
+ * part of one of the following patterns:
+ *
+ * ["0\x00", "0.\x00", "0.E", "0.e", "0E", "0e"]
+ *
+ * The locations of a single optional dot or indicator are stored
+ * in 'dpoint' and 'exp'.
+ *
+ * The end of the string is stored in 'end'. If an indicator [eE]
+ * occurs without trailing [edigits], the condition is caught
+ * later by strtoexp().
+ */
+static const char *
+scan_dpoint_exp(const char *s, const char **dpoint, const char **exp,
+ const char **end)
+{
+ const char *coeff = NULL;
+
+ *dpoint = NULL;
+ *exp = NULL;
+ for (; *s != '\0'; s++) {
+ switch (*s) {
+ case '.':
+ if (*dpoint != NULL || *exp != NULL)
+ return NULL;
+ *dpoint = s;
+ break;
+ case 'E': case 'e':
+ if (*exp != NULL)
+ return NULL;
+ *exp = s;
+ if (*(s+1) == '+' || *(s+1) == '-')
+ s++;
+ break;
+ default:
+ if (!isdigit((uchar)*s))
+ return NULL;
+ if (coeff == NULL && *exp == NULL) {
+ if (*s == '0') {
+ if (!isdigit((uchar)*(s+1)))
+ if (!(*(s+1) == '.' &&
+ isdigit((uchar)*(s+2))))
+ coeff = s;
+ }
+ else {
+ coeff = s;
+ }
+ }
+ break;
+
+ }
+ }
+
+ *end = s;
+ return coeff;
+}
+
+/* scan the payload of a NaN */
+static const char *
+scan_payload(const char *s, const char **end)
+{
+ const char *coeff;
+
+ while (*s == '0')
+ s++;
+ coeff = s;
+
+ while (isdigit((uchar)*s))
+ s++;
+ *end = s;
+
+ return (*s == '\0') ? coeff : NULL;
+}
+
+/* convert a character string to a decimal */
+void
+mpd_qset_string(mpd_t *dec, const char *s, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_ssize_t q, r, len;
+ const char *coeff, *end;
+ const char *dpoint = NULL, *exp = NULL;
+ size_t digits;
+ uint8_t sign = MPD_POS;
+
+ mpd_set_flags(dec, 0);
+ dec->len = 0;
+ dec->exp = 0;
+
+ /* sign */
+ if (*s == '+') {
+ s++;
+ }
+ else if (*s == '-') {
+ mpd_set_negative(dec);
+ sign = MPD_NEG;
+ s++;
+ }
+
+ if (_mpd_strneq(s, "nan", "NAN", 3)) { /* NaN */
+ s += 3;
+ mpd_setspecial(dec, sign, MPD_NAN);
+ if (*s == '\0')
+ return;
+ /* validate payload: digits only */
+ if ((coeff = scan_payload(s, &end)) == NULL)
+ goto conversion_error;
+ /* payload consists entirely of zeros */
+ if (*coeff == '\0')
+ return;
+ digits = end - coeff;
+ /* prec >= 1, clamp is 0 or 1 */
+ if (digits > (size_t)(ctx->prec-ctx->clamp))
+ goto conversion_error;
+ } /* sNaN */
+ else if (_mpd_strneq(s, "snan", "SNAN", 4)) {
+ s += 4;
+ mpd_setspecial(dec, sign, MPD_SNAN);
+ if (*s == '\0')
+ return;
+ /* validate payload: digits only */
+ if ((coeff = scan_payload(s, &end)) == NULL)
+ goto conversion_error;
+ /* payload consists entirely of zeros */
+ if (*coeff == '\0')
+ return;
+ digits = end - coeff;
+ if (digits > (size_t)(ctx->prec-ctx->clamp))
+ goto conversion_error;
+ }
+ else if (_mpd_strneq(s, "inf", "INF", 3)) {
+ s += 3;
+ if (*s == '\0' || _mpd_strneq(s, "inity", "INITY", 6)) {
+ /* numeric-value: infinity */
+ mpd_setspecial(dec, sign, MPD_INF);
+ return;
+ }
+ goto conversion_error;
+ }
+ else {
+ /* scan for start of coefficient, decimal point, indicator, end */
+ if ((coeff = scan_dpoint_exp(s, &dpoint, &exp, &end)) == NULL)
+ goto conversion_error;
+
+ /* numeric-value: [exponent-part] */
+ if (exp) {
+ /* exponent-part */
+ end = exp; exp++;
+ dec->exp = strtoexp(exp);
+ if (errno) {
+ if (!(errno == ERANGE &&
+ (dec->exp == MPD_SSIZE_MAX ||
+ dec->exp == MPD_SSIZE_MIN)))
+ goto conversion_error;
+ }
+ }
+
+ digits = end - coeff;
+ if (dpoint) {
+ size_t fracdigits = end-dpoint-1;
+ if (dpoint > coeff) digits--;
+
+ if (fracdigits > MPD_MAX_PREC) {
+ goto conversion_error;
+ }
+ if (dec->exp < MPD_SSIZE_MIN+(mpd_ssize_t)fracdigits) {
+ dec->exp = MPD_SSIZE_MIN;
+ }
+ else {
+ dec->exp -= (mpd_ssize_t)fracdigits;
+ }
+ }
+ if (digits > MPD_MAX_PREC) {
+ goto conversion_error;
+ }
+ if (dec->exp > MPD_EXP_INF) {
+ dec->exp = MPD_EXP_INF;
+ }
+ if (dec->exp == MPD_SSIZE_MIN) {
+ dec->exp = MPD_SSIZE_MIN+1;
+ }
+ }
+
+ _mpd_idiv_word(&q, &r, (mpd_ssize_t)digits, MPD_RDIGITS);
+
+ len = (r == 0) ? q : q+1;
+ if (len == 0) {
+ goto conversion_error; /* GCOV_NOT_REACHED */
+ }
+ if (!mpd_qresize(dec, len, status)) {
+ mpd_seterror(dec, MPD_Malloc_error, status);
+ return;
+ }
+ dec->len = len;
+
+ string_to_coeff(dec->data, coeff, dpoint, (int)r, len);
+
+ mpd_setdigits(dec);
+ mpd_qfinalize(dec, ctx, status);
+ return;
+
+conversion_error:
+ /* standard wants a positive NaN */
+ mpd_seterror(dec, MPD_Conversion_syntax, status);
+}
+
+/* Print word x with n decimal digits to string s. dot is either NULL
+ or the location of a decimal point. */
+#define EXTRACT_DIGIT(s, x, d, dot) \
+ if (s == dot) *s++ = '.'; *s++ = '0' + (char)(x / d); x %= d
+static inline char *
+word_to_string(char *s, mpd_uint_t x, int n, char *dot)
+{
+ switch(n) {
+#ifdef CONFIG_64
+ case 20: EXTRACT_DIGIT(s, x, 10000000000000000000ULL, dot); /* GCOV_NOT_REACHED */
+ case 19: EXTRACT_DIGIT(s, x, 1000000000000000000ULL, dot);
+ case 18: EXTRACT_DIGIT(s, x, 100000000000000000ULL, dot);
+ case 17: EXTRACT_DIGIT(s, x, 10000000000000000ULL, dot);
+ case 16: EXTRACT_DIGIT(s, x, 1000000000000000ULL, dot);
+ case 15: EXTRACT_DIGIT(s, x, 100000000000000ULL, dot);
+ case 14: EXTRACT_DIGIT(s, x, 10000000000000ULL, dot);
+ case 13: EXTRACT_DIGIT(s, x, 1000000000000ULL, dot);
+ case 12: EXTRACT_DIGIT(s, x, 100000000000ULL, dot);
+ case 11: EXTRACT_DIGIT(s, x, 10000000000ULL, dot);
+#endif
+ case 10: EXTRACT_DIGIT(s, x, 1000000000UL, dot);
+ case 9: EXTRACT_DIGIT(s, x, 100000000UL, dot);
+ case 8: EXTRACT_DIGIT(s, x, 10000000UL, dot);
+ case 7: EXTRACT_DIGIT(s, x, 1000000UL, dot);
+ case 6: EXTRACT_DIGIT(s, x, 100000UL, dot);
+ case 5: EXTRACT_DIGIT(s, x, 10000UL, dot);
+ case 4: EXTRACT_DIGIT(s, x, 1000UL, dot);
+ case 3: EXTRACT_DIGIT(s, x, 100UL, dot);
+ case 2: EXTRACT_DIGIT(s, x, 10UL, dot);
+ default: if (s == dot) *s++ = '.'; *s++ = '0' + (char)x;
+ }
+
+ *s = '\0';
+ return s;
+}
+
+/* Print exponent x to string s. Undefined for MPD_SSIZE_MIN. */
+static inline char *
+exp_to_string(char *s, mpd_ssize_t x)
+{
+ char sign = '+';
+
+ if (x < 0) {
+ sign = '-';
+ x = -x;
+ }
+ *s++ = sign;
+
+ return word_to_string(s, x, mpd_word_digits(x), NULL);
+}
+
+/* Print the coefficient of dec to string s. len(dec) > 0. */
+static inline char *
+coeff_to_string(char *s, const mpd_t *dec)
+{
+ mpd_uint_t x;
+ mpd_ssize_t i;
+
+ /* most significant word */
+ x = mpd_msword(dec);
+ s = word_to_string(s, x, mpd_word_digits(x), NULL);
+
+ /* remaining full words */
+ for (i=dec->len-2; i >= 0; --i) {
+ x = dec->data[i];
+ s = word_to_string(s, x, MPD_RDIGITS, NULL);
+ }
+
+ return s;
+}
+
+/* Print the coefficient of dec to string s. len(dec) > 0. dot is either
+ NULL or a pointer to the location of a decimal point. */
+static inline char *
+coeff_to_string_dot(char *s, char *dot, const mpd_t *dec)
+{
+ mpd_uint_t x;
+ mpd_ssize_t i;
+
+ /* most significant word */
+ x = mpd_msword(dec);
+ s = word_to_string(s, x, mpd_word_digits(x), dot);
+
+ /* remaining full words */
+ for (i=dec->len-2; i >= 0; --i) {
+ x = dec->data[i];
+ s = word_to_string(s, x, MPD_RDIGITS, dot);
+ }
+
+ return s;
+}
+
+/* Format type */
+#define MPD_FMT_LOWER 0x00000000
+#define MPD_FMT_UPPER 0x00000001
+#define MPD_FMT_TOSCI 0x00000002
+#define MPD_FMT_TOENG 0x00000004
+#define MPD_FMT_EXP 0x00000008
+#define MPD_FMT_FIXED 0x00000010
+#define MPD_FMT_PERCENT 0x00000020
+#define MPD_FMT_SIGN_SPACE 0x00000040
+#define MPD_FMT_SIGN_PLUS 0x00000080
+
+/* Default place of the decimal point for MPD_FMT_TOSCI, MPD_FMT_EXP */
+#define MPD_DEFAULT_DOTPLACE 1
+
+/*
+ * Set *result to the string representation of a decimal. Return the length
+ * of *result, not including the terminating '\0' character.
+ *
+ * Formatting is done according to 'flags'. A return value of -1 with *result
+ * set to NULL indicates MPD_Malloc_error.
+ *
+ * 'dplace' is the default place of the decimal point. It is always set to
+ * MPD_DEFAULT_DOTPLACE except for zeros in combination with MPD_FMT_EXP.
+ */
+static mpd_ssize_t
+_mpd_to_string(char **result, const mpd_t *dec, int flags, mpd_ssize_t dplace)
+{
+ char *decstring = NULL, *cp = NULL;
+ mpd_ssize_t ldigits;
+ mpd_ssize_t mem = 0, k;
+
+ if (mpd_isspecial(dec)) {
+
+ mem = sizeof "-Infinity";
+ if (mpd_isnan(dec) && dec->len > 0) {
+ /* diagnostic code */
+ mem += dec->digits;
+ }
+ cp = decstring = mpd_alloc(mem, sizeof *decstring);
+ if (cp == NULL) {
+ *result = NULL;
+ return -1;
+ }
+
+ if (mpd_isnegative(dec)) {
+ *cp++ = '-';
+ }
+ else if (flags&MPD_FMT_SIGN_SPACE) {
+ *cp++ = ' ';
+ }
+ else if (flags&MPD_FMT_SIGN_PLUS) {
+ *cp++ = '+';
+ }
+
+ if (mpd_isnan(dec)) {
+ if (mpd_isqnan(dec)) {
+ strcpy(cp, "NaN");
+ cp += 3;
+ }
+ else {
+ strcpy(cp, "sNaN");
+ cp += 4;
+ }
+ if (dec->len > 0) { /* diagnostic code */
+ cp = coeff_to_string(cp, dec);
+ }
+ }
+ else if (mpd_isinfinite(dec)) {
+ strcpy(cp, "Infinity");
+ cp += 8;
+ }
+ else { /* debug */
+ abort(); /* GCOV_NOT_REACHED */
+ }
+ }
+ else {
+ assert(dec->len > 0);
+
+ /*
+ * For easier manipulation of the decimal point's location
+ * and the exponent that is finally printed, the number is
+ * rescaled to a virtual representation with exp = 0. Here
+ * ldigits denotes the number of decimal digits to the left
+ * of the decimal point and remains constant once initialized.
+ *
+ * dplace is the location of the decimal point relative to
+ * the start of the coefficient. Note that 3) always holds
+ * when dplace is shifted.
+ *
+		 *   1) ldigits := dec->digits + dec->exp
+ * 2) dplace := ldigits (initially)
+ * 3) exp := ldigits - dplace (initially exp = 0)
+ *
+ * 0.00000_.____._____000000.
+ * ^ ^ ^ ^
+ * | | | |
+ * | | | `- dplace >= digits
+ * | | `- dplace in the middle of the coefficient
+ * | ` dplace = 1 (after the first coefficient digit)
+ * `- dplace <= 0
+ */
+
+ ldigits = dec->digits + dec->exp;
+
+ if (flags&MPD_FMT_EXP) {
+ ;
+ }
+ else if (flags&MPD_FMT_FIXED || (dec->exp <= 0 && ldigits > -6)) {
+ /* MPD_FMT_FIXED: always use fixed point notation.
+ * MPD_FMT_TOSCI, MPD_FMT_TOENG: for a certain range,
+ * override exponent notation. */
+ dplace = ldigits;
+ }
+ else if (flags&MPD_FMT_TOENG) {
+ if (mpd_iszero(dec)) {
+ /* If the exponent is divisible by three,
+ * dplace = 1. Otherwise, move dplace one
+ * or two places to the left. */
+ dplace = -1 + mod_mpd_ssize_t(dec->exp+2, 3);
+ }
+ else { /* ldigits-1 is the adjusted exponent, which
+ * should be divisible by three. If not, move
+ * dplace one or two places to the right. */
+ dplace += mod_mpd_ssize_t(ldigits-1, 3);
+ }
+ }
+
+ /*
+ * Basic space requirements:
+ *
+ * [-][.][coeffdigits][E][-][expdigits+1][%]['\0']
+ *
+ * If the decimal point lies outside of the coefficient digits,
+ * space is adjusted accordingly.
+ */
+ if (dplace <= 0) {
+ mem = -dplace + dec->digits + 2;
+ }
+ else if (dplace >= dec->digits) {
+ mem = dplace;
+ }
+ else {
+ mem = dec->digits;
+ }
+ mem += (MPD_EXPDIGITS+1+6);
+
+ cp = decstring = mpd_alloc(mem, sizeof *decstring);
+ if (cp == NULL) {
+ *result = NULL;
+ return -1;
+ }
+
+
+ if (mpd_isnegative(dec)) {
+ *cp++ = '-';
+ }
+ else if (flags&MPD_FMT_SIGN_SPACE) {
+ *cp++ = ' ';
+ }
+ else if (flags&MPD_FMT_SIGN_PLUS) {
+ *cp++ = '+';
+ }
+
+ if (dplace <= 0) {
+ /* space: -dplace+dec->digits+2 */
+ *cp++ = '0';
+ *cp++ = '.';
+ for (k = 0; k < -dplace; k++) {
+ *cp++ = '0';
+ }
+ cp = coeff_to_string(cp, dec);
+ }
+ else if (dplace >= dec->digits) {
+ /* space: dplace */
+ cp = coeff_to_string(cp, dec);
+ for (k = 0; k < dplace-dec->digits; k++) {
+ *cp++ = '0';
+ }
+ }
+ else {
+ /* space: dec->digits+1 */
+ cp = coeff_to_string_dot(cp, cp+dplace, dec);
+ }
+
+ /*
+ * Conditions for printing an exponent:
+ *
+ * MPD_FMT_TOSCI, MPD_FMT_TOENG: only if ldigits != dplace
+ * MPD_FMT_FIXED: never (ldigits == dplace)
+ * MPD_FMT_EXP: always
+ */
+ if (ldigits != dplace || flags&MPD_FMT_EXP) {
+ /* space: expdigits+2 */
+ *cp++ = (flags&MPD_FMT_UPPER) ? 'E' : 'e';
+ cp = exp_to_string(cp, ldigits-dplace);
+ }
+
+ if (flags&MPD_FMT_PERCENT) {
+ *cp++ = '%';
+ }
+ }
+
+ assert(cp < decstring+mem);
+ assert(cp-decstring < MPD_SSIZE_MAX);
+
+ *cp = '\0';
+ *result = decstring;
+ return (mpd_ssize_t)(cp-decstring);
+}
+
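The ldigits/dplace bookkeeping above is easiest to follow on small inputs. Below is a reduced sketch for positive, finite operands in to-scientific style only (no sign handling, no engineering mode, no special values), mirroring the three placement cases and the printed exponent ldigits - dplace; the expected outputs are noted next to each call:

    #include <stdio.h>
    #include <string.h>

    /* Print the to-scientific form of coeff * 10**exp (positive finite
     * numbers only), mirroring the ldigits/dplace logic of _mpd_to_string(). */
    static void to_sci(const char *coeff, long exp)
    {
        long digits = (long)strlen(coeff);
        long ldigits = digits + exp;   /* digits left of the point, virtually */
        long dplace = 1;               /* MPD_DEFAULT_DOTPLACE */
        long k;

        if (exp <= 0 && ldigits > -6) {
            dplace = ldigits;          /* fixed-point range of to-scientific */
        }

        printf("%sE%+ld  ->  ", coeff, exp);
        if (dplace <= 0) {             /* 0.00...coeff */
            printf("0.");
            for (k = 0; k < -dplace; k++) putchar('0');
            printf("%s", coeff);
        }
        else if (dplace >= digits) {   /* coeff followed by zeros */
            printf("%s", coeff);
            for (k = 0; k < dplace - digits; k++) putchar('0');
        }
        else {                         /* decimal point inside the coefficient */
            printf("%.*s.%s", (int)dplace, coeff, coeff + dplace);
        }
        if (ldigits != dplace) {       /* exponent: ldigits - dplace */
            printf("E%+ld", ldigits - dplace);
        }
        putchar('\n');
    }

    int main(void)
    {
        to_sci("123", 0);    /* 123        */
        to_sci("123", -2);   /* 1.23       */
        to_sci("123", 2);    /* 1.23E+4    */
        to_sci("123", -10);  /* 1.23E-8    */
        to_sci("123", -8);   /* 0.00000123 */
        return 0;
    }
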
+char *
+mpd_to_sci(const mpd_t *dec, int fmt)
+{
+ char *res;
+ int flags = MPD_FMT_TOSCI;
+
+ flags |= fmt ? MPD_FMT_UPPER : MPD_FMT_LOWER;
+ (void)_mpd_to_string(&res, dec, flags, MPD_DEFAULT_DOTPLACE);
+ return res;
+}
+
+char *
+mpd_to_eng(const mpd_t *dec, int fmt)
+{
+ char *res;
+ int flags = MPD_FMT_TOENG;
+
+ flags |= fmt ? MPD_FMT_UPPER : MPD_FMT_LOWER;
+ (void)_mpd_to_string(&res, dec, flags, MPD_DEFAULT_DOTPLACE);
+ return res;
+}
+
+mpd_ssize_t
+mpd_to_sci_size(char **res, const mpd_t *dec, int fmt)
+{
+ int flags = MPD_FMT_TOSCI;
+
+ flags |= fmt ? MPD_FMT_UPPER : MPD_FMT_LOWER;
+ return _mpd_to_string(res, dec, flags, MPD_DEFAULT_DOTPLACE);
+}
+
+mpd_ssize_t
+mpd_to_eng_size(char **res, const mpd_t *dec, int fmt)
+{
+ int flags = MPD_FMT_TOENG;
+
+ flags |= fmt ? MPD_FMT_UPPER : MPD_FMT_LOWER;
+ return _mpd_to_string(res, dec, flags, MPD_DEFAULT_DOTPLACE);
+}
+
+/* Copy a single UTF-8 char to dest. See: The Unicode Standard, version 5.2,
+ chapter 3.9: Well-formed UTF-8 byte sequences. */
+static int
+_mpd_copy_utf8(char dest[5], const char *s)
+{
+ const uchar *cp = (const uchar *)s;
+ uchar lb, ub;
+ int count, i;
+
+
+ if (*cp == 0) {
+ /* empty string */
+ dest[0] = '\0';
+ return 0;
+ }
+ else if (*cp <= 0x7f) {
+ /* ascii */
+ dest[0] = *cp;
+ dest[1] = '\0';
+ return 1;
+ }
+ else if (0xc2 <= *cp && *cp <= 0xdf) {
+ lb = 0x80; ub = 0xbf;
+ count = 2;
+ }
+ else if (*cp == 0xe0) {
+ lb = 0xa0; ub = 0xbf;
+ count = 3;
+ }
+ else if (*cp <= 0xec) {
+ lb = 0x80; ub = 0xbf;
+ count = 3;
+ }
+ else if (*cp == 0xed) {
+ lb = 0x80; ub = 0x9f;
+ count = 3;
+ }
+ else if (*cp <= 0xef) {
+ lb = 0x80; ub = 0xbf;
+ count = 3;
+ }
+ else if (*cp == 0xf0) {
+ lb = 0x90; ub = 0xbf;
+ count = 4;
+ }
+ else if (*cp <= 0xf3) {
+ lb = 0x80; ub = 0xbf;
+ count = 4;
+ }
+ else if (*cp == 0xf4) {
+ lb = 0x80; ub = 0x8f;
+ count = 4;
+ }
+ else {
+ /* invalid */
+ goto error;
+ }
+
+ dest[0] = *cp++;
+ if (*cp < lb || ub < *cp) {
+ goto error;
+ }
+ dest[1] = *cp++;
+ for (i = 2; i < count; i++) {
+ if (*cp < 0x80 || 0xbf < *cp) {
+ goto error;
+ }
+ dest[i] = *cp++;
+ }
+ dest[i] = '\0';
+
+ return count;
+
+error:
+ dest[0] = '\0';
+ return -1;
+}
+
+int
+mpd_validate_lconv(mpd_spec_t *spec)
+{
+ size_t n;
+#if CHAR_MAX == SCHAR_MAX
+ const char *cp = spec->grouping;
+ while (*cp != '\0') {
+ if (*cp++ < 0) {
+ return -1;
+ }
+ }
+#endif
+ n = strlen(spec->dot);
+ if (n == 0 || n > 4) {
+ return -1;
+ }
+ if (strlen(spec->sep) > 4) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+mpd_parse_fmt_str(mpd_spec_t *spec, const char *fmt, int caps)
+{
+ char *cp = (char *)fmt;
+ int have_align = 0, n;
+
+ /* defaults */
+ spec->min_width = 0;
+ spec->prec = -1;
+ spec->type = caps ? 'G' : 'g';
+ spec->align = '>';
+ spec->sign = '-';
+ spec->dot = "";
+ spec->sep = "";
+ spec->grouping = "";
+
+
+ /* presume that the first character is a UTF-8 fill character */
+ if ((n = _mpd_copy_utf8(spec->fill, cp)) < 0) {
+ return 0;
+ }
+
+ /* alignment directive, prefixed by a fill character */
+ if (*cp && (*(cp+n) == '<' || *(cp+n) == '>' ||
+ *(cp+n) == '=' || *(cp+n) == '^')) {
+ cp += n;
+ spec->align = *cp++;
+ have_align = 1;
+ } /* alignment directive */
+ else {
+ /* default fill character */
+ spec->fill[0] = ' ';
+ spec->fill[1] = '\0';
+ if (*cp == '<' || *cp == '>' ||
+ *cp == '=' || *cp == '^') {
+ spec->align = *cp++;
+ have_align = 1;
+ }
+ }
+
+ /* sign formatting */
+ if (*cp == '+' || *cp == '-' || *cp == ' ') {
+ spec->sign = *cp++;
+ }
+
+ /* zero padding */
+ if (*cp == '0') {
+ /* zero padding implies alignment, which should not be
+ * specified twice. */
+ if (have_align) {
+ return 0;
+ }
+ spec->align = 'z';
+ spec->fill[0] = *cp++;
+ spec->fill[1] = '\0';
+ }
+
+ /* minimum width */
+ if (isdigit((uchar)*cp)) {
+ if (*cp == '0') {
+ return 0;
+ }
+ errno = 0;
+ spec->min_width = mpd_strtossize(cp, &cp, 10);
+ if (errno == ERANGE || errno == EINVAL) {
+ return 0;
+ }
+ }
+
+ /* thousands separator */
+ if (*cp == ',') {
+ spec->dot = ".";
+ spec->sep = ",";
+ spec->grouping = "\003\003";
+ cp++;
+ }
+
+ /* fraction digits or significant digits */
+ if (*cp == '.') {
+ cp++;
+ if (!isdigit((uchar)*cp)) {
+ return 0;
+ }
+ errno = 0;
+ spec->prec = mpd_strtossize(cp, &cp, 10);
+ if (errno == ERANGE || errno == EINVAL) {
+ return 0;
+ }
+ }
+
+ /* type */
+ if (*cp == 'E' || *cp == 'e' || *cp == 'F' || *cp == 'f' ||
+ *cp == 'G' || *cp == 'g' || *cp == '%') {
+ spec->type = *cp++;
+ }
+ else if (*cp == 'N' || *cp == 'n') {
+ /* locale specific conversion */
+ struct lconv *lc;
+ /* separator has already been specified */
+ if (*spec->sep) {
+ return 0;
+ }
+ spec->type = *cp++;
+ spec->type = (spec->type == 'N') ? 'G' : 'g';
+ lc = localeconv();
+ spec->dot = lc->decimal_point;
+ spec->sep = lc->thousands_sep;
+ spec->grouping = lc->grouping;
+ if (mpd_validate_lconv(spec) < 0) {
+ return 0; /* GCOV_NOT_REACHED */
+ }
+ }
+
+ /* check correctness */
+ if (*cp != '\0') {
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * The following functions assume that spec->min_width <= MPD_MAX_PREC, which
+ * is ensured in mpd_qformat_spec. Then, even with a spec that inserts a
+ * four-byte separator after each digit, nbytes in the following struct
+ * cannot overflow.
+ */
+
+/* Multibyte string */
+typedef struct {
+ mpd_ssize_t nbytes; /* length in bytes */
+ mpd_ssize_t nchars; /* length in chars */
+ mpd_ssize_t cur; /* current write index */
+ char *data;
+} mpd_mbstr_t;
+
+static inline void
+_mpd_bcopy(char *dest, const char *src, mpd_ssize_t n)
+{
+ while (--n >= 0) {
+ dest[n] = src[n];
+ }
+}
+
+static inline void
+_mbstr_copy_char(mpd_mbstr_t *dest, const char *src, mpd_ssize_t n)
+{
+ dest->nbytes += n;
+ dest->nchars += (n > 0 ? 1 : 0);
+ dest->cur -= n;
+
+ if (dest->data != NULL) {
+ _mpd_bcopy(dest->data+dest->cur, src, n);
+ }
+}
+
+static inline void
+_mbstr_copy_ascii(mpd_mbstr_t *dest, const char *src, mpd_ssize_t n)
+{
+ dest->nbytes += n;
+ dest->nchars += n;
+ dest->cur -= n;
+
+ if (dest->data != NULL) {
+ _mpd_bcopy(dest->data+dest->cur, src, n);
+ }
+}
+
+static inline void
+_mbstr_copy_pad(mpd_mbstr_t *dest, mpd_ssize_t n)
+{
+ dest->nbytes += n;
+ dest->nchars += n;
+ dest->cur -= n;
+
+ if (dest->data != NULL) {
+ char *cp = dest->data + dest->cur;
+ while (--n >= 0) {
+ cp[n] = '0';
+ }
+ }
+}
+
+/*
+ * Copy a numeric string to dest->data, adding separators in the integer
+ * part according to spec->grouping. If leading zero padding is enabled
+ * and the result is smaller than spec->min_width, continue adding zeros
+ * and separators until the minimum width is reached.
+ *
+ * The final length of dest->data is stored in dest->nbytes. The number
+ * of UTF-8 characters is stored in dest->nchars.
+ *
+ * First run (dest->data == NULL): determine the length of the result
+ * string and store it in dest->nbytes.
+ *
+ * Second run (write to dest->data): data is written in chunks and in
+ * reverse order, starting with the rest of the numeric string.
+ */
+static void
+_mpd_add_sep_dot(mpd_mbstr_t *dest,
+ const char *sign, /* location of optional sign */
+ const char *src, mpd_ssize_t n_src, /* integer part and length */
+ const char *dot, /* location of optional decimal point */
+ const char *rest, mpd_ssize_t n_rest, /* remaining part and length */
+ const mpd_spec_t *spec)
+{
+ mpd_ssize_t n_sep, n_sign, consume;
+ const char *g;
+ int pad = 0;
+
+ n_sign = sign ? 1 : 0;
+ n_sep = (mpd_ssize_t)strlen(spec->sep);
+ /* Initial write index: set to location of '\0' in the output string.
+ * Irrelevant for the first run. */
+ dest->cur = dest->nbytes;
+ dest->nbytes = dest->nchars = 0;
+
+ _mbstr_copy_ascii(dest, rest, n_rest);
+
+ if (dot) {
+ _mbstr_copy_char(dest, dot, (mpd_ssize_t)strlen(dot));
+ }
+
+ g = spec->grouping;
+ consume = *g;
+ while (1) {
+ /* If the group length is 0 or CHAR_MAX or greater than the
+ * number of source bytes, consume all remaining bytes. */
+ if (*g == 0 || *g == CHAR_MAX || consume > n_src) {
+ consume = n_src;
+ }
+ n_src -= consume;
+ if (pad) {
+ _mbstr_copy_pad(dest, consume);
+ }
+ else {
+ _mbstr_copy_ascii(dest, src+n_src, consume);
+ }
+
+ if (n_src == 0) {
+ /* Either the real source of intpart digits or the virtual
+ * source of padding zeros is exhausted. */
+ if (spec->align == 'z' &&
+ dest->nchars + n_sign < spec->min_width) {
+ /* Zero padding is set and length < min_width:
+ * Generate n_src additional characters. */
+ n_src = spec->min_width - (dest->nchars + n_sign);
+ /* Next iteration:
+ * case *g == 0 || *g == CHAR_MAX:
+ * consume all padding characters
+				 *   case consume < *g:
+				 *       fill remainder of current group
+				 *   case consume == *g:
+ * copying is a no-op */
+ consume = *g - consume;
+ /* Switch on virtual source of zeros. */
+ pad = 1;
+ continue;
+ }
+ break;
+ }
+
+ if (n_sep > 0) {
+ /* If padding is switched on, separators are counted
+ * as padding characters. This rule does not apply if
+ * the separator would be the first character of the
+ * result string. */
+ if (pad && n_src > 1) n_src -= 1;
+ _mbstr_copy_char(dest, spec->sep, n_sep);
+ }
+
+ /* If non-NUL, use the next value for grouping. */
+ if (*g && *(g+1)) g++;
+ consume = *g;
+ }
+
+ if (sign) {
+ _mbstr_copy_ascii(dest, sign, 1);
+ }
+
+ if (dest->data) {
+ dest->data[dest->nbytes] = '\0';
+ }
+}
+
+/*
+ * Convert a numeric-string to its locale-specific appearance.
+ * The string must have one of these forms:
+ *
+ * 1) [sign] digits [exponent-part]
+ * 2) [sign] digits '.' [digits] [exponent-part]
+ *
+ * Not allowed, since _mpd_to_string() never returns this form:
+ *
+ * 3) [sign] '.' digits [exponent-part]
+ *
+ * Input: result->data := original numeric string (ASCII)
+ *         result->nbytes := strlen(result->data)
+ * result->nchars := strlen(result->data)
+ *
+ * Output: result->data := modified or original string
+ *         result->nbytes := strlen(result->data)
+ * result->nchars := number of characters (possibly UTF-8)
+ */
+static int
+_mpd_apply_lconv(mpd_mbstr_t *result, const mpd_spec_t *spec, uint32_t *status)
+{
+ const char *sign = NULL, *intpart = NULL, *dot = NULL;
+ const char *rest, *dp;
+ char *decstring;
+ mpd_ssize_t n_int, n_rest;
+
+ /* original numeric string */
+ dp = result->data;
+
+ /* sign */
+ if (*dp == '+' || *dp == '-' || *dp == ' ') {
+ sign = dp++;
+ }
+ /* integer part */
+ assert(isdigit((uchar)*dp));
+ intpart = dp++;
+ while (isdigit((uchar)*dp)) {
+ dp++;
+ }
+ n_int = (mpd_ssize_t)(dp-intpart);
+ /* decimal point */
+ if (*dp == '.') {
+ dp++; dot = spec->dot;
+ }
+ /* rest */
+ rest = dp;
+ n_rest = result->nbytes - (mpd_ssize_t)(dp-result->data);
+
+ if (dot == NULL && (*spec->sep == '\0' || *spec->grouping == '\0')) {
+ /* _mpd_add_sep_dot() would not change anything */
+ return 1;
+ }
+
+ /* Determine the size of the new decimal string after inserting the
+ * decimal point, optional separators and optional padding. */
+ decstring = result->data;
+ result->data = NULL;
+ _mpd_add_sep_dot(result, sign, intpart, n_int, dot,
+ rest, n_rest, spec);
+
+ result->data = mpd_alloc(result->nbytes+1, 1);
+ if (result->data == NULL) {
+ *status |= MPD_Malloc_error;
+ mpd_free(decstring);
+ return 0;
+ }
+
+ /* Perform actual writes. */
+ _mpd_add_sep_dot(result, sign, intpart, n_int, dot,
+ rest, n_rest, spec);
+
+ mpd_free(decstring);
+ return 1;
+}
+
+/* Add padding to the formatted string if necessary. */
+static int
+_mpd_add_pad(mpd_mbstr_t *result, const mpd_spec_t *spec, uint32_t *status)
+{
+ if (result->nchars < spec->min_width) {
+ mpd_ssize_t add_chars, add_bytes;
+ size_t lpad = 0, rpad = 0;
+ size_t n_fill, len, i, j;
+ char align = spec->align;
+ uint8_t err = 0;
+ char *cp;
+
+ n_fill = strlen(spec->fill);
+ add_chars = (spec->min_width - result->nchars);
+ /* max value: MPD_MAX_PREC * 4 */
+ add_bytes = add_chars * (mpd_ssize_t)n_fill;
+
+ cp = result->data = mpd_realloc(result->data,
+ result->nbytes+add_bytes+1,
+ sizeof *result->data, &err);
+ if (err) {
+ *status |= MPD_Malloc_error;
+ mpd_free(result->data);
+ return 0;
+ }
+
+ if (align == 'z') {
+ align = '=';
+ }
+
+ if (align == '<') {
+ rpad = add_chars;
+ }
+ else if (align == '>' || align == '=') {
+ lpad = add_chars;
+ }
+ else { /* align == '^' */
+ lpad = add_chars/2;
+ rpad = add_chars-lpad;
+ }
+
+ len = result->nbytes;
+ if (align == '=' && (*cp == '-' || *cp == '+' || *cp == ' ')) {
+ /* leave sign in the leading position */
+ cp++; len--;
+ }
+
+ memmove(cp+n_fill*lpad, cp, len);
+ for (i = 0; i < lpad; i++) {
+ for (j = 0; j < n_fill; j++) {
+ cp[i*n_fill+j] = spec->fill[j];
+ }
+ }
+ cp += (n_fill*lpad + len);
+ for (i = 0; i < rpad; i++) {
+ for (j = 0; j < n_fill; j++) {
+ cp[i*n_fill+j] = spec->fill[j];
+ }
+ }
+
+ result->nbytes += add_bytes;
+ result->nchars += add_chars;
+ result->data[result->nbytes] = '\0';
+ }
+
+ return 1;
+}
+
+/* Round a number to prec digits. The adjusted exponent stays the same
+ or increases by one if rounding up crosses a power of ten boundary.
+ If result->digits would exceed MPD_MAX_PREC+1, MPD_Invalid_operation
+ is set and the result is NaN. */
+static inline void
+_mpd_round(mpd_t *result, const mpd_t *a, mpd_ssize_t prec,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_ssize_t exp = a->exp + a->digits - prec;
+
+ if (prec <= 0) {
+ mpd_seterror(result, MPD_Invalid_operation, status); /* GCOV_NOT_REACHED */
+ return; /* GCOV_NOT_REACHED */
+ }
+ if (mpd_isspecial(a) || mpd_iszero(a)) {
+ mpd_qcopy(result, a, status); /* GCOV_NOT_REACHED */
+ return; /* GCOV_NOT_REACHED */
+ }
+
+ mpd_qrescale_fmt(result, a, exp, ctx, status);
+ if (result->digits > prec) {
+ mpd_qrescale_fmt(result, result, exp+1, ctx, status);
+ }
+}
+
+/*
+ * Return the string representation of an mpd_t, formatted according to 'spec'.
+ * The format specification is assumed to be valid. Memory errors are indicated
+ * as usual. This function is quiet.
+ */
+char *
+mpd_qformat_spec(const mpd_t *dec, const mpd_spec_t *spec,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_uint_t dt[MPD_MINALLOC_MAX];
+ mpd_t tmp = {MPD_STATIC|MPD_STATIC_DATA,0,0,0,MPD_MINALLOC_MAX,dt};
+ mpd_ssize_t dplace = MPD_DEFAULT_DOTPLACE;
+ mpd_mbstr_t result;
+ mpd_spec_t stackspec;
+ char type = spec->type;
+ int flags = 0;
+
+
+ if (spec->min_width > MPD_MAX_PREC) {
+ *status |= MPD_Invalid_operation;
+ return NULL;
+ }
+
+ if (isupper((uchar)type)) {
+ type = tolower((uchar)type);
+ flags |= MPD_FMT_UPPER;
+ }
+ if (spec->sign == ' ') {
+ flags |= MPD_FMT_SIGN_SPACE;
+ }
+ else if (spec->sign == '+') {
+ flags |= MPD_FMT_SIGN_PLUS;
+ }
+
+ if (mpd_isspecial(dec)) {
+ if (spec->align == 'z') {
+ stackspec = *spec;
+ stackspec.fill[0] = ' ';
+ stackspec.fill[1] = '\0';
+ stackspec.align = '>';
+ spec = &stackspec;
+ }
+ }
+ else {
+ uint32_t workstatus = 0;
+ mpd_ssize_t prec;
+
+ switch (type) {
+ case 'g': flags |= MPD_FMT_TOSCI; break;
+ case 'e': flags |= MPD_FMT_EXP; break;
+ case '%': flags |= MPD_FMT_PERCENT;
+ if (!mpd_qcopy(&tmp, dec, status)) {
+ return NULL;
+ }
+ tmp.exp += 2;
+ dec = &tmp;
+ type = 'f'; /* fall through */
+ case 'f': flags |= MPD_FMT_FIXED; break;
+ default: abort(); /* debug: GCOV_NOT_REACHED */
+ }
+
+ if (spec->prec >= 0) {
+ if (spec->prec > MPD_MAX_PREC) {
+ *status |= MPD_Invalid_operation;
+ goto error;
+ }
+
+ switch (type) {
+ case 'g':
+ prec = (spec->prec == 0) ? 1 : spec->prec;
+ if (dec->digits > prec) {
+ _mpd_round(&tmp, dec, prec, ctx,
+ &workstatus);
+ dec = &tmp;
+ }
+ break;
+ case 'e':
+ if (mpd_iszero(dec)) {
+ dplace = 1-spec->prec;
+ }
+ else {
+ _mpd_round(&tmp, dec, spec->prec+1, ctx,
+ &workstatus);
+ dec = &tmp;
+ }
+ break;
+ case 'f':
+ mpd_qrescale(&tmp, dec, -spec->prec, ctx,
+ &workstatus);
+ dec = &tmp;
+ break;
+ }
+ }
+
+ if (type == 'f') {
+ if (mpd_iszero(dec) && dec->exp > 0) {
+ mpd_qrescale(&tmp, dec, 0, ctx, &workstatus);
+ dec = &tmp;
+ }
+ }
+
+ if (workstatus&MPD_Errors) {
+ *status |= (workstatus&MPD_Errors);
+ goto error;
+ }
+ }
+
+ /*
+ * At this point, for all scaled or non-scaled decimals:
+ * 1) 1 <= digits <= MAX_PREC+1
+ * 2) adjexp(scaled) = adjexp(orig) [+1]
+ * 3) case 'g': MIN_ETINY <= exp <= MAX_EMAX+1
+ * case 'e': MIN_ETINY-MAX_PREC <= exp <= MAX_EMAX+1
+ * case 'f': MIN_ETINY <= exp <= MAX_EMAX+1
+ * 4) max memory alloc in _mpd_to_string:
+ * case 'g': MAX_PREC+36
+ * case 'e': MAX_PREC+36
+ * case 'f': 2*MPD_MAX_PREC+30
+ */
+ result.nbytes = _mpd_to_string(&result.data, dec, flags, dplace);
+ result.nchars = result.nbytes;
+ if (result.nbytes < 0) {
+ *status |= MPD_Malloc_error;
+ goto error;
+ }
+
+ if (*spec->dot != '\0' && !mpd_isspecial(dec)) {
+ if (result.nchars > MPD_MAX_PREC+36) {
+ /* Since a group length of one is not explicitly
+ * disallowed, ensure that it is always possible to
+ * insert a four byte separator after each digit. */
+ *status |= MPD_Invalid_operation;
+ mpd_free(result.data);
+ goto error;
+ }
+ if (!_mpd_apply_lconv(&result, spec, status)) {
+ goto error;
+ }
+ }
+
+ if (spec->min_width) {
+ if (!_mpd_add_pad(&result, spec, status)) {
+ goto error;
+ }
+ }
+
+ mpd_del(&tmp);
+ return result.data;
+
+error:
+ mpd_del(&tmp);
+ return NULL;
+}
+
+char *
+mpd_qformat(const mpd_t *dec, const char *fmt, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_spec_t spec;
+
+ if (!mpd_parse_fmt_str(&spec, fmt, 1)) {
+ *status |= MPD_Invalid_operation;
+ return NULL;
+ }
+
+ return mpd_qformat_spec(dec, &spec, ctx, status);
+}
+
+/*
+ * The specification has a *condition* called Invalid_operation and an
+ * IEEE *signal* called Invalid_operation. The former corresponds to
+ * MPD_Invalid_operation, the latter to MPD_IEEE_Invalid_operation.
+ * MPD_IEEE_Invalid_operation comprises the following conditions:
+ *
+ * [MPD_Conversion_syntax, MPD_Division_impossible, MPD_Division_undefined,
+ * MPD_Fpu_error, MPD_Invalid_context, MPD_Invalid_operation,
+ * MPD_Malloc_error]
+ *
+ * In the following functions, 'flag' denotes the condition, 'signal'
+ * denotes the IEEE signal.
+ */
+
+static const char *mpd_flag_string[MPD_NUM_FLAGS] = {
+ "Clamped",
+ "Conversion_syntax",
+ "Division_by_zero",
+ "Division_impossible",
+ "Division_undefined",
+ "Fpu_error",
+ "Inexact",
+ "Invalid_context",
+ "Invalid_operation",
+ "Malloc_error",
+ "Not_implemented",
+ "Overflow",
+ "Rounded",
+ "Subnormal",
+ "Underflow",
+};
+
+static const char *mpd_signal_string[MPD_NUM_FLAGS] = {
+ "Clamped",
+ "IEEE_Invalid_operation",
+ "Division_by_zero",
+ "IEEE_Invalid_operation",
+ "IEEE_Invalid_operation",
+ "IEEE_Invalid_operation",
+ "Inexact",
+ "IEEE_Invalid_operation",
+ "IEEE_Invalid_operation",
+ "IEEE_Invalid_operation",
+ "Not_implemented",
+ "Overflow",
+ "Rounded",
+ "Subnormal",
+ "Underflow",
+};
+
+/* print conditions to buffer, separated by spaces */
+int
+mpd_snprint_flags(char *dest, int nmemb, uint32_t flags)
+{
+ char *cp;
+ int n, j;
+
+ assert(nmemb >= MPD_MAX_FLAG_STRING);
+
+ *dest = '\0'; cp = dest;
+ for (j = 0; j < MPD_NUM_FLAGS; j++) {
+ if (flags & (1U<<j)) {
+ n = snprintf(cp, nmemb, "%s ", mpd_flag_string[j]);
+ if (n < 0 || n >= nmemb) return -1;
+ cp += n; nmemb -= n;
+ }
+ }
+
+ if (cp != dest) {
+ *(--cp) = '\0';
+ }
+
+ return (int)(cp-dest);
+}
+
+/* print conditions to buffer, in list form */
+int
+mpd_lsnprint_flags(char *dest, int nmemb, uint32_t flags, const char *flag_string[])
+{
+ char *cp;
+ int n, j;
+
+ assert(nmemb >= MPD_MAX_FLAG_LIST);
+ if (flag_string == NULL) {
+ flag_string = mpd_flag_string;
+ }
+
+ *dest = '[';
+ *(dest+1) = '\0';
+ cp = dest+1;
+ --nmemb;
+
+ for (j = 0; j < MPD_NUM_FLAGS; j++) {
+ if (flags & (1U<<j)) {
+ n = snprintf(cp, nmemb, "%s, ", flag_string[j]);
+ if (n < 0 || n >= nmemb) return -1;
+ cp += n; nmemb -= n;
+ }
+ }
+
+ /* erase the last ", " */
+ if (cp != dest+1) {
+ cp -= 2;
+ }
+
+ *cp++ = ']';
+ *cp = '\0';
+
+ return (int)(cp-dest); /* strlen, without NUL terminator */
+}
+
+/* print signals to buffer, in list form */
+int
+mpd_lsnprint_signals(char *dest, int nmemb, uint32_t flags, const char *signal_string[])
+{
+ char *cp;
+ int n, j;
+ int ieee_invalid_done = 0;
+
+ assert(nmemb >= MPD_MAX_SIGNAL_LIST);
+ if (signal_string == NULL) {
+ signal_string = mpd_signal_string;
+ }
+
+ *dest = '[';
+ *(dest+1) = '\0';
+ cp = dest+1;
+ --nmemb;
+
+ for (j = 0; j < MPD_NUM_FLAGS; j++) {
+ uint32_t f = flags & (1U<<j);
+ if (f) {
+ if (f&MPD_IEEE_Invalid_operation) {
+ if (ieee_invalid_done) {
+ continue;
+ }
+ ieee_invalid_done = 1;
+ }
+ n = snprintf(cp, nmemb, "%s, ", signal_string[j]);
+ if (n < 0 || n >= nmemb) return -1;
+ cp += n; nmemb -= n;
+ }
+ }
+
+ /* erase the last ", " */
+ if (cp != dest+1) {
+ cp -= 2;
+ }
+
+ *cp++ = ']';
+ *cp = '\0';
+
+ return (int)(cp-dest); /* strlen, without NUL terminator */
+}
+
+/* The following two functions are mainly intended for debugging. */
+void
+mpd_fprint(FILE *file, const mpd_t *dec)
+{
+ char *decstring;
+
+ decstring = mpd_to_sci(dec, 1);
+ if (decstring != NULL) {
+ fprintf(file, "%s\n", decstring);
+ mpd_free(decstring);
+ }
+ else {
+ fputs("mpd_fprint: output error\n", file); /* GCOV_NOT_REACHED */
+ }
+}
+
+void
+mpd_print(const mpd_t *dec)
+{
+ char *decstring;
+
+ decstring = mpd_to_sci(dec, 1);
+ if (decstring != NULL) {
+ printf("%s\n", decstring);
+ mpd_free(decstring);
+ }
+ else {
+        fputs("mpd_print: output error\n", stderr); /* GCOV_NOT_REACHED */
+ }
+}
+
+
diff --git a/Modules/_decimal/libmpdec/io.h b/Modules/_decimal/libmpdec/io.h
new file mode 100644
index 0000000000..3dfce732aa
--- /dev/null
+++ b/Modules/_decimal/libmpdec/io.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef IO_H
+#define IO_H
+
+
+#include <errno.h>
+#include "mpdecimal.h"
+
+
+#if SIZE_MAX == MPD_SIZE_MAX
+ #define mpd_strtossize _mpd_strtossize
+#else
+static inline mpd_ssize_t
+mpd_strtossize(const char *s, char **end, int base)
+{
+ int64_t retval;
+
+ errno = 0;
+ retval = _mpd_strtossize(s, end, base);
+ if (errno == 0 && (retval > MPD_SSIZE_MAX || retval < MPD_SSIZE_MIN)) {
+ errno = ERANGE;
+ }
+ if (errno == ERANGE) {
+ return (retval < 0) ? MPD_SSIZE_MIN : MPD_SSIZE_MAX;
+ }
+
+ return (mpd_ssize_t)retval;
+}
+#endif
+
+
+#endif
diff --git a/Modules/_decimal/libmpdec/literature/REFERENCES.txt b/Modules/_decimal/libmpdec/literature/REFERENCES.txt
new file mode 100644
index 0000000000..9ed5782656
--- /dev/null
+++ b/Modules/_decimal/libmpdec/literature/REFERENCES.txt
@@ -0,0 +1,51 @@
+
+
+This document contains links to the literature used in the process of
+creating the library. The list is probably not complete.
+
+
+Mike Cowlishaw: General Decimal Arithmetic Specification
+http://speleotrove.com/decimal/decarith.html
+
+
+Jean-Michel Muller: On the definition of ulp (x)
+lara.inist.fr/bitstream/2332/518/1/LIP-RR2005-09.pdf
+
+
+T. E. Hull, A. Abrham: Properly rounded variable precision square root
+http://portal.acm.org/citation.cfm?id=214413
+
+
+T. E. Hull, A. Abrham: Variable precision exponential function
+http://portal.acm.org/citation.cfm?id=6498
+
+
+Roman E. Maeder: Storage allocation for the Karatsuba integer multiplication
+algorithm. http://www.springerlink.com/content/w15058mj6v59t565/
+
+
+J. M. Pollard: The fast Fourier transform in a finite field
+http://www.ams.org/journals/mcom/1971-25-114/S0025-5718-1971-0301966-0/home.html
+
+
+David H. Bailey: FFTs in External or Hierarchical Memory
+http://crd.lbl.gov/~dhbailey/dhbpapers/
+
+
+W. Morven Gentleman: Matrix Multiplication and Fast Fourier Transforms
+http://www.alcatel-lucent.com/bstj/vol47-1968/articles/bstj47-6-1099.pdf
+
+
+Mikko Tommila: Apfloat documentation
+http://www.apfloat.org/apfloat/2.41/apfloat.pdf
+
+
+Joerg Arndt: "Matters Computational"
+http://www.jjj.de/fxt/
+
+
+Karl Hasselstrom: Fast Division of Large Integers
+www.treskal.com/kalle/exjobb/original-report.pdf
+
+
+
diff --git a/Modules/_decimal/libmpdec/literature/bignum.txt b/Modules/_decimal/libmpdec/literature/bignum.txt
new file mode 100644
index 0000000000..8a8731da9b
--- /dev/null
+++ b/Modules/_decimal/libmpdec/literature/bignum.txt
@@ -0,0 +1,83 @@
+
+
+Bignum support (Fast Number Theoretic Transform or FNT):
+========================================================
+
+Bignum arithmetic in libmpdec uses the scheme for fast convolution
+of integer sequences from:
+
+J. M. Pollard: The fast Fourier transform in a finite field
+http://www.ams.org/journals/mcom/1971-25-114/S0025-5718-1971-0301966-0/home.html
+
+
+The transform in a finite field can be used for convolution in the same
+way as the Fourier Transform. The main advantages of the Number Theoretic
+Transform are that it is both exact and very memory efficient.
+
+
+Convolution in pseudo-code:
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ fnt_convolute(a, b):
+ x = fnt(a) # forward transform of a
+ y = fnt(b) # forward transform of b
+ z = pairwise multiply x[i] and y[i]
+ result = inv_fnt(z) # backward transform of z.
+
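+
+For multiplication, the relevant property is that if the two coefficient
+sequences are the little-endian base RADIX digits of two integers, their
+(acyclic) convolution evaluates to the product of the integers. A minimal
+plain-Python sketch (illustrative only, not libmpdec code; a naive O(n**2)
+loop stands in for fnt_convolute):
+
+RADIX = 10**9
+
+def convolute(a, b):
+    # Acyclic convolution of two little-endian coefficient sequences.
+    c = [0] * (len(a) + len(b))
+    for i, x in enumerate(a):
+        for j, y in enumerate(b):
+            c[i + j] += x * y
+    return c
+
+def to_int(digits):
+    # Evaluate little-endian base-RADIX digits (carries are implied).
+    return sum(d * RADIX**i for i, d in enumerate(digits))
+
+u, v = 123456789123456789, 987654321
+a = [123456789, 123456789]    # u in little-endian base RADIX
+b = [987654321]               # v in little-endian base RADIX
+assert to_int(convolute(a, b)) == u * v
+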
+
+Extending the maximum transform length (Chinese Remainder Theorem):
+-------------------------------------------------------------------
+
+The maximum transform length is quite limited when using a single
+prime field. However, it is possible to use multiple primes and
+recover the result using the Chinese Remainder Theorem.
+
+
+Multiplication in pseudo-code:
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ _mpd_fntmul(u, v):
+ c1 = fnt_convolute(u, v, P1) # convolute modulo prime1
+ c2 = fnt_convolute(u, v, P2) # convolute modulo prime2
+ c3 = fnt_convolute(u, v, P3) # convolute modulo prime3
+ result = crt3(c1, c2, c3) # Chinese Remainder Theorem
+
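+
+A minimal plain-Python sketch of the recombination step, using the three
+64-bit primes (illustrative only; this is the textbook construction applied
+to a single residue triple, not the word-by-word crt3() from crt.c):
+
+P1 = 2**64 - 2**32 + 1
+P2 = 2**64 - 2**34 + 1
+P3 = 2**64 - 2**40 + 1
+
+def crt3(c1, c2, c3):
+    # Recover x (mod P1*P2*P3) from its residues modulo the three primes.
+    M = P1 * P2 * P3
+    x = 0
+    for r, p in ((c1, P1), (c2, P2), (c3, P3)):
+        m = M // p
+        x += r * m * pow(m, p - 2, p)    # pow(m, p-2, p): inverse of m mod p
+    return x % M
+
+x = 123456789123456789123456789123456789
+assert crt3(x % P1, x % P2, x % P3) == x
+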
+
+Optimized transform functions:
+------------------------------
+
+There are three different fnt() functions:
+
+ std_fnt: "standard" decimation in frequency transform for array lengths
+ of 2**n. Performs well up to 1024 words.
+
+ sixstep: Cache-friendly algorithm for array lengths of 2**n. Outperforms
+ std_fnt for large arrays.
+
+ fourstep: Algorithm for array lengths of 3 * 2**n. Also cache friendly
+ in large parts.
+
+
+List of bignum-only files:
+--------------------------
+
+Functions from these files are only used in _mpd_fntmul().
+
+ umodarith.h -> fast low level routines for unsigned modular arithmetic
+ numbertheory.c -> routines for setting up the FNT
+ difradix2.c -> decimation in frequency transform, used as the
+ "base case" by the following three files:
+
+ fnt.c -> standard transform for smaller arrays
+ sixstep.c -> transform large arrays of length 2**n
+ fourstep.c -> transform arrays of length 3 * 2**n
+
+ convolute.c -> do the actual fast convolution, using one of
+ the three transform functions.
+ transpose.c -> transpositions needed for the sixstep algorithm.
+ crt.c -> Chinese Remainder Theorem: use information from three
+ transforms modulo three different primes to get the
+ final result.
+
+
+
diff --git a/Modules/_decimal/libmpdec/literature/fnt.py b/Modules/_decimal/libmpdec/literature/fnt.py
new file mode 100644
index 0000000000..bf937459f5
--- /dev/null
+++ b/Modules/_decimal/libmpdec/literature/fnt.py
@@ -0,0 +1,208 @@
+#
+# Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+
+
+######################################################################
+# This file lists and checks some of the constants and limits used #
+# in libmpdec's Number Theoretic Transform. At the end of the file #
+# there is an example function for the plain DFT transform. #
+######################################################################
+
+
+#
+# Number theoretic transforms are done in subfields of F(p). P[i]
+# are the primes, D[i] = P[i] - 1 are highly composite and w[i]
+# are the respective primitive roots of F(p).
+#
+# The strategy is to convolute two coefficients modulo all three
+# primes, then use the Chinese Remainder Theorem on the three
+# result arrays to recover the result in the usual base RADIX
+# form.
+#
+
+# ======================================================================
+# Primitive roots
+# ======================================================================
+
+#
+# Verify primitive roots:
+#
+# For a prime field, r is a primitive root if and only if for all prime
+# factors f of p-1, r**((p-1)/f) =/= 1 (mod p).
+#
+def prod(F, E):
+ """Check that the factorization of P-1 is correct. F is the list of
+ factors of P-1, E lists the number of occurrences of each factor."""
+ x = 1
+ for y, z in zip(F, E):
+ x *= y**z
+ return x
+
+def is_primitive_root(r, p, factors, exponents):
+ """Check if r is a primitive root of F(p)."""
+ if p != prod(factors, exponents) + 1:
+ return False
+ for f in factors:
+ q, control = divmod(p-1, f)
+ if control != 0:
+ return False
+ if pow(r, q, p) == 1:
+ return False
+ return True
+
+
+# =================================================================
+# Constants and limits for the 64-bit version
+# =================================================================
+
+RADIX = 10**19
+
+# Primes P1, P2 and P3:
+P = [2**64-2**32+1, 2**64-2**34+1, 2**64-2**40+1]
+
+# P-1, highly composite. The transform length d is variable and
+# must divide D = P-1. Since all D are divisible by 3 * 2**32,
+# transform lengths can be 2**n or 3 * 2**n (where n <= 32).
+D = [2**32 * 3 * (5 * 17 * 257 * 65537),
+ 2**34 * 3**2 * (7 * 11 * 31 * 151 * 331),
+ 2**40 * 3**2 * (5 * 7 * 13 * 17 * 241)]
+
+# Prime factors of P-1 and their exponents:
+F = [(2,3,5,17,257,65537), (2,3,7,11,31,151,331), (2,3,5,7,13,17,241)]
+E = [(32,1,1,1,1,1), (34,2,1,1,1,1,1), (40,2,1,1,1,1,1)]
+
+# Maximum transform length for 2**n. Above that only 3 * 2**31
+# or 3 * 2**32 are possible.
+MPD_MAXTRANSFORM_2N = 2**32
+
+
+# Limits in the terminology of Pollard's paper:
+m2 = (MPD_MAXTRANSFORM_2N * 3) // 2 # Maximum length of the smaller array.
+M1 = M2 = RADIX-1 # Maximum value per single word.
+L = m2 * M1 * M2
+assert P[0] * P[1] * P[2] > 2 * L
+
+
+# Primitive roots of F(P1), F(P2) and F(P3):
+w = [7, 10, 19]
+
+# The primitive roots are correct:
+for i in range(3):
+ if not is_primitive_root(w[i], P[i], F[i], E[i]):
+ print("FAIL")
+
+
+# =================================================================
+# Constants and limits for the 32-bit version
+# =================================================================
+
+RADIX = 10**9
+
+# Primes P1, P2 and P3:
+P = [2113929217, 2013265921, 1811939329]
+
+# P-1, highly composite. All D = P-1 are divisible by 3 * 2**25,
+# allowing for transform lengths up to 3 * 2**25 words.
+D = [2**25 * 3**2 * 7,
+ 2**27 * 3 * 5,
+ 2**26 * 3**3]
+
+# Prime factors of P-1 and their exponents:
+F = [(2,3,7), (2,3,5), (2,3)]
+E = [(25,2,1), (27,1,1), (26,3)]
+
+# Maximum transform length for 2**n. Above that only 3 * 2**24 or
+# 3 * 2**25 are possible.
+MPD_MAXTRANSFORM_2N = 2**25
+
+
+# Limits in the terminology of Pollard's paper:
+m2 = (MPD_MAXTRANSFORM_2N * 3) // 2 # Maximum length of the smaller array.
+M1 = M2 = RADIX-1 # Maximum value per single word.
+L = m2 * M1 * M2
+assert P[0] * P[1] * P[2] > 2 * L
+
+
+# Primitive roots of F(P1), F(P2) and F(P3):
+w = [5, 31, 13]
+
+# The primitive roots are correct:
+for i in range(3):
+ if not is_primitive_root(w[i], P[i], F[i], E[i]):
+ print("FAIL")
+
+
+# ======================================================================
+# Example transform using a single prime
+# ======================================================================
+
+def ntt(lst, dir):
+ """Perform a transform on the elements of lst. len(lst) must
+ be 2**n or 3 * 2**n, where n <= 25. This is the slow DFT."""
+ p = 2113929217 # prime
+ d = len(lst) # transform length
+ d_prime = pow(d, (p-2), p) # inverse of d
+ xi = (p-1)//d
+ w = 5 # primitive root of F(p)
+ r = pow(w, xi, p) # primitive root of the subfield
+ r_prime = pow(w, (p-1-xi), p) # inverse of r
+ if dir == 1: # forward transform
+ a = lst # input array
+ A = [0] * d # transformed values
+ for i in range(d):
+ s = 0
+ for j in range(d):
+ s += a[j] * pow(r, i*j, p)
+ A[i] = s % p
+ return A
+ elif dir == -1: # backward transform
+ A = lst # input array
+ a = [0] * d # transformed values
+ for j in range(d):
+ s = 0
+ for i in range(d):
+ s += A[i] * pow(r_prime, i*j, p)
+ a[j] = (d_prime * s) % p
+ return a
+
+def ntt_convolute(a, b):
+ """convolute arrays a and b."""
+ assert(len(a) == len(b))
+ x = ntt(a, 1)
+ y = ntt(b, 1)
+ for i in range(len(a)):
+ y[i] = y[i] * x[i]
+ r = ntt(y, -1)
+ return r
+
+
+# Example: Two arrays representing 21 and 81 in little-endian:
+a = [1, 2, 0, 0]
+b = [1, 8, 0, 0]
+
+assert(ntt_convolute(a, b) == [1, 10, 16, 0])
+assert(21 * 81 == (1*10**0 + 10*10**1 + 16*10**2 + 0*10**3))
diff --git a/Modules/_decimal/libmpdec/literature/matrix-transform.txt b/Modules/_decimal/libmpdec/literature/matrix-transform.txt
new file mode 100644
index 0000000000..ff6219810e
--- /dev/null
+++ b/Modules/_decimal/libmpdec/literature/matrix-transform.txt
@@ -0,0 +1,256 @@
+
+
+(* Copyright (c) 2011 Stefan Krah. All rights reserved. *)
+
+
+The Matrix Fourier Transform:
+=============================
+
+In libmpdec, the Matrix Fourier Transform [1] is called four-step transform
+after a variant that appears in [2]. The algorithm requires that the input
+array can be viewed as an R*C matrix.
+
+All operations are done modulo p. For readability, the proofs drop all
+instances of (mod p).
+
+
+Algorithm four-step (forward transform):
+----------------------------------------
+
+ a := input array
+ d := len(a) = R * C
+ p := prime
+ w := primitive root of unity of the prime field
+ r := w**((p-1)/d)
+ A := output array
+
+ 1) Apply a length R FNT to each column.
+
+ 2) Multiply each matrix element (addressed by j*C+m) by r**(j*m).
+
+ 3) Apply a length C FNT to each row.
+
+ 4) Transpose the matrix.
+
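+
+The four steps can be checked against the direct transform in plain Python
+(an illustrative sketch only, not libmpdec code: it uses a naive O(d**2)
+transform and the 32-bit prime p = 2113929217 with primitive root w = 5
+that also appear in fnt.py):
+
+p, w = 2113929217, 5
+
+def fnt(a, root):
+    # Naive length len(a) transform over F(p) with the given root.
+    d = len(a)
+    return [sum(a[l] * pow(root, k * l, p) for l in range(d)) % p
+            for k in range(d)]
+
+def four_step(a, R, C):
+    # Forward Matrix Fourier Transform of a, viewed as an R*C matrix.
+    d = R * C
+    r = pow(w, (p - 1) // d, p)            # root for length d
+    r_R = pow(w, (p - 1) // R, p)          # root of the length R subfield
+    r_C = pow(w, (p - 1) // C, p)          # root of the length C subfield
+    m = [a[i*C:(i+1)*C] for i in range(R)]
+    for j in range(C):                     # 1) length R FNT on each column
+        col = fnt([m[i][j] for i in range(R)], r_R)
+        for i in range(R):
+            m[i][j] = col[i]
+    for j in range(R):                     # 2) multiply element j*C+k by r**(j*k)
+        for k in range(C):
+            m[j][k] = m[j][k] * pow(r, j * k, p) % p
+    m = [fnt(row, r_C) for row in m]       # 3) length C FNT on each row
+    return [m[i][n] for n in range(C)      # 4) transpose
+                    for i in range(R)]
+
+a = [3, 1, 4, 1, 5, 9, 2, 6]               # d = 8, viewed as a 2x4 matrix
+assert four_step(a, 2, 4) == fnt(a, pow(w, (p - 1) // 8, p))
+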
+
+Proof (forward transform):
+--------------------------
+
+ The algorithm can be derived starting from the regular definition of
+ the finite-field transform of length d:
+
+ d-1
+ ,----
+ \
+ A[k] = | a[l] * r**(k * l)
+ /
+ `----
+ l = 0
+
+
+ The sum can be rearranged into the sum of the sums of columns:
+
+ C-1 R-1
+ ,---- ,----
+ \ \
+ = | | a[i * C + j] * r**(k * (i * C + j))
+ / /
+ `---- `----
+ j = 0 i = 0
+
+
+ Extracting a constant from the inner sum:
+
+ C-1 R-1
+ ,---- ,----
+ \ \
+ = | r**k*j * | a[i * C + j] * r**(k * i * C)
+ / /
+ `---- `----
+ j = 0 i = 0
+
+
+ Without any loss of generality, let k = n * R + m,
+ where n < C and m < R:
+
+ C-1 R-1
+ ,---- ,----
+ \ \
+ A[n*R+m] = | r**(R*n*j) * r**(m*j) * | a[i*C+j] * r**(R*C*n*i) * r**(C*m*i)
+ / /
+ `---- `----
+ j = 0 i = 0
+
+
+ Since r = w ** ((p-1) / (R*C)):
+
+ a) r**(R*C*n*i) = w**((p-1)*n*i) = 1
+
+ b) r**(C*m*i) = w**((p-1) / R) ** (m*i) = r_R ** (m*i)
+
+ c) r**(R*n*j) = w**((p-1) / C) ** (n*j) = r_C ** (n*j)
+
+ r_R := root of the subfield of length R.
+ r_C := root of the subfield of length C.
+
+
+ C-1 R-1
+ ,---- ,----
+ \ \
+ A[n*R+m] = | r_C**(n*j) * [ r**(m*j) * | a[i*C+j] * r_R**(m*i) ]
+ / ^ /
+ `---- | `---- 1) transform the columns
+ j = 0 | i = 0
+ ^ |
+ | `-- 2) multiply
+ |
+ `-- 3) transform the rows
+
+
+ Note that the entire RHS is a function of n and m and that the results
+ for each pair (n, m) are stored in Fortran order.
+
+ Let the term in square brackets be f(m, j). Step 1) and 2) precalculate
+ the term for all (m, j). After that, the original matrix is now a lookup
+ table with the mth element in the jth column at location m * C + j.
+
+ Let the complete RHS be g(m, n). Step 3) does an in-place transform of
+    length C on all rows. After that, the original matrix is now a lookup
+ table with the mth element in the nth column at location m * C + n.
+
+ But each (m, n) pair should be written to location n * R + m. Therefore,
+ step 4) transposes the result of step 3).
+
+
+
+Algorithm four-step (inverse transform):
+----------------------------------------
+
+ A := input array
+ d := len(A) = R * C
+ p := prime
+ d' := d**(p-2) # inverse of d
+ w := primitive root of unity of the prime field
+ r := w**((p-1)/d) # root of the subfield
+ r' := w**((p-1) - (p-1)/d) # inverse of r
+ a := output array
+
+ 0) View the matrix as a C*R matrix.
+
+ 1) Transpose the matrix, producing an R*C matrix.
+
+ 2) Apply a length C FNT to each row.
+
+ 3) Multiply each matrix element (addressed by i*C+n) by r**(i*n).
+
+ 4) Apply a length R FNT to each column.
+
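+
+A matching plain-Python sketch (again illustrative only, with the naive
+transform and constants from the forward sketch above; all roots are the
+inverse roots r', r_R', r_C' from the proof below):
+
+p, w = 2113929217, 5
+
+def fnt(a, root):
+    d = len(a)
+    return [sum(a[l] * pow(root, k * l, p) for l in range(d)) % p
+            for k in range(d)]
+
+def inv_four_step(A, R, C):
+    # Inverse Matrix Fourier Transform of A, len(A) = R*C.
+    d = R * C
+    r_inv = pow(w, (p - 1) - (p - 1) // d, p)    # r'
+    rR_inv = pow(w, (p - 1) - (p - 1) // R, p)   # r_R'
+    rC_inv = pow(w, (p - 1) - (p - 1) // C, p)   # r_C'
+    d_inv = pow(d, p - 2, p)                     # d'
+    # 0) + 1) view A as a C*R matrix and transpose it to R*C
+    m = [[A[j*R + i] for j in range(C)] for i in range(R)]
+    m = [fnt(row, rC_inv) for row in m]          # 2) length C FNT on each row
+    for i in range(R):                           # 3) multiply element i*C+n by r'**(i*n)
+        for n in range(C):
+            m[i][n] = m[i][n] * pow(r_inv, i * n, p) % p
+    a = [0] * d
+    for n in range(C):                           # 4) length R FNT on each column
+        col = fnt([m[i][n] for i in range(R)], rR_inv)
+        for k in range(R):
+            a[k*C + n] = col[k] * d_inv % p      # scale by d' = R' * C'
+    return a
+
+a = [3, 1, 4, 1, 5, 9, 2, 6]
+A = fnt(a, pow(w, (p - 1) // 8, p))              # direct forward transform
+assert inv_four_step(A, 2, 4) == a
+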
+
+Proof (inverse transform):
+--------------------------
+
+ The algorithm can be derived starting from the regular definition of
+ the finite-field inverse transform of length d:
+
+ d-1
+ ,----
+ \
+ a[k] = d' * | A[l] * r' ** (k * l)
+ /
+ `----
+ l = 0
+
+
+ The sum can be rearranged into the sum of the sums of columns. Note
+ that at this stage we still have a C*R matrix, so C denotes the number
+ of rows:
+
+ R-1 C-1
+ ,---- ,----
+ \ \
+ = d' * | | a[j * R + i] * r' ** (k * (j * R + i))
+ / /
+ `---- `----
+ i = 0 j = 0
+
+
+ Extracting a constant from the inner sum:
+
+ R-1 C-1
+ ,---- ,----
+ \ \
+ = d' * | r' ** (k*i) * | a[j * R + i] * r' ** (k * j * R)
+ / /
+ `---- `----
+ i = 0 j = 0
+
+
+ Without any loss of generality, let k = m * C + n,
+ where m < R and n < C:
+
+ R-1 C-1
+ ,---- ,----
+ \ \
+ A[m*C+n] = d' * | r' ** (C*m*i) * r' ** (n*i) * | a[j*R+i] * r' ** (R*C*m*j) * r' ** (R*n*j)
+ / /
+ `---- `----
+ i = 0 j = 0
+
+
+ Since r' = w**((p-1) - (p-1)/d) and d = R*C:
+
+ a) r' ** (R*C*m*j) = w**((p-1)*R*C*m*j - (p-1)*m*j) = 1
+
+ b) r' ** (C*m*i) = w**((p-1)*C - (p-1)/R) ** (m*i) = r_R' ** (m*i)
+
+ c) r' ** (R*n*j) = r_C' ** (n*j)
+
+ d) d' = d**(p-2) = (R*C) ** (p-2) = R**(p-2) * C**(p-2) = R' * C'
+
+ r_R' := inverse of the root of the subfield of length R.
+ r_C' := inverse of the root of the subfield of length C.
+ R' := inverse of R
+ C' := inverse of C
+
+
+ R-1 C-1
+ ,---- ,---- 2) transform the rows of a^T
+ \ \
+ A[m*C+n] = R' * | r_R' ** (m*i) * [ r' ** (n*i) * C' * | a[j*R+i] * r_C' ** (n*j) ]
+ / ^ / ^
+ `---- | `---- |
+ i = 0 | j = 0 |
+ ^ | `-- 1) Transpose input matrix
+ | `-- 3) multiply to address elements by
+ | i * C + j
+                  `-- 4) transform the columns
+
+
+
+ Note that the entire RHS is a function of m and n and that the results
+ for each pair (m, n) are stored in C order.
+
+ Let the term in square brackets be f(n, i). Without step 1), the sum
+ would perform a length C transform on the columns of the input matrix.
+ This is a) inefficient and b) the results are needed in C order, so
+ step 1) exchanges rows and columns.
+
+ Step 2) and 3) precalculate f(n, i) for all (n, i). After that, the
+ original matrix is now a lookup table with the ith element in the nth
+ column at location i * C + n.
+
+ Let the complete RHS be g(m, n). Step 4) does an in-place transform of
+    length R on all columns. After that, the original matrix is now a lookup
+ table with the mth element in the nth column at location m * C + n,
+ which means that all A[k] = A[m * C + n] are in the correct order.
+
+
+--
+
+ [1] Joerg Arndt: "Matters Computational"
+ http://www.jjj.de/fxt/
+ [2] David H. Bailey: FFTs in External or Hierarchical Memory
+ http://crd.lbl.gov/~dhbailey/dhbpapers/
+
+
+
diff --git a/Modules/_decimal/libmpdec/literature/mulmod-64.txt b/Modules/_decimal/libmpdec/literature/mulmod-64.txt
new file mode 100644
index 0000000000..93bf22e9fe
--- /dev/null
+++ b/Modules/_decimal/libmpdec/literature/mulmod-64.txt
@@ -0,0 +1,127 @@
+
+
+(* Copyright (c) 2011 Stefan Krah. All rights reserved. *)
+
+
+==========================================================================
+ Calculate (a * b) % p using special primes
+==========================================================================
+
+A description of the algorithm can be found in the apfloat manual by
+Tommila [1].
+
+
+Definitions:
+------------
+
+In the whole document, "==" stands for "is congruent with".
+
+Result of a * b in terms of high/low words:
+
+ (1) hi * 2**64 + lo = a * b
+
+Special primes:
+
+ (2) p = 2**64 - z + 1, where z = 2**n
+
+Single step modular reduction:
+
+ (3) R(hi, lo) = hi * z - hi + lo
+
+
+Strategy:
+---------
+
+ a) Set (hi, lo) to the result of a * b.
+
+ b) Set (hi', lo') to the result of R(hi, lo).
+
+ c) Repeat step b) until 0 <= hi' * 2**64 + lo' < 2*p.
+
+ d) If the result is less than p, return lo'. Otherwise return lo' - p.
+
+
+The reduction step b) preserves congruence:
+-------------------------------------------
+
+ hi * 2**64 + lo == hi * z - hi + lo (mod p)
+
+ Proof:
+ ~~~~~~
+
+ hi * 2**64 + lo = (2**64 - z + 1) * hi + z * hi - hi + lo
+
+ = p * hi + z * hi - hi + lo
+
+ == z * hi - hi + lo (mod p)
+
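+
+The congruence can also be spot-checked numerically; a small plain-Python
+sketch (illustrative only, not part of libmpdec; the number of random draws
+is arbitrary):
+
+import random
+
+for n in (32, 34, 40):               # the three special primes use z = 2**n
+    z = 2**n
+    p = 2**64 - z + 1
+    for _ in range(1000):
+        hi = random.randrange(2**64)
+        lo = random.randrange(2**64)
+        assert (hi * 2**64 + lo) % p == (hi*z - hi + lo) % p
+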
+
+Maximum numbers of step b):
+---------------------------
+
+# To avoid unnecessary formalism, define:
+
+def R(hi, lo, z):
+ return divmod(hi * z - hi + lo, 2**64)
+
+# For simplicity, assume hi=2**64-1, lo=2**64-1 after the
+# initial multiplication a * b. This is of course impossible
+# but certainly covers all cases.
+
+# Then, for p1:
+hi=2**64-1; lo=2**64-1; z=2**32
+p1 = 2**64 - z + 1
+
+hi, lo = R(hi, lo, z) # First reduction
+hi, lo = R(hi, lo, z) # Second reduction
+hi * 2**64 + lo < 2 * p1 # True
+
+# For p2:
+hi=2**64-1; lo=2**64-1; z=2**34
+p2 = 2**64 - z + 1
+
+hi, lo = R(hi, lo, z) # First reduction
+hi, lo = R(hi, lo, z) # Second reduction
+hi, lo = R(hi, lo, z) # Third reduction
+hi * 2**64 + lo < 2 * p2 # True
+
+# For p3:
+hi=2**64-1; lo=2**64-1; z=2**40
+p3 = 2**64 - z + 1
+
+hi, lo = R(hi, lo, z) # First reduction
+hi, lo = R(hi, lo, z) # Second reduction
+hi, lo = R(hi, lo, z) # Third reduction
+hi * 2**64 + lo < 2 * p3 # True
+
+
+Step d) preserves congruence and yields a result < p:
+-----------------------------------------------------
+
+ Case hi = 0:
+
+ Case lo < p: trivial.
+
+ Case lo >= p:
+
+ lo == lo - p (mod p) # result is congruent
+
+ p <= lo < 2*p -> 0 <= lo - p < p # result is in the correct range
+
+ Case hi = 1:
+
+ p < 2**64 /\ 2**64 + lo < 2*p -> lo < p # lo is always less than p
+
+ 2**64 + lo == 2**64 + (lo - p) (mod p) # result is congruent
+
+ = lo - p # exactly the same value as the previous RHS
+ # in uint64_t arithmetic.
+
+ p < 2**64 + lo < 2*p -> 0 < 2**64 + (lo - p) < p # correct range
+
+
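+Putting the steps together, a compact plain-Python model of the whole routine
+(illustrative only; the library's C routines work on fixed-width words instead
+of Python integers):
+
+def mulmod(a, b, n):
+    # (a * b) % p for p = 2**64 - 2**n + 1, following steps a) - d).
+    z = 2**n
+    p = 2**64 - z + 1
+    hi, lo = divmod(a * b, 2**64)                 # a) exact product
+    while hi * 2**64 + lo >= 2 * p:               # c) repeat b) until < 2*p
+        hi, lo = divmod(hi*z - hi + lo, 2**64)    # b) R(hi, lo)
+    v = hi * 2**64 + lo
+    return v if v < p else v - p                  # d) final subtraction
+
+p1 = 2**64 - 2**32 + 1
+assert mulmod(p1 - 1, p1 - 1, 32) == ((p1 - 1) * (p1 - 1)) % p1
+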
+
+[1] http://www.apfloat.org/apfloat/2.40/apfloat.pdf
+
+
+
diff --git a/Modules/_decimal/libmpdec/literature/mulmod-ppro.txt b/Modules/_decimal/libmpdec/literature/mulmod-ppro.txt
new file mode 100644
index 0000000000..43e6e4e4ba
--- /dev/null
+++ b/Modules/_decimal/libmpdec/literature/mulmod-ppro.txt
@@ -0,0 +1,269 @@
+
+
+(* Copyright (c) 2011 Stefan Krah. All rights reserved. *)
+
+
+========================================================================
+ Calculate (a * b) % p using the 80-bit x87 FPU
+========================================================================
+
+A description of the algorithm can be found in the apfloat manual by
+Tommila [1].
+
+The proof follows an argument made by Granlund/Montgomery in [2].
+
+
+Definitions and assumptions:
+----------------------------
+
+The 80-bit extended precision format uses 64 bits for the significand:
+
+ (1) F = 64
+
+The modulus is prime and less than 2**31:
+
+ (2) 2 <= p < 2**31
+
+The factors are less than p:
+
+ (3) 0 <= a < p
+ (4) 0 <= b < p
+
+The product a * b is less than 2**62 and is thus exact in 64 bits:
+
+ (5) n = a * b
+
+The product can be represented in terms of quotient and remainder:
+
+ (6) n = q * p + r
+
+Using (3), (4) and the fact that p is prime, the remainder is always
+greater than zero:
+
+ (7) 0 <= q < p /\ 1 <= r < p
+
+
+Strategy:
+---------
+
+Precalculate the 80-bit long double inverse of p, with a maximum
+relative error of 2**(1-F):
+
+ (8) pinv = (long double)1.0 / p
+
+Calculate an estimate for q = floor(n/p). The multiplication has another
+maximum relative error of 2**(1-F):
+
+ (9) qest = n * pinv
+
+If we can show that q < qest < q+1, then trunc(qest) = q. It is then
+easy to recover the remainder r. The complete algorithm is:
+
+ a) Set the control word to 64-bit precision and truncation mode.
+
+ b) n = a * b # Calculate exact product.
+
+ c) qest = n * pinv # Calculate estimate for the quotient.
+
+ d) q = (qest+2**63)-2**63 # Truncate qest to the exact quotient.
+
+ f) r = n - q * p # Calculate remainder.
+
+
+Proof for q < qest < q+1:
+-------------------------
+
+Using the cumulative error, the error bounds for qest are:
+
+ n n * (1 + 2**(1-F))**2
+ (9) --------------------- <= qest <= ---------------------
+ p * (1 + 2**(1-F))**2 p
+
+
+ Lemma 1:
+ --------
+ n q * p + r
+ (10) q < --------------------- = ---------------------
+ p * (1 + 2**(1-F))**2 p * (1 + 2**(1-F))**2
+
+
+ Proof:
+ ~~~~~~
+
+ (I) q * p * (1 + 2**(1-F))**2 < q * p + r
+
+ (II) q * p * 2**(2-F) + q * p * 2**(2-2*F) < r
+
+ Using (1) and (7), it is sufficient to show that:
+
+ (III) q * p * 2**(-62) + q * p * 2**(-126) < 1 <= r
+
+ (III) can easily be verified by substituting the largest possible
+ values p = 2**31-1 and q = 2**31-2.
+
+ The critical cases occur when r = 1, n = m * p + 1. These cases
+ can be exhaustively verified with a test program.
+
+
+ Lemma 2:
+ --------
+
+ n * (1 + 2**(1-F))**2 (q * p + r) * (1 + 2**(1-F))**2
+ (11) --------------------- = ------------------------------- < q + 1
+ p p
+
+ Proof:
+ ~~~~~~
+
+ (I) (q * p + r) + (q * p + r) * 2**(2-F) + (q * p + r) * 2**(2-2*F) < q * p + p
+
+ (II) (q * p + r) * 2**(2-F) + (q * p + r) * 2**(2-2*F) < p - r
+
+ Using (1) and (7), it is sufficient to show that:
+
+ (III) (q * p + r) * 2**(-62) + (q * p + r) * 2**(-126) < 1 <= p - r
+
+ (III) can easily be verified by substituting the largest possible
+ values p = 2**31-1, q = 2**31-2 and r = 2**31-2.
+
+ The critical cases occur when r = (p - 1), n = m * p - 1. These cases
+ can be exhaustively verified with a test program.
+
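+
+Both substitutions can be reproduced with exact rational arithmetic; a small
+plain-Python sketch (illustrative only, independent of the library and of the
+Coq proof below):
+
+from fractions import Fraction as Q
+
+p = Q(2)**31 - 1                    # largest admissible modulus
+q = Q(2)**31 - 2                    # largest admissible quotient
+eps = Q(2)**(-62) + Q(2)**(-126)    # 2**(2-F) + 2**(2-2*F) for F = 64
+
+# Lemma 1, (III): q*p*2**(-62) + q*p*2**(-126) < 1  (and 1 <= r by (7))
+assert q * p * eps < 1
+
+# Lemma 2, (III): (q*p + r)*eps < 1 <= p - r  for r = p - 1
+r = p - 1
+assert (q * p + r) * eps < 1 <= p - r
+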
+
+[1] http://www.apfloat.org/apfloat/2.40/apfloat.pdf
+[2] http://gmplib.org/~tege/divcnst-pldi94.pdf
+ [Section 7: "Use of floating point"]
+
+
+
+(* Coq proof for (10) and (11) *)
+
+Require Import ZArith.
+Require Import QArith.
+Require Import Qpower.
+Require Import Qabs.
+Require Import Psatz.
+
+Open Scope Q_scope.
+
+
+Ltac qreduce T :=
+ rewrite <- (Qred_correct (T)); simpl (Qred (T)).
+
+Theorem Qlt_move_right :
+ forall x y z:Q, x + z < y <-> x < y - z.
+Proof.
+ intros.
+ split.
+ intros.
+ psatzl Q.
+ intros.
+ psatzl Q.
+Qed.
+
+Theorem Qlt_mult_by_z :
+ forall x y z:Q, 0 < z -> (x < y <-> x * z < y * z).
+Proof.
+ intros.
+ split.
+ intros.
+ apply Qmult_lt_compat_r. trivial. trivial.
+ intros.
+ rewrite <- (Qdiv_mult_l x z). rewrite <- (Qdiv_mult_l y z).
+ apply Qmult_lt_compat_r.
+ apply Qlt_shift_inv_l.
+ trivial. psatzl Q. trivial. psatzl Q. psatzl Q.
+Qed.
+
+Theorem Qle_mult_quad :
+ forall (a b c d:Q),
+ 0 <= a -> a <= c ->
+ 0 <= b -> b <= d ->
+ a * b <= c * d.
+ intros.
+ psatz Q.
+Qed.
+
+
+Theorem q_lt_qest:
+ forall (p q r:Q),
+ (0 < p) -> (p <= (2#1)^31 - 1) ->
+ (0 <= q) -> (q <= p - 1) ->
+ (1 <= r) -> (r <= p - 1) ->
+ q < (q * p + r) / (p * (1 + (2#1)^(-63))^2).
+Proof.
+ intros.
+ rewrite Qlt_mult_by_z with (z := (p * (1 + (2#1)^(-63))^2)).
+
+ unfold Qdiv.
+ rewrite <- Qmult_assoc.
+ rewrite (Qmult_comm (/ (p * (1 + (2 # 1) ^ (-63)) ^ 2)) (p * (1 + (2 # 1) ^ (-63)) ^ 2)).
+ rewrite Qmult_inv_r.
+ rewrite Qmult_1_r.
+
+ assert (q * (p * (1 + (2 # 1) ^ (-63)) ^ 2) == q * p + (q * p) * ((2 # 1) ^ (-62) + (2 # 1) ^ (-126))).
+ qreduce ((1 + (2 # 1) ^ (-63)) ^ 2).
+ qreduce ((2 # 1) ^ (-62) + (2 # 1) ^ (-126)).
+ ring_simplify.
+ reflexivity.
+ rewrite H5.
+
+ rewrite Qplus_comm.
+ rewrite Qlt_move_right.
+ ring_simplify (q * p + r - q * p).
+ qreduce ((2 # 1) ^ (-62) + (2 # 1) ^ (-126)).
+
+ apply Qlt_le_trans with (y := 1).
+ rewrite Qlt_mult_by_z with (z := 85070591730234615865843651857942052864 # 18446744073709551617).
+ ring_simplify.
+
+ apply Qle_lt_trans with (y := ((2 # 1) ^ 31 - (2#1)) * ((2 # 1) ^ 31 - 1)).
+ apply Qle_mult_quad.
+ assumption. psatzl Q. psatzl Q. psatzl Q. psatzl Q. psatzl Q. assumption. psatzl Q. psatzl Q.
+Qed.
+
+Theorem qest_lt_qplus1:
+ forall (p q r:Q),
+ (0 < p) -> (p <= (2#1)^31 - 1) ->
+ (0 <= q) -> (q <= p - 1) ->
+ (1 <= r) -> (r <= p - 1) ->
+ ((q * p + r) * (1 + (2#1)^(-63))^2) / p < q + 1.
+Proof.
+ intros.
+ rewrite Qlt_mult_by_z with (z := p).
+
+ unfold Qdiv.
+ rewrite <- Qmult_assoc.
+ rewrite (Qmult_comm (/ p) p).
+ rewrite Qmult_inv_r.
+ rewrite Qmult_1_r.
+
+ assert ((q * p + r) * (1 + (2 # 1) ^ (-63)) ^ 2 == q * p + r + (q * p + r) * ((2 # 1) ^ (-62) + (2 # 1) ^ (-126))).
+ qreduce ((1 + (2 # 1) ^ (-63)) ^ 2).
+ qreduce ((2 # 1) ^ (-62) + (2 # 1) ^ (-126)).
+ ring_simplify. reflexivity.
+ rewrite H5.
+
+ rewrite <- Qplus_assoc. rewrite <- Qplus_comm. rewrite Qlt_move_right.
+ ring_simplify ((q + 1) * p - q * p).
+
+ rewrite <- Qplus_comm. rewrite Qlt_move_right.
+
+ apply Qlt_le_trans with (y := 1).
+ qreduce ((2 # 1) ^ (-62) + (2 # 1) ^ (-126)).
+
+ rewrite Qlt_mult_by_z with (z := 85070591730234615865843651857942052864 # 18446744073709551617).
+ ring_simplify.
+
+ ring_simplify in H0.
+ apply Qle_lt_trans with (y := (2147483646 # 1) * (2147483647 # 1) + (2147483646 # 1)).
+
+ apply Qplus_le_compat.
+ apply Qle_mult_quad.
+ assumption. psatzl Q. auto with qarith. assumption. psatzl Q.
+ auto with qarith. auto with qarith.
+ psatzl Q. psatzl Q. assumption.
+Qed.
+
+
+
diff --git a/Modules/_decimal/libmpdec/literature/six-step.txt b/Modules/_decimal/libmpdec/literature/six-step.txt
new file mode 100644
index 0000000000..759147ff1c
--- /dev/null
+++ b/Modules/_decimal/libmpdec/literature/six-step.txt
@@ -0,0 +1,63 @@
+
+
+(* Copyright (c) 2011 Stefan Krah. All rights reserved. *)
+
+
+The Six Step Transform:
+=======================
+
+In libmpdec, the six-step transform is the Matrix Fourier Transform (See
+matrix-transform.txt) in disguise. It is called six-step transform after
+a variant that appears in [1]. The algorithm requires that the input
+array can be viewed as an R*C matrix.
+
+
+Algorithm six-step (forward transform):
+---------------------------------------
+
+ 1a) Transpose the matrix.
+
+ 1b) Apply a length R FNT to each row.
+
+ 1c) Transpose the matrix.
+
+ 2) Multiply each matrix element (addressed by j*C+m) by r**(j*m).
+
+ 3) Apply a length C FNT to each row.
+
+ 4) Transpose the matrix.
+
+Note that steps 1a) - 1c) are exactly equivalent to step 1) of the Matrix
+Fourier Transform. For large R, it is faster to transpose twice and do
+a transform on the rows than to perform a column transform directly.
+
+
+
+Algorithm six-step (inverse transform):
+---------------------------------------
+
+ 0) View the matrix as a C*R matrix.
+
+ 1) Transpose the matrix, producing an R*C matrix.
+
+ 2) Apply a length C FNT to each row.
+
+ 3) Multiply each matrix element (addressed by i*C+n) by r**(i*n).
+
+ 4a) Transpose the matrix.
+
+ 4b) Apply a length R FNT to each row.
+
+ 4c) Transpose the matrix.
+
+Again, steps 4a) - 4c) are equivalent to step 4) of the Matrix Fourier
+Transform.
+
+
+
+--
+
+ [1] David H. Bailey: FFTs in External or Hierarchical Memory
+ http://crd.lbl.gov/~dhbailey/dhbpapers/
+
+
diff --git a/Modules/_decimal/libmpdec/literature/umodarith.lisp b/Modules/_decimal/libmpdec/literature/umodarith.lisp
new file mode 100644
index 0000000000..60a14a4e56
--- /dev/null
+++ b/Modules/_decimal/libmpdec/literature/umodarith.lisp
@@ -0,0 +1,692 @@
+;
+; Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; 1. Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+;
+; 2. Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in the
+; documentation and/or other materials provided with the distribution.
+;
+; THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+; ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+; OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+; HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+; LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+; OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+; SUCH DAMAGE.
+;
+
+
+(in-package "ACL2")
+
+(include-book "arithmetic/top-with-meta" :dir :system)
+(include-book "arithmetic-2/floor-mod/floor-mod" :dir :system)
+
+
+;; =====================================================================
+;; Proofs for several functions in umodarith.h
+;; =====================================================================
+
+
+
+;; =====================================================================
+;; Helper theorems
+;; =====================================================================
+
+(defthm elim-mod-m<x<2*m
+ (implies (and (<= m x)
+ (< x (* 2 m))
+ (rationalp x) (rationalp m))
+ (equal (mod x m)
+ (+ x (- m)))))
+
+(defthm modaux-1a
+ (implies (and (< x m) (< 0 x) (< 0 m)
+ (rationalp x) (rationalp m))
+ (equal (mod (- x) m)
+ (+ (- x) m))))
+
+(defthm modaux-1b
+ (implies (and (< (- x) m) (< x 0) (< 0 m)
+ (rationalp x) (rationalp m))
+ (equal (mod x m)
+ (+ x m)))
+ :hints (("Goal" :use ((:instance modaux-1a
+ (x (- x)))))))
+
+(defthm modaux-1c
+ (implies (and (< x m) (< 0 x) (< 0 m)
+ (rationalp x) (rationalp m))
+ (equal (mod x m)
+ x)))
+
+(defthm modaux-2a
+ (implies (and (< 0 b) (< b m)
+ (natp x) (natp b) (natp m)
+ (< (mod (+ b x) m) b))
+ (equal (mod (+ (- m) b x) m)
+ (+ (- m) b (mod x m)))))
+
+(defthm modaux-2b
+ (implies (and (< 0 b) (< b m)
+ (natp x) (natp b) (natp m)
+ (< (mod (+ b x) m) b))
+ (equal (mod (+ b x) m)
+ (+ (- m) b (mod x m))))
+ :hints (("Goal" :use (modaux-2a))))
+
+(defthm linear-mod-1
+ (implies (and (< x m) (< b m)
+ (natp x) (natp b)
+ (rationalp m))
+ (equal (< x (mod (+ (- b) x) m))
+ (< x b)))
+ :hints (("Goal" :use ((:instance modaux-1a
+ (x (+ b (- x))))))))
+
+(defthm linear-mod-2
+ (implies (and (< 0 b) (< b m)
+ (natp x) (natp b)
+ (natp m))
+ (equal (< (mod x m)
+ (mod (+ (- b) x) m))
+ (< (mod x m) b))))
+
+(defthm linear-mod-3
+ (implies (and (< x m) (< b m)
+ (natp x) (natp b)
+ (rationalp m))
+ (equal (<= b (mod (+ b x) m))
+ (< (+ b x) m)))
+ :hints (("Goal" :use ((:instance elim-mod-m<x<2*m
+ (x (+ b x)))))))
+
+(defthm modaux-2c
+ (implies (and (< 0 b) (< b m)
+ (natp x) (natp b) (natp m)
+ (<= b (mod (+ b x) m)))
+ (equal (mod (+ b x) m)
+ (+ b (mod x m))))
+ :hints (("Subgoal *1/8''" :use (linear-mod-3))))
+
+(defthmd modaux-2d
+ (implies (and (< x m) (< 0 x) (< 0 m)
+ (< (- m) b) (< b 0) (rationalp m)
+ (<= x (mod (+ b x) m))
+ (rationalp x) (rationalp b))
+ (equal (+ (- m) (mod (+ b x) m))
+ (+ b x)))
+ :hints (("Goal" :cases ((<= 0 (+ b x))))
+ ("Subgoal 2'" :use ((:instance modaux-1b
+ (x (+ b x)))))))
+
+(defthm mod-m-b
+ (implies (and (< 0 x) (< 0 b) (< 0 m)
+ (< x b) (< b m)
+ (natp x) (natp b) (natp m))
+ (equal (mod (+ (mod (- x) m) b) m)
+ (mod (- x) b))))
+
+
+;; =====================================================================
+;; addmod, submod
+;; =====================================================================
+
+(defun addmod (a b m base)
+ (let* ((s (mod (+ a b) base))
+ (s (if (< s a) (mod (- s m) base) s))
+ (s (if (>= s m) (mod (- s m) base) s)))
+ s))
+
+(defthmd addmod-correct
+ (implies (and (< 0 m) (< m base)
+ (< a m) (<= b m)
+ (natp m) (natp base)
+ (natp a) (natp b))
+ (equal (addmod a b m base)
+ (mod (+ a b) m)))
+ :hints (("Goal" :cases ((<= base (+ a b))))
+ ("Subgoal 2.1'" :use ((:instance elim-mod-m<x<2*m
+ (x (+ a b)))))))
+
+(defun submod (a b m base)
+ (let* ((d (mod (- a b) base))
+ (d (if (< a d) (mod (+ d m) base) d)))
+ d))
+
+(defthmd submod-aux1
+ (implies (and (< a (mod (+ a (- b)) base))
+ (< 0 base) (< a base) (<= b base)
+ (natp base) (natp a) (natp b))
+ (< a b))
+ :rule-classes :forward-chaining)
+
+(defthmd submod-aux2
+ (implies (and (<= (mod (+ a (- b)) base) a)
+ (< 0 base) (< a base) (< b base)
+ (natp base) (natp a) (natp b))
+ (<= b a))
+ :rule-classes :forward-chaining)
+
+(defthmd submod-correct
+ (implies (and (< 0 m) (< m base)
+ (< a m) (<= b m)
+ (natp m) (natp base)
+ (natp a) (natp b))
+ (equal (submod a b m base)
+ (mod (- a b) m)))
+ :hints (("Goal" :cases ((<= base (+ a b))))
+ ("Subgoal 2.2" :use ((:instance submod-aux1)))
+ ("Subgoal 2.2'''" :cases ((and (< 0 (+ a (- b) m))
+ (< (+ a (- b) m) m))))
+ ("Subgoal 2.1" :use ((:instance submod-aux2)))
+ ("Subgoal 1.2" :use ((:instance submod-aux1)))
+ ("Subgoal 1.1" :use ((:instance submod-aux2)))))
+
+
+(defun submod-2 (a b m base)
+ (let* ((d (mod (- a b) base))
+ (d (if (< a b) (mod (+ d m) base) d)))
+ d))
+
+(defthm submod-2-correct
+ (implies (and (< 0 m) (< m base)
+ (< a m) (<= b m)
+ (natp m) (natp base)
+ (natp a) (natp b))
+ (equal (submod-2 a b m base)
+ (mod (- a b) m)))
+ :hints (("Subgoal 2'" :cases ((and (< 0 (+ a (- b) m))
+ (< (+ a (- b) m) m))))))
+
+
+;; =========================================================================
+;; ext-submod is correct
+;; =========================================================================
+
+; a < 2*m, b < 2*m
+(defun ext-submod (a b m base)
+ (let* ((a (if (>= a m) (- a m) a))
+ (b (if (>= b m) (- b m) b))
+ (d (mod (- a b) base))
+ (d (if (< a b) (mod (+ d m) base) d)))
+ d))
+
+; a < 2*m, b < 2*m
+(defun ext-submod-2 (a b m base)
+ (let* ((a (mod a m))
+ (b (mod b m))
+ (d (mod (- a b) base))
+ (d (if (< a b) (mod (+ d m) base) d)))
+ d))
+
+(defthmd ext-submod-ext-submod-2-equal
+ (implies (and (< 0 m) (< m base)
+ (< a (* 2 m)) (< b (* 2 m))
+ (natp m) (natp base)
+ (natp a) (natp b))
+ (equal (ext-submod a b m base)
+ (ext-submod-2 a b m base))))
+
+(defthmd ext-submod-2-correct
+ (implies (and (< 0 m) (< m base)
+ (< a (* 2 m)) (< b (* 2 m))
+ (natp m) (natp base)
+ (natp a) (natp b))
+ (equal (ext-submod-2 a b m base)
+ (mod (- a b) m))))
+
+
+;; =========================================================================
+;; dw-reduce is correct
+;; =========================================================================
+
+(defun dw-reduce (hi lo m base)
+ (let* ((r1 (mod hi m))
+ (r2 (mod (+ (* r1 base) lo) m)))
+ r2))
+
+(defthmd dw-reduce-correct
+ (implies (and (< 0 m) (< m base)
+ (< hi base) (< lo base)
+ (natp m) (natp base)
+ (natp hi) (natp lo))
+ (equal (dw-reduce hi lo m base)
+ (mod (+ (* hi base) lo) m))))
+
+(defthmd <=-multiply-both-sides-by-z
+ (implies (and (rationalp x) (rationalp y)
+ (< 0 z) (rationalp z))
+ (equal (<= x y)
+ (<= (* z x) (* z y)))))
+
+(defthmd dw-reduce-aux1
+ (implies (and (< 0 m) (< m base)
+ (natp m) (natp base)
+ (< lo base) (natp lo)
+ (< x m) (natp x))
+ (< (+ lo (* base x)) (* base m)))
+ :hints (("Goal" :cases ((<= (+ x 1) m)))
+ ("Subgoal 1''" :cases ((<= (* base (+ x 1)) (* base m))))
+ ("subgoal 1.2" :use ((:instance <=-multiply-both-sides-by-z
+ (x (+ 1 x))
+ (y m)
+ (z base))))))
+
+(defthm dw-reduce-aux2
+ (implies (and (< x (* base m))
+ (< 0 m) (< m base)
+ (natp m) (natp base) (natp x))
+ (< (floor x m) base)))
+
+;; This is the necessary condition for using _mpd_div_words().
+(defthmd dw-reduce-second-quotient-fits-in-single-word
+ (implies (and (< 0 m) (< m base)
+ (< hi base) (< lo base)
+ (natp m) (natp base)
+ (natp hi) (natp lo)
+ (equal r1 (mod hi m)))
+ (< (floor (+ (* r1 base) lo) m)
+ base))
+ :hints (("Goal" :cases ((< r1 m)))
+ ("Subgoal 1''" :cases ((< (+ lo (* base (mod hi m))) (* base m))))
+ ("Subgoal 1.2" :use ((:instance dw-reduce-aux1
+ (x (mod hi m)))))))
+
+
+;; =========================================================================
+;; dw-submod is correct
+;; =========================================================================
+
+(defun dw-submod (a hi lo m base)
+ (let* ((r (dw-reduce hi lo m base))
+ (d (mod (- a r) base))
+ (d (if (< a r) (mod (+ d m) base) d)))
+ d))
+
+(defthmd dw-submod-aux1
+ (implies (and (natp a) (< 0 m) (natp m)
+ (natp x) (equal r (mod x m)))
+ (equal (mod (- a x) m)
+ (mod (- a r) m))))
+
+(defthmd dw-submod-correct
+ (implies (and (< 0 m) (< m base)
+ (natp a) (< a m)
+ (< hi base) (< lo base)
+ (natp m) (natp base)
+ (natp hi) (natp lo))
+ (equal (dw-submod a hi lo m base)
+ (mod (- a (+ (* base hi) lo)) m)))
+ :hints (("Goal" :in-theory (disable dw-reduce)
+ :use ((:instance dw-submod-aux1
+ (x (+ lo (* base hi)))
+ (r (dw-reduce hi lo m base)))
+ (:instance dw-reduce-correct)))))
+
+
+;; =========================================================================
+;; ANSI C arithmetic for uint64_t
+;; =========================================================================
+
+(defun add (a b)
+ (mod (+ a b)
+ (expt 2 64)))
+
+(defun sub (a b)
+ (mod (- a b)
+ (expt 2 64)))
+
+(defun << (w n)
+ (mod (* w (expt 2 n))
+ (expt 2 64)))
+
+(defun >> (w n)
+ (floor w (expt 2 n)))
+
+;; join upper and lower half of a double word, yielding a 128 bit number
+(defun join (hi lo)
+ (+ (* (expt 2 64) hi) lo))
+
+
+;; =============================================================================
+;; Fast modular reduction
+;; =============================================================================
+
+;; These are the three primes used in the Number Theoretic Transform.
+;; A fast modular reduction scheme exists for all of them.
+(defmacro p1 ()
+ (+ (expt 2 64) (- (expt 2 32)) 1))
+
+(defmacro p2 ()
+ (+ (expt 2 64) (- (expt 2 34)) 1))
+
+(defmacro p3 ()
+ (+ (expt 2 64) (- (expt 2 40)) 1))
+
+
+;; reduce the double word number hi*2**64 + lo (mod p1)
+(defun simple-mod-reduce-p1 (hi lo)
+ (+ (* (expt 2 32) hi) (- hi) lo))
+
+;; reduce the double word number hi*2**64 + lo (mod p2)
+(defun simple-mod-reduce-p2 (hi lo)
+ (+ (* (expt 2 34) hi) (- hi) lo))
+
+;; reduce the double word number hi*2**64 + lo (mod p3)
+(defun simple-mod-reduce-p3 (hi lo)
+ (+ (* (expt 2 40) hi) (- hi) lo))
+
+
+; ----------------------------------------------------------
+; The modular reductions given above are correct
+; ----------------------------------------------------------
+
+(defthmd congruence-p1-aux
+ (equal (* (expt 2 64) hi)
+ (+ (* (p1) hi)
+ (* (expt 2 32) hi)
+ (- hi))))
+
+(defthmd congruence-p2-aux
+ (equal (* (expt 2 64) hi)
+ (+ (* (p2) hi)
+ (* (expt 2 34) hi)
+ (- hi))))
+
+(defthmd congruence-p3-aux
+ (equal (* (expt 2 64) hi)
+ (+ (* (p3) hi)
+ (* (expt 2 40) hi)
+ (- hi))))
+
+(defthmd mod-augment
+ (implies (and (rationalp x)
+ (rationalp y)
+ (rationalp m))
+ (equal (mod (+ x y) m)
+ (mod (+ x (mod y m)) m))))
+
+(defthmd simple-mod-reduce-p1-congruent
+ (implies (and (integerp hi)
+ (integerp lo))
+ (equal (mod (simple-mod-reduce-p1 hi lo) (p1))
+ (mod (join hi lo) (p1))))
+ :hints (("Goal''" :use ((:instance congruence-p1-aux)
+ (:instance mod-augment
+ (m (p1))
+ (x (+ (- hi) lo (* (expt 2 32) hi)))
+ (y (* (p1) hi)))))))
+
+(defthmd simple-mod-reduce-p2-congruent
+ (implies (and (integerp hi)
+ (integerp lo))
+ (equal (mod (simple-mod-reduce-p2 hi lo) (p2))
+ (mod (join hi lo) (p2))))
+ :hints (("Goal''" :use ((:instance congruence-p2-aux)
+ (:instance mod-augment
+ (m (p2))
+ (x (+ (- hi) lo (* (expt 2 34) hi)))
+ (y (* (p2) hi)))))))
+
+(defthmd simple-mod-reduce-p3-congruent
+ (implies (and (integerp hi)
+ (integerp lo))
+ (equal (mod (simple-mod-reduce-p3 hi lo) (p3))
+ (mod (join hi lo) (p3))))
+ :hints (("Goal''" :use ((:instance congruence-p3-aux)
+ (:instance mod-augment
+ (m (p3))
+ (x (+ (- hi) lo (* (expt 2 40) hi)))
+ (y (* (p3) hi)))))))
+
+
+; ---------------------------------------------------------------------
+; We need a number less than 2*p, so that we can use the trick from
+; elim-mod-m<x<2*m for the final reduction.
+; For p1, two modular reductions are sufficient, for p2 and p3 three.
+; ---------------------------------------------------------------------
+
+;; p1: the first reduction is less than 2**96
+(defthmd simple-mod-reduce-p1-<-2**96
+ (implies (and (< hi (expt 2 64))
+ (< lo (expt 2 64))
+ (natp hi) (natp lo))
+ (< (simple-mod-reduce-p1 hi lo)
+ (expt 2 96))))
+
+;; p1: the second reduction is less than 2*p1
+(defthmd simple-mod-reduce-p1-<-2*p1
+ (implies (and (< hi (expt 2 64))
+ (< lo (expt 2 64))
+ (< (join hi lo) (expt 2 96))
+ (natp hi) (natp lo))
+ (< (simple-mod-reduce-p1 hi lo)
+ (* 2 (p1)))))
+
+
+;; p2: the first reduction is less than 2**98
+(defthmd simple-mod-reduce-p2-<-2**98
+ (implies (and (< hi (expt 2 64))
+ (< lo (expt 2 64))
+ (natp hi) (natp lo))
+ (< (simple-mod-reduce-p2 hi lo)
+ (expt 2 98))))
+
+;; p2: the second reduction is less than 2**69
+(defthmd simple-mod-reduce-p2-<-2*69
+ (implies (and (< hi (expt 2 64))
+ (< lo (expt 2 64))
+ (< (join hi lo) (expt 2 98))
+ (natp hi) (natp lo))
+ (< (simple-mod-reduce-p2 hi lo)
+ (expt 2 69))))
+
+;; p2: the third reduction is less than 2*p2
+(defthmd simple-mod-reduce-p2-<-2*p2
+ (implies (and (< hi (expt 2 64))
+ (< lo (expt 2 64))
+ (< (join hi lo) (expt 2 69))
+ (natp hi) (natp lo))
+ (< (simple-mod-reduce-p2 hi lo)
+ (* 2 (p2)))))
+
+
+;; p3: the first reduction is less than 2**104
+(defthmd simple-mod-reduce-p3-<-2**104
+ (implies (and (< hi (expt 2 64))
+ (< lo (expt 2 64))
+ (natp hi) (natp lo))
+ (< (simple-mod-reduce-p3 hi lo)
+ (expt 2 104))))
+
+;; p3: the second reduction is less than 2**81
+(defthmd simple-mod-reduce-p3-<-2**81
+ (implies (and (< hi (expt 2 64))
+ (< lo (expt 2 64))
+ (< (join hi lo) (expt 2 104))
+ (natp hi) (natp lo))
+ (< (simple-mod-reduce-p3 hi lo)
+ (expt 2 81))))
+
+;; p3: the third reduction is less than 2*p3
+(defthmd simple-mod-reduce-p3-<-2*p3
+ (implies (and (< hi (expt 2 64))
+ (< lo (expt 2 64))
+ (< (join hi lo) (expt 2 81))
+ (natp hi) (natp lo))
+ (< (simple-mod-reduce-p3 hi lo)
+ (* 2 (p3)))))
+
+
+; -------------------------------------------------------------------------
+; The simple modular reductions, adapted for compiler friendly C
+; -------------------------------------------------------------------------
+
+(defun mod-reduce-p1 (hi lo)
+ (let* ((y hi)
+ (x y)
+ (hi (>> hi 32))
+ (x (sub lo x))
+ (hi (if (> x lo) (+ hi -1) hi))
+ (y (<< y 32))
+ (lo (add y x))
+ (hi (if (< lo y) (+ hi 1) hi)))
+ (+ (* hi (expt 2 64)) lo)))
+
+(defun mod-reduce-p2 (hi lo)
+ (let* ((y hi)
+ (x y)
+ (hi (>> hi 30))
+ (x (sub lo x))
+ (hi (if (> x lo) (+ hi -1) hi))
+ (y (<< y 34))
+ (lo (add y x))
+ (hi (if (< lo y) (+ hi 1) hi)))
+ (+ (* hi (expt 2 64)) lo)))
+
+(defun mod-reduce-p3 (hi lo)
+ (let* ((y hi)
+ (x y)
+ (hi (>> hi 24))
+ (x (sub lo x))
+ (hi (if (> x lo) (+ hi -1) hi))
+ (y (<< y 40))
+ (lo (add y x))
+ (hi (if (< lo y) (+ hi 1) hi)))
+ (+ (* hi (expt 2 64)) lo)))
+
+
+; -------------------------------------------------------------------------
+; The compiler friendly versions are equal to the simple versions
+; -------------------------------------------------------------------------
+
+(defthm mod-reduce-aux1
+ (implies (and (<= 0 a) (natp a) (natp m)
+ (< (- m) b) (<= b 0)
+ (integerp b)
+ (< (mod (+ b a) m)
+ (mod a m)))
+ (equal (mod (+ b a) m)
+ (+ b (mod a m))))
+ :hints (("Subgoal 2" :use ((:instance modaux-1b
+ (x (+ a b)))))))
+
+(defthm mod-reduce-aux2
+ (implies (and (<= 0 a) (natp a) (natp m)
+ (< b m) (natp b)
+ (< (mod (+ b a) m)
+ (mod a m)))
+ (equal (+ m (mod (+ b a) m))
+ (+ b (mod a m)))))
+
+
+(defthm mod-reduce-aux3
+ (implies (and (< 0 a) (natp a) (natp m)
+ (< (- m) b) (< b 0)
+ (integerp b)
+ (<= (mod a m)
+ (mod (+ b a) m)))
+ (equal (+ (- m) (mod (+ b a) m))
+ (+ b (mod a m))))
+ :hints (("Subgoal 1.2'" :use ((:instance modaux-1b
+ (x b))))
+ ("Subgoal 1''" :use ((:instance modaux-2d
+ (x I))))))
+
+
+(defthm mod-reduce-aux4
+ (implies (and (< 0 a) (natp a) (natp m)
+ (< b m) (natp b)
+ (<= (mod a m)
+ (mod (+ b a) m)))
+ (equal (mod (+ b a) m)
+ (+ b (mod a m)))))
+
+
+(defthm mod-reduce-p1==simple-mod-reduce-p1
+ (implies (and (< hi (expt 2 64))
+ (< lo (expt 2 64))
+ (natp hi) (natp lo))
+ (equal (mod-reduce-p1 hi lo)
+ (simple-mod-reduce-p1 hi lo)))
+ :hints (("Goal" :in-theory (disable expt)
+ :cases ((< 0 hi)))
+ ("Subgoal 1.2.2'" :use ((:instance mod-reduce-aux1
+ (m (expt 2 64))
+ (b (+ (- HI) LO))
+ (a (* (expt 2 32) hi)))))
+ ("Subgoal 1.2.1'" :use ((:instance mod-reduce-aux3
+ (m (expt 2 64))
+ (b (+ (- HI) LO))
+ (a (* (expt 2 32) hi)))))
+ ("Subgoal 1.1.2'" :use ((:instance mod-reduce-aux2
+ (m (expt 2 64))
+ (b (+ (- HI) LO))
+ (a (* (expt 2 32) hi)))))
+ ("Subgoal 1.1.1'" :use ((:instance mod-reduce-aux4
+ (m (expt 2 64))
+ (b (+ (- HI) LO))
+ (a (* (expt 2 32) hi)))))))
+
+
+(defthm mod-reduce-p2==simple-mod-reduce-p2
+ (implies (and (< hi (expt 2 64))
+ (< lo (expt 2 64))
+ (natp hi) (natp lo))
+ (equal (mod-reduce-p2 hi lo)
+ (simple-mod-reduce-p2 hi lo)))
+ :hints (("Goal" :cases ((< 0 hi)))
+ ("Subgoal 1.2.2'" :use ((:instance mod-reduce-aux1
+ (m (expt 2 64))
+ (b (+ (- HI) LO))
+ (a (* (expt 2 34) hi)))))
+ ("Subgoal 1.2.1'" :use ((:instance mod-reduce-aux3
+ (m (expt 2 64))
+ (b (+ (- HI) LO))
+ (a (* (expt 2 34) hi)))))
+ ("Subgoal 1.1.2'" :use ((:instance mod-reduce-aux2
+ (m (expt 2 64))
+ (b (+ (- HI) LO))
+ (a (* (expt 2 34) hi)))))
+ ("Subgoal 1.1.1'" :use ((:instance mod-reduce-aux4
+ (m (expt 2 64))
+ (b (+ (- HI) LO))
+ (a (* (expt 2 34) hi)))))))
+
+
+(defthm mod-reduce-p3==simple-mod-reduce-p3
+ (implies (and (< hi (expt 2 64))
+ (< lo (expt 2 64))
+ (natp hi) (natp lo))
+ (equal (mod-reduce-p3 hi lo)
+ (simple-mod-reduce-p3 hi lo)))
+ :hints (("Goal" :cases ((< 0 hi)))
+ ("Subgoal 1.2.2'" :use ((:instance mod-reduce-aux1
+ (m (expt 2 64))
+ (b (+ (- HI) LO))
+ (a (* (expt 2 40) hi)))))
+ ("Subgoal 1.2.1'" :use ((:instance mod-reduce-aux3
+ (m (expt 2 64))
+ (b (+ (- HI) LO))
+ (a (* (expt 2 40) hi)))))
+ ("Subgoal 1.1.2'" :use ((:instance mod-reduce-aux2
+ (m (expt 2 64))
+ (b (+ (- HI) LO))
+ (a (* (expt 2 40) hi)))))
+ ("Subgoal 1.1.1'" :use ((:instance mod-reduce-aux4
+ (m (expt 2 64))
+ (b (+ (- HI) LO))
+ (a (* (expt 2 40) hi)))))))
+
+
+
diff --git a/Modules/_decimal/libmpdec/memory.c b/Modules/_decimal/libmpdec/memory.c
new file mode 100644
index 0000000000..bf6350f904
--- /dev/null
+++ b/Modules/_decimal/libmpdec/memory.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "typearith.h"
+#include "memory.h"
+
+
+/* Guaranteed minimum allocation for a coefficient. May be changed once
+ at program start using mpd_setminalloc(). */
+mpd_ssize_t MPD_MINALLOC = MPD_MINALLOC_MIN;
+
+/* Custom allocation and free functions */
+void *(* mpd_mallocfunc)(size_t size) = malloc;
+void *(* mpd_reallocfunc)(void *ptr, size_t size) = realloc;
+void *(* mpd_callocfunc)(size_t nmemb, size_t size) = calloc;
+void (* mpd_free)(void *ptr) = free;
+
+
+/* emulate calloc if it is not available */
+void *
+mpd_callocfunc_em(size_t nmemb, size_t size)
+{
+ void *ptr;
+ size_t req;
+ mpd_size_t overflow;
+
+#if MPD_SIZE_MAX < SIZE_MAX
+ /* full_coverage test only */
+ if (nmemb > MPD_SIZE_MAX || size > MPD_SIZE_MAX) {
+ return NULL;
+ }
+#endif
+
+ req = mul_size_t_overflow((mpd_size_t)nmemb, (mpd_size_t)size,
+ &overflow);
+ if (overflow) {
+ return NULL;
+ }
+
+ ptr = mpd_mallocfunc(req);
+ if (ptr == NULL) {
+ return NULL;
+ }
+ /* used on uint32_t or uint64_t */
+ memset(ptr, 0, req);
+
+ return ptr;
+}
+
+
+/* malloc with overflow checking */
+void *
+mpd_alloc(mpd_size_t nmemb, mpd_size_t size)
+{
+ mpd_size_t req, overflow;
+
+ req = mul_size_t_overflow(nmemb, size, &overflow);
+ if (overflow) {
+ return NULL;
+ }
+
+ return mpd_mallocfunc(req);
+}
+
+/* calloc with overflow checking */
+void *
+mpd_calloc(mpd_size_t nmemb, mpd_size_t size)
+{
+ mpd_size_t overflow;
+
+ (void)mul_size_t_overflow(nmemb, size, &overflow);
+ if (overflow) {
+ return NULL;
+ }
+
+ return mpd_callocfunc(nmemb, size);
+}
+
+/* realloc with overflow checking */
+void *
+mpd_realloc(void *ptr, mpd_size_t nmemb, mpd_size_t size, uint8_t *err)
+{
+ void *new;
+ mpd_size_t req, overflow;
+
+ req = mul_size_t_overflow(nmemb, size, &overflow);
+ if (overflow) {
+ *err = 1;
+ return ptr;
+ }
+
+ new = mpd_reallocfunc(ptr, req);
+ if (new == NULL) {
+ *err = 1;
+ return ptr;
+ }
+
+ return new;
+}
+
+/* struct hack malloc with overflow checking */
+void *
+mpd_sh_alloc(mpd_size_t struct_size, mpd_size_t nmemb, mpd_size_t size)
+{
+ mpd_size_t req, overflow;
+
+ req = mul_size_t_overflow(nmemb, size, &overflow);
+ if (overflow) {
+ return NULL;
+ }
+
+ req = add_size_t_overflow(req, struct_size, &overflow);
+ if (overflow) {
+ return NULL;
+ }
+
+ return mpd_mallocfunc(req);
+}
+
+
+/* Allocate a new decimal with a coefficient of length 'nwords'. In case
+ of an error the return value is NULL. */
+mpd_t *
+mpd_qnew_size(mpd_ssize_t nwords)
+{
+ mpd_t *result;
+
+ nwords = (nwords < MPD_MINALLOC) ? MPD_MINALLOC : nwords;
+
+ result = mpd_alloc(1, sizeof *result);
+ if (result == NULL) {
+ return NULL;
+ }
+
+ result->data = mpd_alloc(nwords, sizeof *result->data);
+ if (result->data == NULL) {
+ mpd_free(result);
+ return NULL;
+ }
+
+ result->flags = 0;
+ result->exp = 0;
+ result->digits = 0;
+ result->len = 0;
+ result->alloc = nwords;
+
+ return result;
+}
+
+/* Allocate a new decimal with a coefficient of length MPD_MINALLOC.
+ In case of an error the return value is NULL. */
+mpd_t *
+mpd_qnew(void)
+{
+ return mpd_qnew_size(MPD_MINALLOC);
+}
+
+/* Allocate a new decimal. In case of an error the return value is NULL,
+   MPD_Malloc_error is added to ctx->status and raised if it is trapped. */
+mpd_t *
+mpd_new(mpd_context_t *ctx)
+{
+ mpd_t *result;
+
+ result = mpd_qnew();
+ if (result == NULL) {
+ mpd_addstatus_raise(ctx, MPD_Malloc_error);
+ }
+ return result;
+}
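+
+/*
+ * Example (illustrative sketch): typical lifecycle of a heap-allocated
+ * decimal created by the quiet constructor:
+ *
+ *     mpd_t *x = mpd_qnew();
+ *     if (x == NULL) {
+ *         ... handle allocation failure ...
+ *     }
+ *     ... use x ...
+ *     mpd_del(x);
+ *
+ * Since both the struct and the coefficient are dynamic here, mpd_del()
+ * (defined in mpdecimal.c) frees both.
+ */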
+
+/*
+ * Input: 'result' is a static mpd_t with a static coefficient.
+ * Assumption: 'nwords' >= result->alloc.
+ *
+ * Resize the static coefficient to a larger dynamic one and copy the
+ * existing data. If successful, the value of 'result' is unchanged.
+ * Otherwise, set 'result' to NaN and update 'status' with MPD_Malloc_error.
+ */
+int
+mpd_switch_to_dyn(mpd_t *result, mpd_ssize_t nwords, uint32_t *status)
+{
+ mpd_uint_t *p = result->data;
+
+ assert(nwords >= result->alloc);
+
+ result->data = mpd_alloc(nwords, sizeof *result->data);
+ if (result->data == NULL) {
+ result->data = p;
+ mpd_set_qnan(result);
+ mpd_set_positive(result);
+ result->exp = result->digits = result->len = 0;
+ *status |= MPD_Malloc_error;
+ return 0;
+ }
+
+ memcpy(result->data, p, result->alloc * (sizeof *result->data));
+ result->alloc = nwords;
+ mpd_set_dynamic_data(result);
+ return 1;
+}
+
+/*
+ * Input: 'result' is a static mpd_t with a static coefficient.
+ *
+ * Convert the coefficient to a dynamic one that is initialized to zero. If
+ * malloc fails, set 'result' to NaN and update 'status' with MPD_Malloc_error.
+ */
+int
+mpd_switch_to_dyn_zero(mpd_t *result, mpd_ssize_t nwords, uint32_t *status)
+{
+ mpd_uint_t *p = result->data;
+
+ result->data = mpd_calloc(nwords, sizeof *result->data);
+ if (result->data == NULL) {
+ result->data = p;
+ mpd_set_qnan(result);
+ mpd_set_positive(result);
+ result->exp = result->digits = result->len = 0;
+ *status |= MPD_Malloc_error;
+ return 0;
+ }
+
+ result->alloc = nwords;
+ mpd_set_dynamic_data(result);
+
+ return 1;
+}
+
+/*
+ * Input: 'result' is a static or a dynamic mpd_t with a dynamic coefficient.
+ * Resize the coefficient to length 'nwords':
+ * Case nwords > result->alloc:
+ * If realloc is successful:
+ * 'result' has a larger coefficient but the same value. Return 1.
+ * Otherwise:
+ * Set 'result' to NaN, update status with MPD_Malloc_error and return 0.
+ * Case nwords < result->alloc:
+ * If realloc is successful:
+ * 'result' has a smaller coefficient. result->len is undefined. Return 1.
+ * Otherwise (unlikely):
+ * 'result' is unchanged. Reuse the now oversized coefficient. Return 1.
+ */
+int
+mpd_realloc_dyn(mpd_t *result, mpd_ssize_t nwords, uint32_t *status)
+{
+ uint8_t err = 0;
+
+ result->data = mpd_realloc(result->data, nwords, sizeof *result->data, &err);
+ if (!err) {
+ result->alloc = nwords;
+ }
+ else if (nwords > result->alloc) {
+ mpd_set_qnan(result);
+ mpd_set_positive(result);
+ result->exp = result->digits = result->len = 0;
+ *status |= MPD_Malloc_error;
+ return 0;
+ }
+
+ return 1;
+}
+
+
diff --git a/Modules/_decimal/libmpdec/memory.h b/Modules/_decimal/libmpdec/memory.h
new file mode 100644
index 0000000000..b3a4a56e96
--- /dev/null
+++ b/Modules/_decimal/libmpdec/memory.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef MEMORY_H
+#define MEMORY_H
+
+
+#include "mpdecimal.h"
+
+
+int mpd_switch_to_dyn(mpd_t *result, mpd_ssize_t size, uint32_t *status);
+int mpd_switch_to_dyn_zero(mpd_t *result, mpd_ssize_t size, uint32_t *status);
+int mpd_realloc_dyn(mpd_t *result, mpd_ssize_t size, uint32_t *status);
+
+
+#endif
+
+
+
diff --git a/Modules/_decimal/libmpdec/mpdecimal.c b/Modules/_decimal/libmpdec/mpdecimal.c
new file mode 100644
index 0000000000..89be5355aa
--- /dev/null
+++ b/Modules/_decimal/libmpdec/mpdecimal.c
@@ -0,0 +1,7623 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <math.h>
+#include "basearith.h"
+#include "bits.h"
+#include "convolute.h"
+#include "crt.h"
+#include "errno.h"
+#include "memory.h"
+#include "typearith.h"
+#include "umodarith.h"
+
+#ifdef PPRO
+ #if defined(_MSC_VER)
+ #include <float.h>
+ #pragma fenv_access(on)
+ #elif !defined(__OpenBSD__) && !defined(__NetBSD__)
+ /* C99 */
+ #include <fenv.h>
+ #pragma STDC FENV_ACCESS ON
+ #endif
+#endif
+
+#if defined(__x86_64__) && defined(__GLIBC__) && !defined(__INTEL_COMPILER)
+ #define USE_80BIT_LONG_DOUBLE
+#endif
+
+#if defined(_MSC_VER)
+ #define ALWAYS_INLINE __forceinline
+#elif defined(LEGACY_COMPILER)
+ #define ALWAYS_INLINE
+ #undef inline
+ #define inline
+#else
+ #ifdef TEST_COVERAGE
+ #define ALWAYS_INLINE
+ #else
+ #define ALWAYS_INLINE inline __attribute__ ((always_inline))
+ #endif
+#endif
+
+
+#define MPD_NEWTONDIV_CUTOFF 1024L
+
+#define MPD_NEW_STATIC(name, flags, exp, digits, len) \
+ mpd_uint_t name##_data[MPD_MINALLOC_MAX]; \
+ mpd_t name = {flags|MPD_STATIC|MPD_STATIC_DATA, exp, digits, \
+ len, MPD_MINALLOC_MAX, name##_data}
+
+#define MPD_NEW_CONST(name, flags, exp, digits, len, alloc, initval) \
+ mpd_uint_t name##_data[alloc] = {initval}; \
+ mpd_t name = {flags|MPD_STATIC|MPD_CONST_DATA, exp, digits, \
+ len, alloc, name##_data}
+
+#define MPD_NEW_SHARED(name, a) \
+ mpd_t name = {(a->flags&~MPD_DATAFLAGS)|MPD_STATIC|MPD_SHARED_DATA, \
+ a->exp, a->digits, a->len, a->alloc, a->data}
+
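+/*
+ * Example (illustrative sketch): MPD_NEW_STATIC is used throughout this
+ * file for stack-allocated workspaces that never touch the heap as long
+ * as the coefficient fits into MPD_MINALLOC_MAX words:
+ *
+ *     MPD_NEW_STATIC(tmp, 0, 0, 0, 0);
+ *
+ * expands to an mpd_uint_t array 'tmp_data' plus an mpd_t 'tmp' whose
+ * flags mark both the struct and the data as static, so mpd_del(&tmp)
+ * frees nothing and the coefficient only switches to dynamic storage if
+ * it has to grow beyond MPD_MINALLOC_MAX.
+ */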
+
+static mpd_uint_t data_one[1] = {1};
+static mpd_uint_t data_zero[1] = {0};
+static const mpd_t one = {MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1, data_one};
+static const mpd_t minus_one = {MPD_NEG|MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1,
+ data_one};
+static const mpd_t zero = {MPD_STATIC|MPD_CONST_DATA, 0, 1, 1, 1, data_zero};
+
+static inline void _mpd_check_exp(mpd_t *dec, const mpd_context_t *ctx,
+ uint32_t *status);
+static void _settriple(mpd_t *result, uint8_t sign, mpd_uint_t a,
+ mpd_ssize_t exp);
+static inline mpd_ssize_t _mpd_real_size(mpd_uint_t *data, mpd_ssize_t size);
+
+static void _mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status);
+static inline void _mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status);
+static void _mpd_qbarrett_divmod(mpd_t *q, mpd_t *r, const mpd_t *a,
+ const mpd_t *b, uint32_t *status);
+static inline void _mpd_qpow_uint(mpd_t *result, mpd_t *base, mpd_uint_t exp,
+ uint8_t resultsign, const mpd_context_t *ctx, uint32_t *status);
+
+mpd_uint_t mpd_qsshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n);
+
+
+/******************************************************************************/
+/* Performance critical inline functions */
+/******************************************************************************/
+
+#ifdef CONFIG_64
+/* Digits in a word, primarily useful for the most significant word. */
+ALWAYS_INLINE int
+mpd_word_digits(mpd_uint_t word)
+{
+ if (word < mpd_pow10[9]) {
+ if (word < mpd_pow10[4]) {
+ if (word < mpd_pow10[2]) {
+ return (word < mpd_pow10[1]) ? 1 : 2;
+ }
+ return (word < mpd_pow10[3]) ? 3 : 4;
+ }
+ if (word < mpd_pow10[6]) {
+ return (word < mpd_pow10[5]) ? 5 : 6;
+ }
+ if (word < mpd_pow10[8]) {
+ return (word < mpd_pow10[7]) ? 7 : 8;
+ }
+ return 9;
+ }
+ if (word < mpd_pow10[14]) {
+ if (word < mpd_pow10[11]) {
+ return (word < mpd_pow10[10]) ? 10 : 11;
+ }
+ if (word < mpd_pow10[13]) {
+ return (word < mpd_pow10[12]) ? 12 : 13;
+ }
+ return 14;
+ }
+ if (word < mpd_pow10[18]) {
+ if (word < mpd_pow10[16]) {
+ return (word < mpd_pow10[15]) ? 15 : 16;
+ }
+ return (word < mpd_pow10[17]) ? 17 : 18;
+ }
+
+ return (word < mpd_pow10[19]) ? 19 : 20;
+}
+#else
+ALWAYS_INLINE int
+mpd_word_digits(mpd_uint_t word)
+{
+ if (word < mpd_pow10[4]) {
+ if (word < mpd_pow10[2]) {
+ return (word < mpd_pow10[1]) ? 1 : 2;
+ }
+ return (word < mpd_pow10[3]) ? 3 : 4;
+ }
+ if (word < mpd_pow10[6]) {
+ return (word < mpd_pow10[5]) ? 5 : 6;
+ }
+ if (word < mpd_pow10[8]) {
+ return (word < mpd_pow10[7]) ? 7 : 8;
+ }
+
+ return (word < mpd_pow10[9]) ? 9 : 10;
+}
+#endif
+
+
+/* Adjusted exponent */
+ALWAYS_INLINE mpd_ssize_t
+mpd_adjexp(const mpd_t *dec)
+{
+ return (dec->exp + dec->digits) - 1;
+}
+
+/* Etiny */
+ALWAYS_INLINE mpd_ssize_t
+mpd_etiny(const mpd_context_t *ctx)
+{
+ return ctx->emin - (ctx->prec - 1);
+}
+
+/* Etop: used for folding down in IEEE clamping */
+ALWAYS_INLINE mpd_ssize_t
+mpd_etop(const mpd_context_t *ctx)
+{
+ return ctx->emax - (ctx->prec - 1);
+}
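+
+/*
+ * Example (worked): the value 1.23E+5 is stored as coefficient 123,
+ * digits=3, exp=3, so mpd_adjexp() = 3+3-1 = 5, i.e. the exponent of the
+ * most significant digit. With prec=5, emin=-999 and emax=999, Etiny is
+ * -999-(5-1) = -1003 and Etop is 999-(5-1) = 995.
+ */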
+
+/* Most significant word */
+ALWAYS_INLINE mpd_uint_t
+mpd_msword(const mpd_t *dec)
+{
+ assert(dec->len > 0);
+ return dec->data[dec->len-1];
+}
+
+/* Most significant digit of a word */
+inline mpd_uint_t
+mpd_msd(mpd_uint_t word)
+{
+ int n;
+
+ n = mpd_word_digits(word);
+ return word / mpd_pow10[n-1];
+}
+
+/* Least significant digit of a word */
+ALWAYS_INLINE mpd_uint_t
+mpd_lsd(mpd_uint_t word)
+{
+ return word % 10;
+}
+
+/* Coefficient size needed to store 'digits' */
+ALWAYS_INLINE mpd_ssize_t
+mpd_digits_to_size(mpd_ssize_t digits)
+{
+ mpd_ssize_t q, r;
+
+ _mpd_idiv_word(&q, &r, digits, MPD_RDIGITS);
+ return (r == 0) ? q : q+1;
+}
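+
+/*
+ * Example (worked): with MPD_RDIGITS==19 (64-bit build), a coefficient of
+ * 25 digits needs ceil(25/19) = 2 words; with MPD_RDIGITS==9 (32-bit
+ * build) it needs ceil(25/9) = 3 words.
+ */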
+
+/* Number of digits in the exponent. Not defined for MPD_SSIZE_MIN. */
+inline int
+mpd_exp_digits(mpd_ssize_t exp)
+{
+ exp = (exp < 0) ? -exp : exp;
+ return mpd_word_digits(exp);
+}
+
+/* Canonical */
+ALWAYS_INLINE int
+mpd_iscanonical(const mpd_t *dec UNUSED)
+{
+ return 1;
+}
+
+/* Finite */
+ALWAYS_INLINE int
+mpd_isfinite(const mpd_t *dec)
+{
+ return !(dec->flags & MPD_SPECIAL);
+}
+
+/* Infinite */
+ALWAYS_INLINE int
+mpd_isinfinite(const mpd_t *dec)
+{
+ return dec->flags & MPD_INF;
+}
+
+/* NaN */
+ALWAYS_INLINE int
+mpd_isnan(const mpd_t *dec)
+{
+ return dec->flags & (MPD_NAN|MPD_SNAN);
+}
+
+/* Negative */
+ALWAYS_INLINE int
+mpd_isnegative(const mpd_t *dec)
+{
+ return dec->flags & MPD_NEG;
+}
+
+/* Positive */
+ALWAYS_INLINE int
+mpd_ispositive(const mpd_t *dec)
+{
+ return !(dec->flags & MPD_NEG);
+}
+
+/* qNaN */
+ALWAYS_INLINE int
+mpd_isqnan(const mpd_t *dec)
+{
+ return dec->flags & MPD_NAN;
+}
+
+/* Signed */
+ALWAYS_INLINE int
+mpd_issigned(const mpd_t *dec)
+{
+ return dec->flags & MPD_NEG;
+}
+
+/* sNaN */
+ALWAYS_INLINE int
+mpd_issnan(const mpd_t *dec)
+{
+ return dec->flags & MPD_SNAN;
+}
+
+/* Special */
+ALWAYS_INLINE int
+mpd_isspecial(const mpd_t *dec)
+{
+ return dec->flags & MPD_SPECIAL;
+}
+
+/* Zero */
+ALWAYS_INLINE int
+mpd_iszero(const mpd_t *dec)
+{
+ return !mpd_isspecial(dec) && mpd_msword(dec) == 0;
+}
+
+/* Test for zero when specials have been ruled out already */
+ALWAYS_INLINE int
+mpd_iszerocoeff(const mpd_t *dec)
+{
+ return mpd_msword(dec) == 0;
+}
+
+/* Normal */
+inline int
+mpd_isnormal(const mpd_t *dec, const mpd_context_t *ctx)
+{
+ if (mpd_isspecial(dec)) return 0;
+ if (mpd_iszerocoeff(dec)) return 0;
+
+ return mpd_adjexp(dec) >= ctx->emin;
+}
+
+/* Subnormal */
+inline int
+mpd_issubnormal(const mpd_t *dec, const mpd_context_t *ctx)
+{
+ if (mpd_isspecial(dec)) return 0;
+ if (mpd_iszerocoeff(dec)) return 0;
+
+ return mpd_adjexp(dec) < ctx->emin;
+}
+
+/* Odd word */
+ALWAYS_INLINE int
+mpd_isoddword(mpd_uint_t word)
+{
+ return word & 1;
+}
+
+/* Odd coefficient */
+ALWAYS_INLINE int
+mpd_isoddcoeff(const mpd_t *dec)
+{
+ return mpd_isoddword(dec->data[0]);
+}
+
+/* 0 if dec is positive, 1 if dec is negative */
+ALWAYS_INLINE uint8_t
+mpd_sign(const mpd_t *dec)
+{
+ return dec->flags & MPD_NEG;
+}
+
+/* 1 if dec is positive, -1 if dec is negative */
+ALWAYS_INLINE int
+mpd_arith_sign(const mpd_t *dec)
+{
+ return 1 - 2 * mpd_isnegative(dec);
+}
+
+/* Radix */
+ALWAYS_INLINE long
+mpd_radix(void)
+{
+ return 10;
+}
+
+/* Dynamic decimal */
+ALWAYS_INLINE int
+mpd_isdynamic(mpd_t *dec)
+{
+ return !(dec->flags & MPD_STATIC);
+}
+
+/* Static decimal */
+ALWAYS_INLINE int
+mpd_isstatic(mpd_t *dec)
+{
+ return dec->flags & MPD_STATIC;
+}
+
+/* Data of decimal is dynamic */
+ALWAYS_INLINE int
+mpd_isdynamic_data(mpd_t *dec)
+{
+ return !(dec->flags & MPD_DATAFLAGS);
+}
+
+/* Data of decimal is static */
+ALWAYS_INLINE int
+mpd_isstatic_data(mpd_t *dec)
+{
+ return dec->flags & MPD_STATIC_DATA;
+}
+
+/* Data of decimal is shared */
+ALWAYS_INLINE int
+mpd_isshared_data(mpd_t *dec)
+{
+ return dec->flags & MPD_SHARED_DATA;
+}
+
+/* Data of decimal is const */
+ALWAYS_INLINE int
+mpd_isconst_data(mpd_t *dec)
+{
+ return dec->flags & MPD_CONST_DATA;
+}
+
+
+/******************************************************************************/
+/* Inline memory handling */
+/******************************************************************************/
+
+/* Fill destination with zeros */
+ALWAYS_INLINE void
+mpd_uint_zero(mpd_uint_t *dest, mpd_size_t len)
+{
+ mpd_size_t i;
+
+ for (i = 0; i < len; i++) {
+ dest[i] = 0;
+ }
+}
+
+/* Free a decimal */
+ALWAYS_INLINE void
+mpd_del(mpd_t *dec)
+{
+ if (mpd_isdynamic_data(dec)) {
+ mpd_free(dec->data);
+ }
+ if (mpd_isdynamic(dec)) {
+ mpd_free(dec);
+ }
+}
+
+/*
+ * Resize the coefficient. Existing data up to 'nwords' is left untouched.
+ * Return 1 on success, 0 otherwise.
+ *
+ * Input invariant: MPD_MINALLOC <= result->alloc.
+ *
+ * Case nwords == result->alloc:
+ * 'result' is unchanged. Return 1.
+ *
+ * Case nwords > result->alloc:
+ * Case realloc success:
+ * The value of 'result' does not change. Return 1.
+ * Case realloc failure:
+ * 'result' is NaN, status is updated with MPD_Malloc_error. Return 0.
+ *
+ * Case nwords < result->alloc:
+ * Case is_static_data or realloc failure [1]:
+ * 'result' is unchanged. Return 1.
+ * Case realloc success:
+ * The value of result is undefined (expected). Return 1.
+ *
+ *
+ * [1] In that case the old (now oversized) area is still valid.
+ */
+ALWAYS_INLINE int
+mpd_qresize(mpd_t *result, mpd_ssize_t nwords, uint32_t *status)
+{
+ assert(!mpd_isconst_data(result)); /* illegal operation for a const */
+ assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
+ assert(MPD_MINALLOC <= result->alloc);
+
+ nwords = (nwords <= MPD_MINALLOC) ? MPD_MINALLOC : nwords;
+ if (nwords == result->alloc) {
+ return 1;
+ }
+ if (mpd_isstatic_data(result)) {
+ if (nwords > result->alloc) {
+ return mpd_switch_to_dyn(result, nwords, status);
+ }
+ return 1;
+ }
+
+ return mpd_realloc_dyn(result, nwords, status);
+}
+
+/* Same as mpd_qresize, but the complete coefficient (including the old
+ * memory area!) is initialized to zero. */
+ALWAYS_INLINE int
+mpd_qresize_zero(mpd_t *result, mpd_ssize_t nwords, uint32_t *status)
+{
+ assert(!mpd_isconst_data(result)); /* illegal operation for a const */
+ assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
+ assert(MPD_MINALLOC <= result->alloc);
+
+ nwords = (nwords <= MPD_MINALLOC) ? MPD_MINALLOC : nwords;
+ if (nwords != result->alloc) {
+ if (mpd_isstatic_data(result)) {
+ if (nwords > result->alloc) {
+ return mpd_switch_to_dyn_zero(result, nwords, status);
+ }
+ }
+ else if (!mpd_realloc_dyn(result, nwords, status)) {
+ return 0;
+ }
+ }
+
+ mpd_uint_zero(result->data, nwords);
+ return 1;
+}
+
+/*
+ * Reduce memory size for the coefficient to MPD_MINALLOC. In theory,
+ * realloc may fail even when reducing the memory size. But in that case
+ * the old memory area is always big enough, so checking for MPD_Malloc_error
+ * is not imperative.
+ */
+ALWAYS_INLINE void
+mpd_minalloc(mpd_t *result)
+{
+ assert(!mpd_isconst_data(result)); /* illegal operation for a const */
+ assert(!mpd_isshared_data(result)); /* illegal operation for a shared */
+
+ if (!mpd_isstatic_data(result) && result->alloc > MPD_MINALLOC) {
+ uint8_t err = 0;
+ result->data = mpd_realloc(result->data, MPD_MINALLOC,
+ sizeof *result->data, &err);
+ if (!err) {
+ result->alloc = MPD_MINALLOC;
+ }
+ }
+}
+
+int
+mpd_resize(mpd_t *result, mpd_ssize_t nwords, mpd_context_t *ctx)
+{
+ uint32_t status = 0;
+ if (!mpd_qresize(result, nwords, &status)) {
+ mpd_addstatus_raise(ctx, status);
+ return 0;
+ }
+ return 1;
+}
+
+int
+mpd_resize_zero(mpd_t *result, mpd_ssize_t nwords, mpd_context_t *ctx)
+{
+ uint32_t status = 0;
+ if (!mpd_qresize_zero(result, nwords, &status)) {
+ mpd_addstatus_raise(ctx, status);
+ return 0;
+ }
+ return 1;
+}
+
+
+/******************************************************************************/
+/* Set attributes of a decimal */
+/******************************************************************************/
+
+/* Set digits. Assumption: result->len is initialized and > 0. */
+inline void
+mpd_setdigits(mpd_t *result)
+{
+ mpd_ssize_t wdigits = mpd_word_digits(mpd_msword(result));
+ result->digits = wdigits + (result->len-1) * MPD_RDIGITS;
+}
+
+/* Set sign */
+ALWAYS_INLINE void
+mpd_set_sign(mpd_t *result, uint8_t sign)
+{
+ result->flags &= ~MPD_NEG;
+ result->flags |= sign;
+}
+
+/* Copy sign from another decimal */
+ALWAYS_INLINE void
+mpd_signcpy(mpd_t *result, mpd_t *a)
+{
+ uint8_t sign = a->flags&MPD_NEG;
+
+ result->flags &= ~MPD_NEG;
+ result->flags |= sign;
+}
+
+/* Set infinity */
+ALWAYS_INLINE void
+mpd_set_infinity(mpd_t *result)
+{
+ result->flags &= ~MPD_SPECIAL;
+ result->flags |= MPD_INF;
+}
+
+/* Set qNaN */
+ALWAYS_INLINE void
+mpd_set_qnan(mpd_t *result)
+{
+ result->flags &= ~MPD_SPECIAL;
+ result->flags |= MPD_NAN;
+}
+
+/* Set sNaN */
+ALWAYS_INLINE void
+mpd_set_snan(mpd_t *result)
+{
+ result->flags &= ~MPD_SPECIAL;
+ result->flags |= MPD_SNAN;
+}
+
+/* Set to negative */
+ALWAYS_INLINE void
+mpd_set_negative(mpd_t *result)
+{
+ result->flags |= MPD_NEG;
+}
+
+/* Set to positive */
+ALWAYS_INLINE void
+mpd_set_positive(mpd_t *result)
+{
+ result->flags &= ~MPD_NEG;
+}
+
+/* Set to dynamic */
+ALWAYS_INLINE void
+mpd_set_dynamic(mpd_t *result)
+{
+ result->flags &= ~MPD_STATIC;
+}
+
+/* Set to static */
+ALWAYS_INLINE void
+mpd_set_static(mpd_t *result)
+{
+ result->flags |= MPD_STATIC;
+}
+
+/* Set data to dynamic */
+ALWAYS_INLINE void
+mpd_set_dynamic_data(mpd_t *result)
+{
+ result->flags &= ~MPD_DATAFLAGS;
+}
+
+/* Set data to static */
+ALWAYS_INLINE void
+mpd_set_static_data(mpd_t *result)
+{
+ result->flags &= ~MPD_DATAFLAGS;
+ result->flags |= MPD_STATIC_DATA;
+}
+
+/* Set data to shared */
+ALWAYS_INLINE void
+mpd_set_shared_data(mpd_t *result)
+{
+ result->flags &= ~MPD_DATAFLAGS;
+ result->flags |= MPD_SHARED_DATA;
+}
+
+/* Set data to const */
+ALWAYS_INLINE void
+mpd_set_const_data(mpd_t *result)
+{
+ result->flags &= ~MPD_DATAFLAGS;
+ result->flags |= MPD_CONST_DATA;
+}
+
+/* Clear flags, preserving memory attributes. */
+ALWAYS_INLINE void
+mpd_clear_flags(mpd_t *result)
+{
+ result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
+}
+
+/* Set flags, preserving memory attributes. */
+ALWAYS_INLINE void
+mpd_set_flags(mpd_t *result, uint8_t flags)
+{
+ result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
+ result->flags |= flags;
+}
+
+/* Copy flags, preserving memory attributes of result. */
+ALWAYS_INLINE void
+mpd_copy_flags(mpd_t *result, const mpd_t *a)
+{
+ uint8_t aflags = a->flags;
+ result->flags &= (MPD_STATIC|MPD_DATAFLAGS);
+ result->flags |= (aflags & ~(MPD_STATIC|MPD_DATAFLAGS));
+}
+
+/* Initialize a workcontext from ctx. Set traps, flags and newtrap to 0. */
+static inline void
+mpd_workcontext(mpd_context_t *workctx, const mpd_context_t *ctx)
+{
+ workctx->prec = ctx->prec;
+ workctx->emax = ctx->emax;
+ workctx->emin = ctx->emin;
+ workctx->round = ctx->round;
+ workctx->traps = 0;
+ workctx->status = 0;
+ workctx->newtrap = 0;
+ workctx->clamp = ctx->clamp;
+ workctx->allcr = ctx->allcr;
+}
+
+
+/******************************************************************************/
+/* Getting and setting parts of decimals */
+/******************************************************************************/
+
+/* Flip the sign of a decimal */
+static inline void
+_mpd_negate(mpd_t *dec)
+{
+ dec->flags ^= MPD_NEG;
+}
+
+/* Set coefficient to zero */
+void
+mpd_zerocoeff(mpd_t *result)
+{
+ mpd_minalloc(result);
+ result->digits = 1;
+ result->len = 1;
+ result->data[0] = 0;
+}
+
+/* Set the coefficient to all nines. */
+void
+mpd_qmaxcoeff(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_ssize_t len, r;
+
+ _mpd_idiv_word(&len, &r, ctx->prec, MPD_RDIGITS);
+ len = (r == 0) ? len : len+1;
+
+ if (!mpd_qresize(result, len, status)) {
+ return;
+ }
+
+ result->len = len;
+ result->digits = ctx->prec;
+
+ --len;
+ if (r > 0) {
+ result->data[len--] = mpd_pow10[r]-1;
+ }
+ for (; len >= 0; --len) {
+ result->data[len] = MPD_RADIX-1;
+ }
+}
+
+/*
+ * Cut off the most significant digits so that the rest fits in ctx->prec.
+ * Cannot fail.
+ */
+static void
+_mpd_cap(mpd_t *result, const mpd_context_t *ctx)
+{
+ uint32_t dummy;
+ mpd_ssize_t len, r;
+
+ if (result->len > 0 && result->digits > ctx->prec) {
+ _mpd_idiv_word(&len, &r, ctx->prec, MPD_RDIGITS);
+ len = (r == 0) ? len : len+1;
+
+ if (r != 0) {
+ result->data[len-1] %= mpd_pow10[r];
+ }
+
+ len = _mpd_real_size(result->data, len);
+ /* resize to fewer words cannot fail */
+ mpd_qresize(result, len, &dummy);
+ result->len = len;
+ mpd_setdigits(result);
+ }
+ if (mpd_iszero(result)) {
+ _settriple(result, mpd_sign(result), 0, result->exp);
+ }
+}
+
+/*
+ * Cut off the most significant digits of a NaN payload so that the rest
+ * fits in ctx->prec - ctx->clamp. Cannot fail.
+ */
+static void
+_mpd_fix_nan(mpd_t *result, const mpd_context_t *ctx)
+{
+ uint32_t dummy;
+ mpd_ssize_t prec;
+ mpd_ssize_t len, r;
+
+ prec = ctx->prec - ctx->clamp;
+ if (result->len > 0 && result->digits > prec) {
+ if (prec == 0) {
+ mpd_minalloc(result);
+ result->len = result->digits = 0;
+ }
+ else {
+ _mpd_idiv_word(&len, &r, prec, MPD_RDIGITS);
+ len = (r == 0) ? len : len+1;
+
+ if (r != 0) {
+ result->data[len-1] %= mpd_pow10[r];
+ }
+
+ len = _mpd_real_size(result->data, len);
+ /* resize to fewer words cannot fail */
+ mpd_qresize(result, len, &dummy);
+ result->len = len;
+ mpd_setdigits(result);
+ if (mpd_iszerocoeff(result)) {
+ /* NaN0 is not a valid representation */
+ result->len = result->digits = 0;
+ }
+ }
+ }
+}
+
+/*
+ * Get n most significant digits from a decimal, where 0 < n <= MPD_UINT_DIGITS.
+ * Assumes MPD_UINT_DIGITS == MPD_RDIGITS+1, which is true for 32 and 64 bit
+ * machines.
+ *
+ * The result of the operation will be in lo. If the operation is impossible,
+ * hi will be nonzero. This is used to indicate an error.
+ */
+static inline void
+_mpd_get_msdigits(mpd_uint_t *hi, mpd_uint_t *lo, const mpd_t *dec,
+ unsigned int n)
+{
+ mpd_uint_t r, tmp;
+
+ assert(0 < n && n <= MPD_RDIGITS+1);
+
+ _mpd_div_word(&tmp, &r, dec->digits, MPD_RDIGITS);
+ r = (r == 0) ? MPD_RDIGITS : r; /* digits in the most significant word */
+
+ *hi = 0;
+ *lo = dec->data[dec->len-1];
+ if (n <= r) {
+ *lo /= mpd_pow10[r-n];
+ }
+ else if (dec->len > 1) {
+ /* at this point 1 <= r < n <= MPD_RDIGITS+1 */
+ _mpd_mul_words(hi, lo, *lo, mpd_pow10[n-r]);
+ tmp = dec->data[dec->len-2] / mpd_pow10[MPD_RDIGITS-(n-r)];
+ *lo = *lo + tmp;
+ if (*lo < tmp) (*hi)++;
+ }
+}
+
+
+/******************************************************************************/
+/* Gathering information about a decimal */
+/******************************************************************************/
+
+/* The real size of the coefficient without leading zero words. */
+static inline mpd_ssize_t
+_mpd_real_size(mpd_uint_t *data, mpd_ssize_t size)
+{
+ while (size > 1 && data[size-1] == 0) {
+ size--;
+ }
+
+ return size;
+}
+
+/* Return number of trailing zeros. No errors are possible. */
+mpd_ssize_t
+mpd_trail_zeros(const mpd_t *dec)
+{
+ mpd_uint_t word;
+ mpd_ssize_t i, tz = 0;
+
+ for (i=0; i < dec->len; ++i) {
+ if (dec->data[i] != 0) {
+ word = dec->data[i];
+ tz = i * MPD_RDIGITS;
+ while (word % 10 == 0) {
+ word /= 10;
+ tz++;
+ }
+ break;
+ }
+ }
+
+ return tz;
+}
+
+/* Integer: Undefined for specials */
+static int
+_mpd_isint(const mpd_t *dec)
+{
+ mpd_ssize_t tz;
+
+ if (mpd_iszerocoeff(dec)) {
+ return 1;
+ }
+
+ tz = mpd_trail_zeros(dec);
+ return (dec->exp + tz >= 0);
+}
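+
+/*
+ * Example (worked): coefficient 12300 with exp=-2 represents 123.00;
+ * mpd_trail_zeros() = 2 and exp+tz = 0 >= 0, so the value is an integer.
+ * Coefficient 1230 with exp=-2 represents 12.30; tz = 1 and exp+tz = -1,
+ * so it is not.
+ */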
+
+/* Integer */
+int
+mpd_isinteger(const mpd_t *dec)
+{
+ if (mpd_isspecial(dec)) {
+ return 0;
+ }
+ return _mpd_isint(dec);
+}
+
+/* Word is a power of 10 */
+static int
+mpd_word_ispow10(mpd_uint_t word)
+{
+ int n;
+
+ n = mpd_word_digits(word);
+ if (word == mpd_pow10[n-1]) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Coefficient is a power of 10 */
+static int
+mpd_coeff_ispow10(const mpd_t *dec)
+{
+ if (mpd_word_ispow10(mpd_msword(dec))) {
+ if (_mpd_isallzero(dec->data, dec->len-1)) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* All digits of a word are nines */
+static int
+mpd_word_isallnine(mpd_uint_t word)
+{
+ int n;
+
+ n = mpd_word_digits(word);
+ if (word == mpd_pow10[n]-1) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/* All digits of the coefficient are nines */
+static int
+mpd_coeff_isallnine(const mpd_t *dec)
+{
+ if (mpd_word_isallnine(mpd_msword(dec))) {
+ if (_mpd_isallnine(dec->data, dec->len-1)) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Odd decimal: Undefined for non-integers! */
+int
+mpd_isodd(const mpd_t *dec)
+{
+ mpd_uint_t q, r;
+ assert(mpd_isinteger(dec));
+ if (mpd_iszerocoeff(dec)) return 0;
+ if (dec->exp < 0) {
+ _mpd_div_word(&q, &r, -dec->exp, MPD_RDIGITS);
+ q = dec->data[q] / mpd_pow10[r];
+ return mpd_isoddword(q);
+ }
+ return dec->exp == 0 && mpd_isoddword(dec->data[0]);
+}
+
+/* Even: Undefined for non-integers! */
+int
+mpd_iseven(const mpd_t *dec)
+{
+ return !mpd_isodd(dec);
+}
+
+/******************************************************************************/
+/* Getting and setting decimals */
+/******************************************************************************/
+
+/* Internal function: Set a static decimal from a triple, no error checking. */
+static void
+_ssettriple(mpd_t *result, uint8_t sign, mpd_uint_t a, mpd_ssize_t exp)
+{
+ mpd_set_flags(result, sign);
+ result->exp = exp;
+ _mpd_div_word(&result->data[1], &result->data[0], a, MPD_RADIX);
+ result->len = (result->data[1] == 0) ? 1 : 2;
+ mpd_setdigits(result);
+}
+
+/* Internal function: Set a decimal from a triple, no error checking. */
+static void
+_settriple(mpd_t *result, uint8_t sign, mpd_uint_t a, mpd_ssize_t exp)
+{
+ mpd_minalloc(result);
+ mpd_set_flags(result, sign);
+ result->exp = exp;
+ _mpd_div_word(&result->data[1], &result->data[0], a, MPD_RADIX);
+ result->len = (result->data[1] == 0) ? 1 : 2;
+ mpd_setdigits(result);
+}
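+
+/*
+ * Example (worked): _settriple(result, MPD_NEG, 12345, -2) sets result to
+ * -123.45: the coefficient 12345 fits into a single word (12345 < MPD_RADIX),
+ * exp is -2 and the sign flag is set.
+ */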
+
+/* Set a special number from a triple */
+void
+mpd_setspecial(mpd_t *result, uint8_t sign, uint8_t type)
+{
+ mpd_minalloc(result);
+ result->flags &= ~(MPD_NEG|MPD_SPECIAL);
+ result->flags |= (sign|type);
+ result->exp = result->digits = result->len = 0;
+}
+
+/* Set result to NaN and update the status with the error flags */
+void
+mpd_seterror(mpd_t *result, uint32_t flags, uint32_t *status)
+{
+ mpd_minalloc(result);
+ mpd_set_qnan(result);
+ mpd_set_positive(result);
+ result->exp = result->digits = result->len = 0;
+ *status |= flags;
+}
+
+/* quietly set a static decimal from an mpd_ssize_t */
+void
+mpd_qsset_ssize(mpd_t *result, mpd_ssize_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_uint_t u;
+ uint8_t sign = MPD_POS;
+
+ if (a < 0) {
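+        /*
+         * -a would overflow for a == MPD_SSIZE_MIN. MPD_SSIZE_MIN+MPD_SSIZE_MAX
+         * is -1, so the expression in the branch below yields
+         * MPD_SSIZE_MAX+1 == |MPD_SSIZE_MIN| entirely in unsigned arithmetic.
+         */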
+ if (a == MPD_SSIZE_MIN) {
+ u = (mpd_uint_t)MPD_SSIZE_MAX +
+ (-(MPD_SSIZE_MIN+MPD_SSIZE_MAX));
+ }
+ else {
+ u = -a;
+ }
+ sign = MPD_NEG;
+ }
+ else {
+ u = a;
+ }
+ _ssettriple(result, sign, u, 0);
+ mpd_qfinalize(result, ctx, status);
+}
+
+/* quietly set a static decimal from an mpd_uint_t */
+void
+mpd_qsset_uint(mpd_t *result, mpd_uint_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ _ssettriple(result, MPD_POS, a, 0);
+ mpd_qfinalize(result, ctx, status);
+}
+
+/* quietly set a static decimal from an int32_t */
+void
+mpd_qsset_i32(mpd_t *result, int32_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_qsset_ssize(result, a, ctx, status);
+}
+
+/* quietly set a static decimal from a uint32_t */
+void
+mpd_qsset_u32(mpd_t *result, uint32_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_qsset_uint(result, a, ctx, status);
+}
+
+#ifdef CONFIG_64
+/* quietly set a static decimal from an int64_t */
+void
+mpd_qsset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_qsset_ssize(result, a, ctx, status);
+}
+
+/* quietly set a static decimal from a uint64_t */
+void
+mpd_qsset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_qsset_uint(result, a, ctx, status);
+}
+#endif
+
+/* quietly set a decimal from an mpd_ssize_t */
+void
+mpd_qset_ssize(mpd_t *result, mpd_ssize_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_minalloc(result);
+ mpd_qsset_ssize(result, a, ctx, status);
+}
+
+/* quietly set a decimal from an mpd_uint_t */
+void
+mpd_qset_uint(mpd_t *result, mpd_uint_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ _settriple(result, MPD_POS, a, 0);
+ mpd_qfinalize(result, ctx, status);
+}
+
+/* quietly set a decimal from an int32_t */
+void
+mpd_qset_i32(mpd_t *result, int32_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_qset_ssize(result, a, ctx, status);
+}
+
+/* quietly set a decimal from a uint32_t */
+void
+mpd_qset_u32(mpd_t *result, uint32_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_qset_uint(result, a, ctx, status);
+}
+
+#if defined(CONFIG_32) && !defined(LEGACY_COMPILER)
+/* set a decimal from a uint64_t */
+static void
+_c32setu64(mpd_t *result, uint64_t u, uint8_t sign, uint32_t *status)
+{
+ mpd_uint_t w[3];
+ uint64_t q;
+ int i, len;
+
+ len = 0;
+ do {
+ q = u / MPD_RADIX;
+ w[len] = (mpd_uint_t)(u - q * MPD_RADIX);
+ u = q; len++;
+ } while (u != 0);
+
+ if (!mpd_qresize(result, len, status)) {
+ return;
+ }
+ for (i = 0; i < len; i++) {
+ result->data[i] = w[i];
+ }
+
+ mpd_set_sign(result, sign);
+ result->exp = 0;
+ result->len = len;
+ mpd_setdigits(result);
+}
+
+static void
+_c32_qset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ _c32setu64(result, a, MPD_POS, status);
+ mpd_qfinalize(result, ctx, status);
+}
+
+/* set a decimal from an int64_t */
+static void
+_c32_qset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ uint64_t u;
+ uint8_t sign = MPD_POS;
+
+ if (a < 0) {
+ if (a == INT64_MIN) {
+ u = (uint64_t)INT64_MAX + (-(INT64_MIN+INT64_MAX));
+ }
+ else {
+ u = -a;
+ }
+ sign = MPD_NEG;
+ }
+ else {
+ u = a;
+ }
+ _c32setu64(result, u, sign, status);
+ mpd_qfinalize(result, ctx, status);
+}
+#endif /* CONFIG_32 && !LEGACY_COMPILER */
+
+#ifndef LEGACY_COMPILER
+/* quietly set a decimal from an int64_t */
+void
+mpd_qset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+#ifdef CONFIG_64
+ mpd_qset_ssize(result, a, ctx, status);
+#else
+ _c32_qset_i64(result, a, ctx, status);
+#endif
+}
+
+/* quietly set a decimal from a uint64_t */
+void
+mpd_qset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+#ifdef CONFIG_64
+ mpd_qset_uint(result, a, ctx, status);
+#else
+ _c32_qset_u64(result, a, ctx, status);
+#endif
+}
+#endif /* !LEGACY_COMPILER */
+
+
+/*
+ * Quietly get an mpd_uint_t from a decimal. Assumes
+ * MPD_UINT_DIGITS == MPD_RDIGITS+1, which is true for
+ * 32 and 64 bit machines.
+ *
+ * If the operation is impossible, MPD_Invalid_operation is set.
+ */
+static mpd_uint_t
+_mpd_qget_uint(int use_sign, const mpd_t *a, uint32_t *status)
+{
+ mpd_t tmp;
+ mpd_uint_t tmp_data[2];
+ mpd_uint_t lo, hi;
+
+ if (mpd_isspecial(a)) {
+ *status |= MPD_Invalid_operation;
+ return MPD_UINT_MAX;
+ }
+ if (mpd_iszero(a)) {
+ return 0;
+ }
+ if (use_sign && mpd_isnegative(a)) {
+ *status |= MPD_Invalid_operation;
+ return MPD_UINT_MAX;
+ }
+
+ if (a->digits+a->exp > MPD_RDIGITS+1) {
+ *status |= MPD_Invalid_operation;
+ return MPD_UINT_MAX;
+ }
+
+ if (a->exp < 0) {
+ if (!_mpd_isint(a)) {
+ *status |= MPD_Invalid_operation;
+ return MPD_UINT_MAX;
+ }
+ /* At this point a->digits+a->exp <= MPD_RDIGITS+1,
+ * so the shift fits. */
+ tmp.data = tmp_data;
+ tmp.flags = MPD_STATIC|MPD_STATIC_DATA;
+ tmp.alloc = 2;
+ mpd_qsshiftr(&tmp, a, -a->exp);
+ tmp.exp = 0;
+ a = &tmp;
+ }
+
+ _mpd_get_msdigits(&hi, &lo, a, MPD_RDIGITS+1);
+ if (hi) {
+ *status |= MPD_Invalid_operation;
+ return MPD_UINT_MAX;
+ }
+
+ if (a->exp > 0) {
+ _mpd_mul_words(&hi, &lo, lo, mpd_pow10[a->exp]);
+ if (hi) {
+ *status |= MPD_Invalid_operation;
+ return MPD_UINT_MAX;
+ }
+ }
+
+ return lo;
+}
+
+/*
+ * Sets Invalid_operation for:
+ * - specials
+ * - negative numbers (except negative zero)
+ * - non-integers
+ * - overflow
+ */
+mpd_uint_t
+mpd_qget_uint(const mpd_t *a, uint32_t *status)
+{
+ return _mpd_qget_uint(1, a, status);
+}
+
+/* Same as above, but gets the absolute value, i.e. the sign is ignored. */
+mpd_uint_t
+mpd_qabs_uint(const mpd_t *a, uint32_t *status)
+{
+ return _mpd_qget_uint(0, a, status);
+}
+
+/* quietly get an mpd_ssize_t from a decimal */
+mpd_ssize_t
+mpd_qget_ssize(const mpd_t *a, uint32_t *status)
+{
+ mpd_uint_t u;
+ int isneg;
+
+ u = mpd_qabs_uint(a, status);
+ if (*status&MPD_Invalid_operation) {
+ return MPD_SSIZE_MAX;
+ }
+
+ isneg = mpd_isnegative(a);
+ if (u <= MPD_SSIZE_MAX) {
+ return isneg ? -((mpd_ssize_t)u) : (mpd_ssize_t)u;
+ }
+ else if (isneg && u+(MPD_SSIZE_MIN+MPD_SSIZE_MAX) == MPD_SSIZE_MAX) {
+ return MPD_SSIZE_MIN;
+ }
+
+ *status |= MPD_Invalid_operation;
+ return MPD_SSIZE_MAX;
+}
+
+#ifdef CONFIG_64
+/* quietly get a uint64_t from a decimal */
+uint64_t
+mpd_qget_u64(const mpd_t *a, uint32_t *status)
+{
+ return mpd_qget_uint(a, status);
+}
+
+/* quietly get an int64_t from a decimal */
+int64_t
+mpd_qget_i64(const mpd_t *a, uint32_t *status)
+{
+ return mpd_qget_ssize(a, status);
+}
+#else
+/* quietly get a uint32_t from a decimal */
+uint32_t
+mpd_qget_u32(const mpd_t *a, uint32_t *status)
+{
+ return mpd_qget_uint(a, status);
+}
+
+/* quietly get an int32_t from a decimal */
+int32_t
+mpd_qget_i32(const mpd_t *a, uint32_t *status)
+{
+ return mpd_qget_ssize(a, status);
+}
+#endif
+
+
+/******************************************************************************/
+/* Filtering input of functions, finalizing output of functions */
+/******************************************************************************/
+
+/*
+ * Check if the operand is NaN, copy to result and return 1 if this is
+ * the case. Copying can fail since NaNs are allowed to have a payload that
+ * does not fit in MPD_MINALLOC.
+ */
+int
+mpd_qcheck_nan(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ if (mpd_isnan(a)) {
+ *status |= mpd_issnan(a) ? MPD_Invalid_operation : 0;
+ mpd_qcopy(result, a, status);
+ mpd_set_qnan(result);
+ _mpd_fix_nan(result, ctx);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Check if either operand is NaN, copy to result and return 1 if this
+ * is the case. Copying can fail since NaNs are allowed to have a payload
+ * that does not fit in MPD_MINALLOC.
+ */
+int
+mpd_qcheck_nans(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ if ((a->flags|b->flags)&(MPD_NAN|MPD_SNAN)) {
+ const mpd_t *choice = b;
+ if (mpd_issnan(a)) {
+ choice = a;
+ *status |= MPD_Invalid_operation;
+ }
+ else if (mpd_issnan(b)) {
+ *status |= MPD_Invalid_operation;
+ }
+ else if (mpd_isqnan(a)) {
+ choice = a;
+ }
+ mpd_qcopy(result, choice, status);
+ mpd_set_qnan(result);
+ _mpd_fix_nan(result, ctx);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Check if one of the operands is NaN, copy to result and return 1 if this
+ * is the case. Copying can fail since NaNs are allowed to have a payload
+ * that does not fit in MPD_MINALLOC.
+ */
+static int
+mpd_qcheck_3nans(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_t *c,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ if ((a->flags|b->flags|c->flags)&(MPD_NAN|MPD_SNAN)) {
+ const mpd_t *choice = c;
+ if (mpd_issnan(a)) {
+ choice = a;
+ *status |= MPD_Invalid_operation;
+ }
+ else if (mpd_issnan(b)) {
+ choice = b;
+ *status |= MPD_Invalid_operation;
+ }
+ else if (mpd_issnan(c)) {
+ *status |= MPD_Invalid_operation;
+ }
+ else if (mpd_isqnan(a)) {
+ choice = a;
+ }
+ else if (mpd_isqnan(b)) {
+ choice = b;
+ }
+ mpd_qcopy(result, choice, status);
+ mpd_set_qnan(result);
+ _mpd_fix_nan(result, ctx);
+ return 1;
+ }
+ return 0;
+}
+
+/* Check if rounding digit 'rnd' leads to an increment. */
+static inline int
+_mpd_rnd_incr(const mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx)
+{
+ int ld;
+
+ switch (ctx->round) {
+ case MPD_ROUND_DOWN: case MPD_ROUND_TRUNC:
+ return 0;
+ case MPD_ROUND_HALF_UP:
+ return (rnd >= 5);
+ case MPD_ROUND_HALF_EVEN:
+ return (rnd > 5) || ((rnd == 5) && mpd_isoddcoeff(dec));
+ case MPD_ROUND_CEILING:
+ return !(rnd == 0 || mpd_isnegative(dec));
+ case MPD_ROUND_FLOOR:
+ return !(rnd == 0 || mpd_ispositive(dec));
+ case MPD_ROUND_HALF_DOWN:
+ return (rnd > 5);
+ case MPD_ROUND_UP:
+ return !(rnd == 0);
+ case MPD_ROUND_05UP:
+ ld = (int)mpd_lsd(dec->data[0]);
+ return (!(rnd == 0) && (ld == 0 || ld == 5));
+ default:
+ /* Without a valid context, further results will be undefined. */
+ return 0; /* GCOV_NOT_REACHED */
+ }
+}
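+
+/*
+ * Example (worked): rounding 2.5 and 3.5 to integers. With ROUND_HALF_EVEN
+ * the rounding digit 5 causes an increment only if the retained coefficient
+ * is odd, so 2.5 -> 2 and 3.5 -> 4. With ROUND_HALF_UP both are incremented:
+ * 2.5 -> 3 and 3.5 -> 4.
+ */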
+
+/*
+ * Apply rounding to a decimal that has been right-shifted into a full
+ * precision decimal. If an increment leads to an overflow of the precision,
+ * adjust the coefficient and the exponent and check the new exponent for
+ * overflow.
+ */
+static inline void
+_mpd_apply_round(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ if (_mpd_rnd_incr(dec, rnd, ctx)) {
+ /* We have a number with exactly ctx->prec digits. The increment
+ * can only lead to an overflow if the decimal is all nines. In
+ * that case, the result is a power of ten with prec+1 digits.
+ *
+ * If the precision is a multiple of MPD_RDIGITS, this situation is
+ * detected by _mpd_baseincr returning a carry.
+ * If the precision is not a multiple of MPD_RDIGITS, we have to
+ * check if the result has one digit too many.
+ */
+ mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
+ if (carry) {
+ dec->data[dec->len-1] = mpd_pow10[MPD_RDIGITS-1];
+ dec->exp += 1;
+ _mpd_check_exp(dec, ctx, status);
+ return;
+ }
+ mpd_setdigits(dec);
+ if (dec->digits > ctx->prec) {
+ mpd_qshiftr_inplace(dec, 1);
+ dec->exp += 1;
+ dec->digits = ctx->prec;
+ _mpd_check_exp(dec, ctx, status);
+ }
+ }
+}
+
+/*
+ * Apply rounding to a decimal. Allow overflow of the precision.
+ */
+static inline void
+_mpd_apply_round_excess(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ if (_mpd_rnd_incr(dec, rnd, ctx)) {
+ mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
+ if (carry) {
+ if (!mpd_qresize(dec, dec->len+1, status)) {
+ return;
+ }
+ dec->data[dec->len] = 1;
+ dec->len += 1;
+ }
+ mpd_setdigits(dec);
+ }
+}
+
+/*
+ * Apply rounding to a decimal that has been right-shifted into a decimal
+ * with full precision or less. Return failure if an increment would
+ * overflow the precision.
+ */
+static inline int
+_mpd_apply_round_fit(mpd_t *dec, mpd_uint_t rnd, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ if (_mpd_rnd_incr(dec, rnd, ctx)) {
+ mpd_uint_t carry = _mpd_baseincr(dec->data, dec->len);
+ if (carry) {
+ if (!mpd_qresize(dec, dec->len+1, status)) {
+ return 0;
+ }
+ dec->data[dec->len] = 1;
+ dec->len += 1;
+ }
+ mpd_setdigits(dec);
+ if (dec->digits > ctx->prec) {
+ mpd_seterror(dec, MPD_Invalid_operation, status);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/* Check a normal number for overflow, underflow, clamping. If the operand
+ is modified, it will be zero, special or (sub)normal with a coefficient
+ that fits into the current context precision. */
+static inline void
+_mpd_check_exp(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_ssize_t adjexp, etiny, shift;
+ int rnd;
+
+ adjexp = mpd_adjexp(dec);
+ if (adjexp > ctx->emax) {
+
+ if (mpd_iszerocoeff(dec)) {
+ dec->exp = ctx->emax;
+ if (ctx->clamp) {
+ dec->exp -= (ctx->prec-1);
+ }
+ mpd_zerocoeff(dec);
+ *status |= MPD_Clamped;
+ return;
+ }
+
+ switch (ctx->round) {
+ case MPD_ROUND_HALF_UP: case MPD_ROUND_HALF_EVEN:
+ case MPD_ROUND_HALF_DOWN: case MPD_ROUND_UP:
+ case MPD_ROUND_TRUNC:
+ mpd_setspecial(dec, mpd_sign(dec), MPD_INF);
+ break;
+ case MPD_ROUND_DOWN: case MPD_ROUND_05UP:
+ mpd_qmaxcoeff(dec, ctx, status);
+ dec->exp = ctx->emax - ctx->prec + 1;
+ break;
+ case MPD_ROUND_CEILING:
+ if (mpd_isnegative(dec)) {
+ mpd_qmaxcoeff(dec, ctx, status);
+ dec->exp = ctx->emax - ctx->prec + 1;
+ }
+ else {
+ mpd_setspecial(dec, MPD_POS, MPD_INF);
+ }
+ break;
+ case MPD_ROUND_FLOOR:
+ if (mpd_ispositive(dec)) {
+ mpd_qmaxcoeff(dec, ctx, status);
+ dec->exp = ctx->emax - ctx->prec + 1;
+ }
+ else {
+ mpd_setspecial(dec, MPD_NEG, MPD_INF);
+ }
+ break;
+ default: /* debug */
+ abort(); /* GCOV_NOT_REACHED */
+ }
+
+ *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
+
+ } /* fold down */
+ else if (ctx->clamp && dec->exp > mpd_etop(ctx)) {
+ /* At this point adjexp=exp+digits-1 <= emax and exp > etop=emax-prec+1:
+ * (1) shift = exp -emax+prec-1 > 0
+ * (2) digits+shift = exp+digits-1 - emax + prec <= prec */
+ shift = dec->exp - mpd_etop(ctx);
+ if (!mpd_qshiftl(dec, dec, shift, status)) {
+ return;
+ }
+ dec->exp -= shift;
+ *status |= MPD_Clamped;
+ if (!mpd_iszerocoeff(dec) && adjexp < ctx->emin) {
+ /* Underflow is impossible, since exp < etiny=emin-prec+1
+ * and exp > etop=emax-prec+1 would imply emax < emin. */
+ *status |= MPD_Subnormal;
+ }
+ }
+ else if (adjexp < ctx->emin) {
+
+ etiny = mpd_etiny(ctx);
+
+ if (mpd_iszerocoeff(dec)) {
+ if (dec->exp < etiny) {
+ dec->exp = etiny;
+ mpd_zerocoeff(dec);
+ *status |= MPD_Clamped;
+ }
+ return;
+ }
+
+ *status |= MPD_Subnormal;
+ if (dec->exp < etiny) {
+ /* At this point adjexp=exp+digits-1 < emin and exp < etiny=emin-prec+1:
+ * (1) shift = emin-prec+1 - exp > 0
+ * (2) digits-shift = exp+digits-1 - emin + prec < prec */
+ shift = etiny - dec->exp;
+ rnd = (int)mpd_qshiftr_inplace(dec, shift);
+ dec->exp = etiny;
+ /* We always have a spare digit in case of an increment. */
+ _mpd_apply_round_excess(dec, rnd, ctx, status);
+ *status |= MPD_Rounded;
+ if (rnd) {
+ *status |= (MPD_Inexact|MPD_Underflow);
+ if (mpd_iszerocoeff(dec)) {
+ mpd_zerocoeff(dec);
+ *status |= MPD_Clamped;
+ }
+ }
+ }
+ /* Case exp >= etiny=emin-prec+1:
+ * (1) adjexp=exp+digits-1 < emin
+ * (2) digits < emin-exp+1 <= prec */
+ }
+}
+
+/* Transcendental functions do not always set Underflow reliably,
+ * since they only use as much precision as is necessary for correct
+ * rounding. If a result like 1.0000000000e-101 is finalized, there
+ * is no rounding digit that would trigger Underflow. But we can
+ * assume Inexact, so a short check suffices. */
+static inline void
+mpd_check_underflow(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
+{
+ if (mpd_adjexp(dec) < ctx->emin && !mpd_iszero(dec) &&
+ dec->exp < mpd_etiny(ctx)) {
+ *status |= MPD_Underflow;
+ }
+}
+
+/* Check if a normal number must be rounded after the exponent has been checked. */
+static inline void
+_mpd_check_round(mpd_t *dec, const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_uint_t rnd;
+ mpd_ssize_t shift;
+
+ /* must handle specials: _mpd_check_exp() can produce infinities or NaNs */
+ if (mpd_isspecial(dec)) {
+ return;
+ }
+
+ if (dec->digits > ctx->prec) {
+ shift = dec->digits - ctx->prec;
+ rnd = mpd_qshiftr_inplace(dec, shift);
+ dec->exp += shift;
+ _mpd_apply_round(dec, rnd, ctx, status);
+ *status |= MPD_Rounded;
+ if (rnd) {
+ *status |= MPD_Inexact;
+ }
+ }
+}
+
+/* Finalize all operations. */
+void
+mpd_qfinalize(mpd_t *result, const mpd_context_t *ctx, uint32_t *status)
+{
+ if (mpd_isspecial(result)) {
+ if (mpd_isnan(result)) {
+ _mpd_fix_nan(result, ctx);
+ }
+ return;
+ }
+
+ _mpd_check_exp(result, ctx, status);
+ _mpd_check_round(result, ctx, status);
+}
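+
+/*
+ * Example (illustrative sketch): callers of the quiet functions pass in a
+ * status word that accumulates conditions and check it afterwards. With x
+ * an already allocated mpd_t:
+ *
+ *     uint32_t status = 0;
+ *     mpd_context_t maxctx;
+ *
+ *     mpd_maxcontext(&maxctx);
+ *     mpd_qsset_ssize(x, 12345, &maxctx, &status);
+ *     if (status & MPD_Malloc_error) {
+ *         ... out of memory ...
+ *     }
+ *
+ * mpd_qsset_ssize() already calls mpd_qfinalize(), so x is rounded to
+ * maxctx.prec and checked against the exponent limits of maxctx.
+ */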
+
+
+/******************************************************************************/
+/* Copying */
+/******************************************************************************/
+
+/* Internal function: Copy a decimal, share data with src: USE WITH CARE! */
+static inline void
+_mpd_copy_shared(mpd_t *dest, const mpd_t *src)
+{
+ dest->flags = src->flags;
+ dest->exp = src->exp;
+ dest->digits = src->digits;
+ dest->len = src->len;
+ dest->alloc = src->alloc;
+ dest->data = src->data;
+
+ mpd_set_shared_data(dest);
+}
+
+/*
+ * Copy a decimal. In case of an error, status is set to MPD_Malloc_error.
+ */
+int
+mpd_qcopy(mpd_t *result, const mpd_t *a, uint32_t *status)
+{
+ if (result == a) return 1;
+
+ if (!mpd_qresize(result, a->len, status)) {
+ return 0;
+ }
+
+ mpd_copy_flags(result, a);
+ result->exp = a->exp;
+ result->digits = a->digits;
+ result->len = a->len;
+ memcpy(result->data, a->data, a->len * (sizeof *result->data));
+
+ return 1;
+}
+
+/*
+ * Copy to a decimal with a static buffer. The caller has to make sure that
+ * the buffer is big enough. Cannot fail.
+ */
+static void
+mpd_qcopy_static(mpd_t *result, const mpd_t *a)
+{
+ if (result == a) return;
+
+ memcpy(result->data, a->data, a->len * (sizeof *result->data));
+
+ mpd_copy_flags(result, a);
+ result->exp = a->exp;
+ result->digits = a->digits;
+ result->len = a->len;
+}
+
+/*
+ * Return a newly allocated copy of the operand. In case of an error,
+ * the return value is NULL.
+ */
+mpd_t *
+mpd_qncopy(const mpd_t *a)
+{
+ mpd_t *result;
+
+ if ((result = mpd_qnew_size(a->len)) == NULL) {
+ return NULL;
+ }
+ memcpy(result->data, a->data, a->len * (sizeof *result->data));
+ mpd_copy_flags(result, a);
+ result->exp = a->exp;
+ result->digits = a->digits;
+ result->len = a->len;
+
+ return result;
+}
+
+/*
+ * Copy a decimal and set the sign to positive. In case of an error, the
+ * status is set to MPD_Malloc_error.
+ */
+int
+mpd_qcopy_abs(mpd_t *result, const mpd_t *a, uint32_t *status)
+{
+ if (!mpd_qcopy(result, a, status)) {
+ return 0;
+ }
+ mpd_set_positive(result);
+ return 1;
+}
+
+/*
+ * Copy a decimal and negate the sign. In case of an error, the
+ * status is set to MPD_Malloc_error.
+ */
+int
+mpd_qcopy_negate(mpd_t *result, const mpd_t *a, uint32_t *status)
+{
+ if (!mpd_qcopy(result, a, status)) {
+ return 0;
+ }
+ _mpd_negate(result);
+ return 1;
+}
+
+/*
+ * Copy a decimal, setting the sign of the first operand to the sign of the
+ * second operand. In case of an error, the status is set to MPD_Malloc_error.
+ */
+int
+mpd_qcopy_sign(mpd_t *result, const mpd_t *a, const mpd_t *b, uint32_t *status)
+{
+ uint8_t sign_b = mpd_sign(b); /* result may equal b! */
+
+ if (!mpd_qcopy(result, a, status)) {
+ return 0;
+ }
+ mpd_set_sign(result, sign_b);
+ return 1;
+}
+
+
+/******************************************************************************/
+/* Comparisons */
+/******************************************************************************/
+
+/*
+ * For all functions that compare two operands and return an int the usual
+ * convention applies to the return value:
+ *
+ * -1 if op1 < op2
+ * 0 if op1 == op2
+ * 1 if op1 > op2
+ *
+ * INT_MAX for error
+ */
+
+
+/* Convenience macro. If a and b are not equal, return from the calling
+ * function with the correct comparison value. */
+#define CMP_EQUAL_OR_RETURN(a, b) \
+ if (a != b) { \
+ if (a < b) { \
+ return -1; \
+ } \
+ return 1; \
+ }
+
+/*
+ * Compare the data of big and small. This function does the equivalent
+ * of first shifting small to the left and then comparing the data of
+ * big and small, except that no allocation for the left shift is needed.
+ */
+static int
+_mpd_basecmp(mpd_uint_t *big, mpd_uint_t *small, mpd_size_t n, mpd_size_t m,
+ mpd_size_t shift)
+{
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
+ /* spurious uninitialized warnings */
+ mpd_uint_t l=l, lprev=lprev, h=h;
+#else
+ mpd_uint_t l, lprev, h;
+#endif
+ mpd_uint_t q, r;
+ mpd_uint_t ph, x;
+
+ assert(m > 0 && n >= m && shift > 0);
+
+ _mpd_div_word(&q, &r, (mpd_uint_t)shift, MPD_RDIGITS);
+
+ if (r != 0) {
+
+ ph = mpd_pow10[r];
+
+ --m; --n;
+ _mpd_divmod_pow10(&h, &lprev, small[m--], MPD_RDIGITS-r);
+ if (h != 0) {
+ CMP_EQUAL_OR_RETURN(big[n], h)
+ --n;
+ }
+ for (; m != MPD_SIZE_MAX; m--,n--) {
+ _mpd_divmod_pow10(&h, &l, small[m], MPD_RDIGITS-r);
+ x = ph * lprev + h;
+ CMP_EQUAL_OR_RETURN(big[n], x)
+ lprev = l;
+ }
+ x = ph * lprev;
+ CMP_EQUAL_OR_RETURN(big[q], x)
+ }
+ else {
+ while (--m != MPD_SIZE_MAX) {
+ CMP_EQUAL_OR_RETURN(big[m+q], small[m])
+ }
+ }
+
+ return !_mpd_isallzero(big, q);
+}
+
+/* Compare two decimals with the same adjusted exponent. */
+static int
+_mpd_cmp_same_adjexp(const mpd_t *a, const mpd_t *b)
+{
+ mpd_ssize_t shift, i;
+
+ if (a->exp != b->exp) {
+ /* Cannot wrap: a->exp + a->digits = b->exp + b->digits, so
+ * a->exp - b->exp = b->digits - a->digits. */
+ shift = a->exp - b->exp;
+ if (shift > 0) {
+ return -1 * _mpd_basecmp(b->data, a->data, b->len, a->len, shift);
+ }
+ else {
+ return _mpd_basecmp(a->data, b->data, a->len, b->len, -shift);
+ }
+ }
+
+ /*
+ * At this point adjexp(a) == adjexp(b) and a->exp == b->exp,
+ * so a->digits == b->digits, therefore a->len == b->len.
+ */
+ for (i = a->len-1; i >= 0; --i) {
+ CMP_EQUAL_OR_RETURN(a->data[i], b->data[i])
+ }
+
+ return 0;
+}
+
+/* Compare two numerical values. */
+static int
+_mpd_cmp(const mpd_t *a, const mpd_t *b)
+{
+ mpd_ssize_t adjexp_a, adjexp_b;
+
+ /* equal pointers */
+ if (a == b) {
+ return 0;
+ }
+
+ /* infinities */
+ if (mpd_isinfinite(a)) {
+ if (mpd_isinfinite(b)) {
+ return mpd_isnegative(b) - mpd_isnegative(a);
+ }
+ return mpd_arith_sign(a);
+ }
+ if (mpd_isinfinite(b)) {
+ return -mpd_arith_sign(b);
+ }
+
+ /* zeros */
+ if (mpd_iszerocoeff(a)) {
+ if (mpd_iszerocoeff(b)) {
+ return 0;
+ }
+ return -mpd_arith_sign(b);
+ }
+ if (mpd_iszerocoeff(b)) {
+ return mpd_arith_sign(a);
+ }
+
+ /* different signs */
+ if (mpd_sign(a) != mpd_sign(b)) {
+ return mpd_sign(b) - mpd_sign(a);
+ }
+
+ /* different adjusted exponents */
+ adjexp_a = mpd_adjexp(a);
+ adjexp_b = mpd_adjexp(b);
+ if (adjexp_a != adjexp_b) {
+ if (adjexp_a < adjexp_b) {
+ return -1 * mpd_arith_sign(a);
+ }
+ return mpd_arith_sign(a);
+ }
+
+ /* same adjusted exponents */
+ return _mpd_cmp_same_adjexp(a, b) * mpd_arith_sign(a);
+}
+
+/* Compare the absolute values of two numerical operands. */
+static int
+_mpd_cmp_abs(const mpd_t *a, const mpd_t *b)
+{
+ mpd_ssize_t adjexp_a, adjexp_b;
+
+ /* equal pointers */
+ if (a == b) {
+ return 0;
+ }
+
+ /* infinities */
+ if (mpd_isinfinite(a)) {
+ if (mpd_isinfinite(b)) {
+ return 0;
+ }
+ return 1;
+ }
+ if (mpd_isinfinite(b)) {
+ return -1;
+ }
+
+ /* zeros */
+ if (mpd_iszerocoeff(a)) {
+ if (mpd_iszerocoeff(b)) {
+ return 0;
+ }
+ return -1;
+ }
+ if (mpd_iszerocoeff(b)) {
+ return 1;
+ }
+
+ /* different adjusted exponents */
+ adjexp_a = mpd_adjexp(a);
+ adjexp_b = mpd_adjexp(b);
+ if (adjexp_a != adjexp_b) {
+ if (adjexp_a < adjexp_b) {
+ return -1;
+ }
+ return 1;
+ }
+
+ /* same adjusted exponents */
+ return _mpd_cmp_same_adjexp(a, b);
+}
+
+/* Compare two values and return an integer result. */
+int
+mpd_qcmp(const mpd_t *a, const mpd_t *b, uint32_t *status)
+{
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_isnan(a) || mpd_isnan(b)) {
+ *status |= MPD_Invalid_operation;
+ return INT_MAX;
+ }
+ }
+
+ return _mpd_cmp(a, b);
+}
+
+/*
+ * Compare a and b, convert the usual integer result to a decimal and
+ * store it in 'result'. For convenience, the integer result of the comparison
+ * is returned. Comparisons involving NaNs return NaN/INT_MAX.
+ */
+int
+mpd_qcompare(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ int c;
+
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ return INT_MAX;
+ }
+ }
+
+ c = _mpd_cmp(a, b);
+ _settriple(result, (c < 0), (c != 0), 0);
+ return c;
+}
+
+/* Same as mpd_qcompare(), but signal for all NaNs, i.e. also for quiet NaNs. */
+int
+mpd_qcompare_signal(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ int c;
+
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ *status |= MPD_Invalid_operation;
+ return INT_MAX;
+ }
+ }
+
+ c = _mpd_cmp(a, b);
+ _settriple(result, (c < 0), (c != 0), 0);
+ return c;
+}
+
+/* Compare the operands using a total order. */
+int
+mpd_cmp_total(const mpd_t *a, const mpd_t *b)
+{
+ mpd_t aa, bb;
+ int nan_a, nan_b;
+ int c;
+
+ if (mpd_sign(a) != mpd_sign(b)) {
+ return mpd_sign(b) - mpd_sign(a);
+ }
+
+
+ if (mpd_isnan(a)) {
+ c = 1;
+ if (mpd_isnan(b)) {
+ nan_a = (mpd_isqnan(a)) ? 1 : 0;
+ nan_b = (mpd_isqnan(b)) ? 1 : 0;
+ if (nan_b == nan_a) {
+ if (a->len > 0 && b->len > 0) {
+ _mpd_copy_shared(&aa, a);
+ _mpd_copy_shared(&bb, b);
+ aa.exp = bb.exp = 0;
+ /* compare payload */
+ c = _mpd_cmp_abs(&aa, &bb);
+ }
+ else {
+ c = (a->len > 0) - (b->len > 0);
+ }
+ }
+ else {
+ c = nan_a - nan_b;
+ }
+ }
+ }
+ else if (mpd_isnan(b)) {
+ c = -1;
+ }
+ else {
+ c = _mpd_cmp_abs(a, b);
+ if (c == 0 && a->exp != b->exp) {
+ c = (a->exp < b->exp) ? -1 : 1;
+ }
+ }
+
+ return c * mpd_arith_sign(a);
+}
+
+/*
+ * Compare a and b according to a total order, convert the usual integer result
+ * to a decimal and store it in 'result'. For convenience, the integer result
+ * of the comparison is returned.
+ */
+int
+mpd_compare_total(mpd_t *result, const mpd_t *a, const mpd_t *b)
+{
+ int c;
+
+ c = mpd_cmp_total(a, b);
+ _settriple(result, (c < 0), (c != 0), 0);
+ return c;
+}
+
+/* Compare the magnitude of the operands using a total order. */
+int
+mpd_cmp_total_mag(const mpd_t *a, const mpd_t *b)
+{
+ mpd_t aa, bb;
+
+ _mpd_copy_shared(&aa, a);
+ _mpd_copy_shared(&bb, b);
+
+ mpd_set_positive(&aa);
+ mpd_set_positive(&bb);
+
+ return mpd_cmp_total(&aa, &bb);
+}
+
+/*
+ * Compare the magnitude of a and b according to a total order, convert
+ * the usual integer result to a decimal and store it in 'result'.
+ * For convenience, the integer result of the comparison is returned.
+ */
+int
+mpd_compare_total_mag(mpd_t *result, const mpd_t *a, const mpd_t *b)
+{
+ int c;
+
+ c = mpd_cmp_total_mag(a, b);
+ _settriple(result, (c < 0), (c != 0), 0);
+ return c;
+}
+
+/* Determine an ordering for operands that are numerically equal. */
+static inline int
+_mpd_cmp_numequal(const mpd_t *a, const mpd_t *b)
+{
+ int sign_a, sign_b;
+ int c;
+
+ sign_a = mpd_sign(a);
+ sign_b = mpd_sign(b);
+ if (sign_a != sign_b) {
+ c = sign_b - sign_a;
+ }
+ else {
+ c = (a->exp < b->exp) ? -1 : 1;
+ c *= mpd_arith_sign(a);
+ }
+
+ return c;
+}
+
+
+/******************************************************************************/
+/* Shifting the coefficient */
+/******************************************************************************/
+
+/*
+ * Shift the coefficient of the operand to the left, no check for specials.
+ * Both operands may be the same pointer. If the result length has to be
+ * increased, mpd_qresize() might fail with MPD_Malloc_error.
+ */
+int
+mpd_qshiftl(mpd_t *result, const mpd_t *a, mpd_ssize_t n, uint32_t *status)
+{
+ mpd_ssize_t size;
+
+ assert(!mpd_isspecial(a));
+ assert(n >= 0);
+
+ if (mpd_iszerocoeff(a) || n == 0) {
+ return mpd_qcopy(result, a, status);
+ }
+
+ size = mpd_digits_to_size(a->digits+n);
+ if (!mpd_qresize(result, size, status)) {
+ return 0; /* result is NaN */
+ }
+
+ _mpd_baseshiftl(result->data, a->data, size, a->len, n);
+
+ mpd_copy_flags(result, a);
+ result->exp = a->exp;
+ result->digits = a->digits+n;
+ result->len = size;
+
+ return 1;
+}
+
+/* Determine the rounding indicator if all digits of the coefficient are shifted
+ * out of the picture. */
+static mpd_uint_t
+_mpd_get_rnd(const mpd_uint_t *data, mpd_ssize_t len, int use_msd)
+{
+ mpd_uint_t rnd = 0, rest = 0, word;
+
+ word = data[len-1];
+ /* special treatment for the most significant digit if shift == digits */
+ if (use_msd) {
+ _mpd_divmod_pow10(&rnd, &rest, word, mpd_word_digits(word)-1);
+ if (len > 1 && rest == 0) {
+ rest = !_mpd_isallzero(data, len-1);
+ }
+ }
+ else {
+ rest = !_mpd_isallzero(data, len);
+ }
+
+ return (rnd == 0 || rnd == 5) ? rnd + !!rest : rnd;
+}
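+
+/*
+ * Worked examples for the rounding indicator (a sketch of the encoding
+ * that mpd_rnd_incr() expects):
+ *
+ *     coefficient 51, shift == digits (use_msd=1):  rnd=5, rest!=0  ->  6
+ *     coefficient 50, shift == digits (use_msd=1):  rnd=5, rest==0  ->  5
+ *     coefficient 31, shift == digits (use_msd=1):  rnd=3           ->  3
+ *     coefficient 7,  shift >  digits (use_msd=0):  rnd=0, rest!=0  ->  1
+ *
+ * The indicator is the most significant discarded digit, incremented by one
+ * if that digit is 0 or 5 and any further discarded digit is non-zero.
+ */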
+
+/*
+ * Same as mpd_qshiftr(), but 'result' is an mpd_t with a static coefficient.
+ * It is the caller's responsibility to ensure that the coefficient is big
+ * enough. The function cannot fail.
+ */
+mpd_uint_t
+mpd_qsshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n)
+{
+ mpd_uint_t rnd;
+ mpd_ssize_t size;
+
+ assert(!mpd_isspecial(a));
+ assert(n >= 0);
+
+ if (mpd_iszerocoeff(a) || n == 0) {
+ mpd_qcopy_static(result, a);
+ return 0;
+ }
+
+ if (n >= a->digits) {
+ rnd = _mpd_get_rnd(a->data, a->len, (n==a->digits));
+ mpd_zerocoeff(result);
+ }
+ else {
+ result->digits = a->digits-n;
+ size = mpd_digits_to_size(result->digits);
+ rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
+ result->len = size;
+ }
+
+ mpd_copy_flags(result, a);
+ result->exp = a->exp;
+
+ return rnd;
+}
+
+/*
+ * In-place shift of the coefficient to the right, no check for specials.
+ * Returns the rounding indicator for mpd_rnd_incr().
+ * The function cannot fail.
+ */
+mpd_uint_t
+mpd_qshiftr_inplace(mpd_t *result, mpd_ssize_t n)
+{
+ uint32_t dummy;
+ mpd_uint_t rnd;
+ mpd_ssize_t size;
+
+ assert(!mpd_isspecial(result));
+ assert(n >= 0);
+
+ if (mpd_iszerocoeff(result) || n == 0) {
+ return 0;
+ }
+
+ if (n >= result->digits) {
+ rnd = _mpd_get_rnd(result->data, result->len, (n==result->digits));
+ mpd_zerocoeff(result);
+ }
+ else {
+ rnd = _mpd_baseshiftr(result->data, result->data, result->len, n);
+ result->digits -= n;
+ size = mpd_digits_to_size(result->digits);
+ /* reducing the size cannot fail */
+ mpd_qresize(result, size, &dummy);
+ result->len = size;
+ }
+
+ return rnd;
+}
+
+/*
+ * Shift the coefficient of the operand to the right, no check for specials.
+ * Both operands may be the same pointer. Returns the rounding indicator to
+ * be used by mpd_rnd_incr(). If the result length has to be increased,
+ * mpd_qcopy() or mpd_qresize() might fail with MPD_Malloc_error. In those
+ * cases, MPD_UINT_MAX is returned.
+ */
+mpd_uint_t
+mpd_qshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n, uint32_t *status)
+{
+ mpd_uint_t rnd;
+ mpd_ssize_t size;
+
+ assert(!mpd_isspecial(a));
+ assert(n >= 0);
+
+ if (mpd_iszerocoeff(a) || n == 0) {
+ if (!mpd_qcopy(result, a, status)) {
+ return MPD_UINT_MAX;
+ }
+ return 0;
+ }
+
+ if (n >= a->digits) {
+ rnd = _mpd_get_rnd(a->data, a->len, (n==a->digits));
+ mpd_zerocoeff(result);
+ }
+ else {
+ result->digits = a->digits-n;
+ size = mpd_digits_to_size(result->digits);
+ if (result == a) {
+ rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
+ /* reducing the size cannot fail */
+ mpd_qresize(result, size, status);
+ }
+ else {
+ if (!mpd_qresize(result, size, status)) {
+ return MPD_UINT_MAX;
+ }
+ rnd = _mpd_baseshiftr(result->data, a->data, a->len, n);
+ }
+ result->len = size;
+ }
+
+ mpd_copy_flags(result, a);
+ result->exp = a->exp;
+
+ return rnd;
+}
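+
+/*
+ * A short sketch of the shift primitives above. Assume an mpd_t 'x' whose
+ * coefficient is 123456:
+ *
+ *     uint32_t status = 0;
+ *     mpd_uint_t rnd;
+ *
+ *     mpd_qshiftl(&x, &x, 3, &status);        coefficient is now 123456000
+ *     rnd = mpd_qshiftr(&x, &x, 5, &status);  coefficient 1234, rnd == 6
+ *
+ * The second call discards "56000": the most significant discarded digit is
+ * 5 and the rest is non-zero, so the rounding indicator is 6. Neither call
+ * changes the exponent; callers adjust it themselves.
+ */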
+
+
+/******************************************************************************/
+/* Miscellaneous operations */
+/******************************************************************************/
+
+/* Logical And */
+void
+mpd_qand(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ const mpd_t *big = a, *small = b;
+ mpd_uint_t x, y, z, xbit, ybit;
+ int k, mswdigits;
+ mpd_ssize_t i;
+
+ if (mpd_isspecial(a) || mpd_isspecial(b) ||
+ mpd_isnegative(a) || mpd_isnegative(b) ||
+ a->exp != 0 || b->exp != 0) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (b->digits > a->digits) {
+ big = b;
+ small = a;
+ }
+ if (!mpd_qresize(result, big->len, status)) {
+ return;
+ }
+
+
+ /* full words */
+ for (i = 0; i < small->len-1; i++) {
+ x = small->data[i];
+ y = big->data[i];
+ z = 0;
+ for (k = 0; k < MPD_RDIGITS; k++) {
+ xbit = x % 10;
+ x /= 10;
+ ybit = y % 10;
+ y /= 10;
+ if (xbit > 1 || ybit > 1) {
+ goto invalid_operation;
+ }
+ z += (xbit&ybit) ? mpd_pow10[k] : 0;
+ }
+ result->data[i] = z;
+ }
+ /* most significant word of small */
+ x = small->data[i];
+ y = big->data[i];
+ z = 0;
+ mswdigits = mpd_word_digits(x);
+ for (k = 0; k < mswdigits; k++) {
+ xbit = x % 10;
+ x /= 10;
+ ybit = y % 10;
+ y /= 10;
+ if (xbit > 1 || ybit > 1) {
+ goto invalid_operation;
+ }
+ z += (xbit&ybit) ? mpd_pow10[k] : 0;
+ }
+ result->data[i++] = z;
+
+ /* scan the rest of y for digits > 1 */
+ for (; k < MPD_RDIGITS; k++) {
+ ybit = y % 10;
+ y /= 10;
+ if (ybit > 1) {
+ goto invalid_operation;
+ }
+ }
+ /* scan the rest of big for digits > 1 */
+ for (; i < big->len; i++) {
+ y = big->data[i];
+ for (k = 0; k < MPD_RDIGITS; k++) {
+ ybit = y % 10;
+ y /= 10;
+ if (ybit > 1) {
+ goto invalid_operation;
+ }
+ }
+ }
+
+ mpd_clear_flags(result);
+ result->exp = 0;
+ result->len = _mpd_real_size(result->data, small->len);
+ mpd_qresize(result, result->len, status);
+ mpd_setdigits(result);
+ _mpd_cap(result, ctx);
+ return;
+
+invalid_operation:
+ mpd_seterror(result, MPD_Invalid_operation, status);
+}
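+
+/*
+ * Worked examples for the digit-wise logical operations (here: And). The
+ * operands must be non-negative integers whose digits are all 0 or 1:
+ *
+ *     and('1101', '1011')  ->  '1001'
+ *     and('1101', '10')    ->  '0'    (operands are aligned at the right)
+ *     and('1101', '2')     ->  NaN, MPD_Invalid_operation
+ *
+ * Or and Xor below follow the same word-by-word scheme, differing only in
+ * the per-digit operation and in copying the unmatched high words of the
+ * longer operand.
+ */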
+
+/* Class of an operand. Returns a pointer to the constant name. */
+const char *
+mpd_class(const mpd_t *a, const mpd_context_t *ctx)
+{
+ if (mpd_isnan(a)) {
+ if (mpd_isqnan(a))
+ return "NaN";
+ else
+ return "sNaN";
+ }
+ else if (mpd_ispositive(a)) {
+ if (mpd_isinfinite(a))
+ return "+Infinity";
+ else if (mpd_iszero(a))
+ return "+Zero";
+ else if (mpd_isnormal(a, ctx))
+ return "+Normal";
+ else
+ return "+Subnormal";
+ }
+ else {
+ if (mpd_isinfinite(a))
+ return "-Infinity";
+ else if (mpd_iszero(a))
+ return "-Zero";
+ else if (mpd_isnormal(a, ctx))
+ return "-Normal";
+ else
+ return "-Subnormal";
+ }
+}
+
+/* Logical Invert */
+void
+mpd_qinvert(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_uint_t x, z, xbit;
+ mpd_ssize_t i, digits, len;
+ mpd_ssize_t q, r;
+ int k;
+
+ if (mpd_isspecial(a) || mpd_isnegative(a) || a->exp != 0) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ digits = (a->digits < ctx->prec) ? ctx->prec : a->digits;
+ _mpd_idiv_word(&q, &r, digits, MPD_RDIGITS);
+ len = (r == 0) ? q : q+1;
+ if (!mpd_qresize(result, len, status)) {
+ return;
+ }
+
+ for (i = 0; i < len; i++) {
+ x = (i < a->len) ? a->data[i] : 0;
+ z = 0;
+ for (k = 0; k < MPD_RDIGITS; k++) {
+ xbit = x % 10;
+ x /= 10;
+ if (xbit > 1) {
+ goto invalid_operation;
+ }
+ z += !xbit ? mpd_pow10[k] : 0;
+ }
+ result->data[i] = z;
+ }
+
+ mpd_clear_flags(result);
+ result->exp = 0;
+ result->len = _mpd_real_size(result->data, len);
+ mpd_qresize(result, result->len, status);
+ mpd_setdigits(result);
+ _mpd_cap(result, ctx);
+ return;
+
+invalid_operation:
+ mpd_seterror(result, MPD_Invalid_operation, status);
+}
+
+/* Exponent of the magnitude of the most significant digit of the operand. */
+void
+mpd_qlogb(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ mpd_setspecial(result, MPD_POS, MPD_INF);
+ }
+ else if (mpd_iszerocoeff(a)) {
+ mpd_setspecial(result, MPD_NEG, MPD_INF);
+ *status |= MPD_Division_by_zero;
+ }
+ else {
+ mpd_qset_ssize(result, mpd_adjexp(a), ctx, status);
+ }
+}
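+
+/*
+ * Worked examples for logb (the result is the adjusted exponent of the
+ * operand, returned as a decimal):
+ *
+ *     logb(250)   ->   2
+ *     logb(2.50)  ->   0
+ *     logb(0.03)  ->  -2
+ *     logb(0)     ->  -Infinity, MPD_Division_by_zero
+ *     logb(+Inf)  ->  +Infinity
+ */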
+
+/* Logical Or */
+void
+mpd_qor(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ const mpd_t *big = a, *small = b;
+ mpd_uint_t x, y, z, xbit, ybit;
+ int k, mswdigits;
+ mpd_ssize_t i;
+
+ if (mpd_isspecial(a) || mpd_isspecial(b) ||
+ mpd_isnegative(a) || mpd_isnegative(b) ||
+ a->exp != 0 || b->exp != 0) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (b->digits > a->digits) {
+ big = b;
+ small = a;
+ }
+ if (!mpd_qresize(result, big->len, status)) {
+ return;
+ }
+
+
+ /* full words */
+ for (i = 0; i < small->len-1; i++) {
+ x = small->data[i];
+ y = big->data[i];
+ z = 0;
+ for (k = 0; k < MPD_RDIGITS; k++) {
+ xbit = x % 10;
+ x /= 10;
+ ybit = y % 10;
+ y /= 10;
+ if (xbit > 1 || ybit > 1) {
+ goto invalid_operation;
+ }
+ z += (xbit|ybit) ? mpd_pow10[k] : 0;
+ }
+ result->data[i] = z;
+ }
+ /* most significant word of small */
+ x = small->data[i];
+ y = big->data[i];
+ z = 0;
+ mswdigits = mpd_word_digits(x);
+ for (k = 0; k < mswdigits; k++) {
+ xbit = x % 10;
+ x /= 10;
+ ybit = y % 10;
+ y /= 10;
+ if (xbit > 1 || ybit > 1) {
+ goto invalid_operation;
+ }
+ z += (xbit|ybit) ? mpd_pow10[k] : 0;
+ }
+
+ /* scan for digits > 1 and copy the rest of y */
+ for (; k < MPD_RDIGITS; k++) {
+ ybit = y % 10;
+ y /= 10;
+ if (ybit > 1) {
+ goto invalid_operation;
+ }
+ z += ybit*mpd_pow10[k];
+ }
+ result->data[i++] = z;
+ /* scan for digits > 1 and copy the rest of big */
+ for (; i < big->len; i++) {
+ y = big->data[i];
+ for (k = 0; k < MPD_RDIGITS; k++) {
+ ybit = y % 10;
+ y /= 10;
+ if (ybit > 1) {
+ goto invalid_operation;
+ }
+ }
+ result->data[i] = big->data[i];
+ }
+
+ mpd_clear_flags(result);
+ result->exp = 0;
+ result->len = _mpd_real_size(result->data, big->len);
+ mpd_qresize(result, result->len, status);
+ mpd_setdigits(result);
+ _mpd_cap(result, ctx);
+ return;
+
+invalid_operation:
+ mpd_seterror(result, MPD_Invalid_operation, status);
+}
+
+/*
+ * Rotate the coefficient of 'a' by 'b' digits. 'b' must be an integer with
+ * exponent 0.
+ */
+void
+mpd_qrotate(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ uint32_t workstatus = 0;
+ MPD_NEW_STATIC(tmp,0,0,0,0);
+ MPD_NEW_STATIC(big,0,0,0,0);
+ MPD_NEW_STATIC(small,0,0,0,0);
+ mpd_ssize_t n, lshift, rshift;
+
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ return;
+ }
+ }
+ if (b->exp != 0 || mpd_isinfinite(b)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ n = mpd_qget_ssize(b, &workstatus);
+ if (workstatus&MPD_Invalid_operation) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (n > ctx->prec || n < -ctx->prec) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (mpd_isinfinite(a)) {
+ mpd_qcopy(result, a, status);
+ return;
+ }
+
+ if (n >= 0) {
+ lshift = n;
+ rshift = ctx->prec-n;
+ }
+ else {
+ lshift = ctx->prec+n;
+ rshift = -n;
+ }
+
+ if (a->digits > ctx->prec) {
+ if (!mpd_qcopy(&tmp, a, status)) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ goto finish;
+ }
+ _mpd_cap(&tmp, ctx);
+ a = &tmp;
+ }
+
+ if (!mpd_qshiftl(&big, a, lshift, status)) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ goto finish;
+ }
+ _mpd_cap(&big, ctx);
+
+ if (mpd_qshiftr(&small, a, rshift, status) == MPD_UINT_MAX) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ goto finish;
+ }
+ _mpd_qadd(result, &big, &small, ctx, status);
+
+
+finish:
+ mpd_del(&tmp);
+ mpd_del(&big);
+ mpd_del(&small);
+}
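+
+/*
+ * Worked example for rotate with context precision 5 (positive n rotates
+ * towards the most significant digit):
+ *
+ *     rotate(12345, 2):  lshift = 2, rshift = 3
+ *         big   = 12345 shifted left by 2  -> 1234500, capped to 5 digits
+ *                 -> 34500
+ *         small = 12345 shifted right by 3 -> 12
+ *         result = 34500 + 12 = 34512
+ *
+ *     rotate(12345, -2) yields 45123 by the symmetric decomposition.
+ */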
+
+/*
+ * b must be an integer with exponent 0 and in the range +-2*(emax + prec).
+ * XXX: In my opinion +-(2*emax + prec) would be more sensible.
+ * The result is a with the value of b added to its exponent.
+ */
+void
+mpd_qscaleb(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ uint32_t workstatus = 0;
+ mpd_uint_t n, maxjump;
+#ifndef LEGACY_COMPILER
+ int64_t exp;
+#else
+ mpd_uint_t x;
+ int x_sign, n_sign;
+ mpd_ssize_t exp;
+#endif
+
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ return;
+ }
+ }
+ if (b->exp != 0 || mpd_isinfinite(b)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ n = mpd_qabs_uint(b, &workstatus);
+ /* the spec demands this */
+ maxjump = 2 * (mpd_uint_t)(ctx->emax + ctx->prec);
+
+ if (n > maxjump || workstatus&MPD_Invalid_operation) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (mpd_isinfinite(a)) {
+ mpd_qcopy(result, a, status);
+ return;
+ }
+
+#ifndef LEGACY_COMPILER
+ exp = a->exp + (int64_t)n * mpd_arith_sign(b);
+ exp = (exp > MPD_EXP_INF) ? MPD_EXP_INF : exp;
+ exp = (exp < MPD_EXP_CLAMP) ? MPD_EXP_CLAMP : exp;
+#else
+ x = (a->exp < 0) ? -a->exp : a->exp;
+ x_sign = (a->exp < 0) ? 1 : 0;
+ n_sign = mpd_isnegative(b) ? 1 : 0;
+
+ if (x_sign == n_sign) {
+ x = x + n;
+ if (x < n) x = MPD_UINT_MAX;
+ }
+ else {
+ x_sign = (x >= n) ? x_sign : n_sign;
+ x = (x >= n) ? x - n : n - x;
+ }
+ if (!x_sign && x > MPD_EXP_INF) x = MPD_EXP_INF;
+ if (x_sign && x > -MPD_EXP_CLAMP) x = -MPD_EXP_CLAMP;
+ exp = x_sign ? -((mpd_ssize_t)x) : (mpd_ssize_t)x;
+#endif
+
+ mpd_qcopy(result, a, status);
+ result->exp = (mpd_ssize_t)exp;
+
+ mpd_qfinalize(result, ctx, status);
+}
+
+/*
+ * Shift the coefficient by n digits, positive n is a left shift. In the case
+ * of a left shift, the result is decapitated to fit the context precision. If
+ * you don't want that, use mpd_shiftl().
+ */
+void
+mpd_qshiftn(mpd_t *result, const mpd_t *a, mpd_ssize_t n, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ mpd_qcopy(result, a, status);
+ return;
+ }
+
+ if (n >= 0 && n <= ctx->prec) {
+ mpd_qshiftl(result, a, n, status);
+ _mpd_cap(result, ctx);
+ }
+ else if (n < 0 && n >= -ctx->prec) {
+ if (!mpd_qcopy(result, a, status)) {
+ return;
+ }
+ _mpd_cap(result, ctx);
+ mpd_qshiftr_inplace(result, -n);
+ }
+ else {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ }
+}
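+
+/*
+ * Worked examples for shiftn with context precision 5:
+ *
+ *     shiftn(12345,  2)  ->  34500   (left shift, then capped to 5 digits)
+ *     shiftn(12345, -2)  ->  123     (right shift discards the low digits)
+ *     shiftn(12345,  6)  ->  NaN, MPD_Invalid_operation   (|n| > prec)
+ */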
+
+/*
+ * Same as mpd_shiftn(), but the shift is specified by the decimal b, which
+ * must be an integer with a zero exponent. Infinities remain infinities.
+ */
+void
+mpd_qshift(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ uint32_t workstatus = 0;
+ mpd_ssize_t n;
+
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ return;
+ }
+ }
+ if (b->exp != 0 || mpd_isinfinite(b)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ n = mpd_qget_ssize(b, &workstatus);
+ if (workstatus&MPD_Invalid_operation) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (n > ctx->prec || n < -ctx->prec) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (mpd_isinfinite(a)) {
+ mpd_qcopy(result, a, status);
+ return;
+ }
+
+ if (n >= 0) {
+ mpd_qshiftl(result, a, n, status);
+ _mpd_cap(result, ctx);
+ }
+ else {
+ if (!mpd_qcopy(result, a, status)) {
+ return;
+ }
+ _mpd_cap(result, ctx);
+ mpd_qshiftr_inplace(result, -n);
+ }
+}
+
+/* Logical Xor */
+void
+mpd_qxor(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ const mpd_t *big = a, *small = b;
+ mpd_uint_t x, y, z, xbit, ybit;
+ int k, mswdigits;
+ mpd_ssize_t i;
+
+ if (mpd_isspecial(a) || mpd_isspecial(b) ||
+ mpd_isnegative(a) || mpd_isnegative(b) ||
+ a->exp != 0 || b->exp != 0) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (b->digits > a->digits) {
+ big = b;
+ small = a;
+ }
+ if (!mpd_qresize(result, big->len, status)) {
+ return;
+ }
+
+
+ /* full words */
+ for (i = 0; i < small->len-1; i++) {
+ x = small->data[i];
+ y = big->data[i];
+ z = 0;
+ for (k = 0; k < MPD_RDIGITS; k++) {
+ xbit = x % 10;
+ x /= 10;
+ ybit = y % 10;
+ y /= 10;
+ if (xbit > 1 || ybit > 1) {
+ goto invalid_operation;
+ }
+ z += (xbit^ybit) ? mpd_pow10[k] : 0;
+ }
+ result->data[i] = z;
+ }
+ /* most significant word of small */
+ x = small->data[i];
+ y = big->data[i];
+ z = 0;
+ mswdigits = mpd_word_digits(x);
+ for (k = 0; k < mswdigits; k++) {
+ xbit = x % 10;
+ x /= 10;
+ ybit = y % 10;
+ y /= 10;
+ if (xbit > 1 || ybit > 1) {
+ goto invalid_operation;
+ }
+ z += (xbit^ybit) ? mpd_pow10[k] : 0;
+ }
+
+ /* scan for digits > 1 and copy the rest of y */
+ for (; k < MPD_RDIGITS; k++) {
+ ybit = y % 10;
+ y /= 10;
+ if (ybit > 1) {
+ goto invalid_operation;
+ }
+ z += ybit*mpd_pow10[k];
+ }
+ result->data[i++] = z;
+ /* scan for digits > 1 and copy the rest of big */
+ for (; i < big->len; i++) {
+ y = big->data[i];
+ for (k = 0; k < MPD_RDIGITS; k++) {
+ ybit = y % 10;
+ y /= 10;
+ if (ybit > 1) {
+ goto invalid_operation;
+ }
+ }
+ result->data[i] = big->data[i];
+ }
+
+ mpd_clear_flags(result);
+ result->exp = 0;
+ result->len = _mpd_real_size(result->data, big->len);
+ mpd_qresize(result, result->len, status);
+ mpd_setdigits(result);
+ _mpd_cap(result, ctx);
+ return;
+
+invalid_operation:
+ mpd_seterror(result, MPD_Invalid_operation, status);
+}
+
+
+/******************************************************************************/
+/* Arithmetic operations */
+/******************************************************************************/
+
+/*
+ * The absolute value of a. If a is negative, the result is the same
+ * as the result of the minus operation. Otherwise, the result is the
+ * result of the plus operation.
+ */
+void
+mpd_qabs(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ }
+
+ if (mpd_isnegative(a)) {
+ mpd_qminus(result, a, ctx, status);
+ }
+ else {
+ mpd_qplus(result, a, ctx, status);
+ }
+}
+
+static inline void
+_mpd_ptrswap(mpd_t **a, mpd_t **b)
+{
+ mpd_t *t = *a;
+ *a = *b;
+ *b = t;
+}
+
+/* Add or subtract infinities. */
+static void
+_mpd_qaddsub_inf(mpd_t *result, const mpd_t *a, const mpd_t *b, uint8_t sign_b,
+ uint32_t *status)
+{
+ if (mpd_isinfinite(a)) {
+ if (mpd_sign(a) != sign_b && mpd_isinfinite(b)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ }
+ else {
+ mpd_setspecial(result, mpd_sign(a), MPD_INF);
+ }
+ return;
+ }
+ assert(mpd_isinfinite(b));
+ mpd_setspecial(result, sign_b, MPD_INF);
+}
+
+/* Add or subtract non-special numbers. */
+static void
+_mpd_qaddsub(mpd_t *result, const mpd_t *a, const mpd_t *b, uint8_t sign_b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_t *big, *small;
+ MPD_NEW_STATIC(big_aligned,0,0,0,0);
+ MPD_NEW_CONST(tiny,0,0,1,1,1,1);
+ mpd_uint_t carry;
+ mpd_ssize_t newsize, shift;
+ mpd_ssize_t exp, i;
+ int swap = 0;
+
+
+ /* compare exponents */
+ big = (mpd_t *)a; small = (mpd_t *)b;
+ if (big->exp != small->exp) {
+ if (small->exp > big->exp) {
+ _mpd_ptrswap(&big, &small);
+ swap++;
+ }
+ /* align the coefficients */
+ if (!mpd_iszerocoeff(big)) {
+ exp = big->exp - 1;
+ exp += (big->digits > ctx->prec) ? 0 : big->digits-ctx->prec-1;
+ if (mpd_adjexp(small) < exp) {
+ /*
+ * Avoid huge shifts by substituting a value for small that is
+ * guaranteed to produce the same results.
+ *
+ * adjexp(small) < exp if and only if:
+ *
+ * bdigits <= prec AND
+ * bdigits+shift >= prec+2+sdigits AND
+ * exp = bexp+bdigits-prec-2
+ *
+ * 1234567000000000 -> bdigits + shift
+ * ----------XX1234 -> sdigits
+ * ----------X1 -> tiny-digits
+ * |- prec -|
+ *
+ * OR
+ *
+ * bdigits > prec AND
+ * shift > sdigits AND
+ * exp = bexp-1
+ *
+ * 1234567892100000 -> bdigits + shift
+ * ----------XX1234 -> sdigits
+ * ----------X1 -> tiny-digits
+ * |- prec -|
+ *
+ * If tiny is zero, adding or subtracting is a no-op.
+ * Otherwise, adding tiny generates a non-zero digit either
+ * below the rounding digit or the least significant digit
+ * of big. When subtracting, tiny is in the same position as
+ * the carry that would be generated by subtracting sdigits.
+ */
+ mpd_copy_flags(&tiny, small);
+ tiny.exp = exp;
+ tiny.digits = 1;
+ tiny.len = 1;
+ tiny.data[0] = mpd_iszerocoeff(small) ? 0 : 1;
+ small = &tiny;
+ }
+ /* This cannot wrap: the difference is positive and <= maxprec */
+ shift = big->exp - small->exp;
+ if (!mpd_qshiftl(&big_aligned, big, shift, status)) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ goto finish;
+ }
+ big = &big_aligned;
+ }
+ }
+ result->exp = small->exp;
+
+
+ /* compare length of coefficients */
+ if (big->len < small->len) {
+ _mpd_ptrswap(&big, &small);
+ swap++;
+ }
+
+ newsize = big->len;
+ if (!mpd_qresize(result, newsize, status)) {
+ goto finish;
+ }
+
+ if (mpd_sign(a) == sign_b) {
+
+ carry = _mpd_baseadd(result->data, big->data, small->data,
+ big->len, small->len);
+
+ if (carry) {
+ newsize = big->len + 1;
+ if (!mpd_qresize(result, newsize, status)) {
+ goto finish;
+ }
+ result->data[newsize-1] = carry;
+ }
+
+ result->len = newsize;
+ mpd_set_flags(result, sign_b);
+ }
+ else {
+ if (big->len == small->len) {
+ for (i=big->len-1; i >= 0; --i) {
+ if (big->data[i] != small->data[i]) {
+ if (big->data[i] < small->data[i]) {
+ _mpd_ptrswap(&big, &small);
+ swap++;
+ }
+ break;
+ }
+ }
+ }
+
+ _mpd_basesub(result->data, big->data, small->data,
+ big->len, small->len);
+ newsize = _mpd_real_size(result->data, big->len);
+ /* resize to smaller cannot fail */
+ (void)mpd_qresize(result, newsize, status);
+
+ result->len = newsize;
+ sign_b = (swap & 1) ? sign_b : mpd_sign(a);
+ mpd_set_flags(result, sign_b);
+
+ if (mpd_iszerocoeff(result)) {
+ mpd_set_positive(result);
+ if (ctx->round == MPD_ROUND_FLOOR) {
+ mpd_set_negative(result);
+ }
+ }
+ }
+
+ mpd_setdigits(result);
+
+finish:
+ mpd_del(&big_aligned);
+}
+
+/* Add a and b. No specials, no finalizing. */
+static void
+_mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ _mpd_qaddsub(result, a, b, mpd_sign(b), ctx, status);
+}
+
+/* Subtract b from a. No specials, no finalizing. */
+static void
+_mpd_qsub(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ _mpd_qaddsub(result, a, b, !mpd_sign(b), ctx, status);
+}
+
+/* Add a and b. */
+void
+mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ return;
+ }
+ _mpd_qaddsub_inf(result, a, b, mpd_sign(b), status);
+ return;
+ }
+
+ _mpd_qaddsub(result, a, b, mpd_sign(b), ctx, status);
+ mpd_qfinalize(result, ctx, status);
+}
+
+/* Subtract b from a. */
+void
+mpd_qsub(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ return;
+ }
+ _mpd_qaddsub_inf(result, a, b, !mpd_sign(b), status);
+ return;
+ }
+
+ _mpd_qaddsub(result, a, b, !mpd_sign(b), ctx, status);
+ mpd_qfinalize(result, ctx, status);
+}
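+
+/*
+ * A small addition/subtraction sketch (illustrative; assume a context
+ * precision large enough for exact results):
+ *
+ *     1.3  + 1.07   ->   2.37    (exponents are aligned first)
+ *     1.3  - 2.07   ->  -0.77
+ *     1.00 - 1.00   ->   0.00    (-0.00 under MPD_ROUND_FLOOR, see the
+ *                                 zero-coefficient case in _mpd_qaddsub())
+ */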
+
+/* Add decimal and mpd_ssize_t. */
+void
+mpd_qadd_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t maxcontext;
+ MPD_NEW_STATIC(bb,0,0,0,0);
+
+ mpd_maxcontext(&maxcontext);
+ mpd_qsset_ssize(&bb, b, &maxcontext, status);
+ mpd_qadd(result, a, &bb, ctx, status);
+ mpd_del(&bb);
+}
+
+/* Add decimal and mpd_uint_t. */
+void
+mpd_qadd_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t maxcontext;
+ MPD_NEW_STATIC(bb,0,0,0,0);
+
+ mpd_maxcontext(&maxcontext);
+ mpd_qsset_uint(&bb, b, &maxcontext, status);
+ mpd_qadd(result, a, &bb, ctx, status);
+ mpd_del(&bb);
+}
+
+/* Subtract mpd_ssize_t from decimal. */
+void
+mpd_qsub_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t maxcontext;
+ MPD_NEW_STATIC(bb,0,0,0,0);
+
+ mpd_maxcontext(&maxcontext);
+ mpd_qsset_ssize(&bb, b, &maxcontext, status);
+ mpd_qsub(result, a, &bb, ctx, status);
+ mpd_del(&bb);
+}
+
+/* Subtract mpd_uint_t from decimal. */
+void
+mpd_qsub_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t maxcontext;
+ MPD_NEW_STATIC(bb,0,0,0,0);
+
+ mpd_maxcontext(&maxcontext);
+ mpd_qsset_uint(&bb, b, &maxcontext, status);
+ mpd_qsub(result, a, &bb, ctx, status);
+ mpd_del(&bb);
+}
+
+/* Add decimal and int32_t. */
+void
+mpd_qadd_i32(mpd_t *result, const mpd_t *a, int32_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qadd_ssize(result, a, b, ctx, status);
+}
+
+/* Add decimal and uint32_t. */
+void
+mpd_qadd_u32(mpd_t *result, const mpd_t *a, uint32_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qadd_uint(result, a, b, ctx, status);
+}
+
+#ifdef CONFIG_64
+/* Add decimal and int64_t. */
+void
+mpd_qadd_i64(mpd_t *result, const mpd_t *a, int64_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qadd_ssize(result, a, b, ctx, status);
+}
+
+/* Add decimal and uint64_t. */
+void
+mpd_qadd_u64(mpd_t *result, const mpd_t *a, uint64_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qadd_uint(result, a, b, ctx, status);
+}
+#endif
+
+/* Subtract int32_t from decimal. */
+void
+mpd_qsub_i32(mpd_t *result, const mpd_t *a, int32_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qsub_ssize(result, a, b, ctx, status);
+}
+
+/* Subtract uint32_t from decimal. */
+void
+mpd_qsub_u32(mpd_t *result, const mpd_t *a, uint32_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qsub_uint(result, a, b, ctx, status);
+}
+
+#ifdef CONFIG_64
+/* Subtract int64_t from decimal. */
+void
+mpd_qsub_i64(mpd_t *result, const mpd_t *a, int64_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qsub_ssize(result, a, b, ctx, status);
+}
+
+/* Subtract uint64_t from decimal. */
+void
+mpd_qsub_u64(mpd_t *result, const mpd_t *a, uint64_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qsub_uint(result, a, b, ctx, status);
+}
+#endif
+
+
+/* Divide infinities. */
+static void
+_mpd_qdiv_inf(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ if (mpd_isinfinite(a)) {
+ if (mpd_isinfinite(b)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
+ return;
+ }
+ assert(mpd_isinfinite(b));
+ _settriple(result, mpd_sign(a)^mpd_sign(b), 0, mpd_etiny(ctx));
+ *status |= MPD_Clamped;
+}
+
+enum {NO_IDEAL_EXP, SET_IDEAL_EXP};
+/*
+ * Divide a by b. If 'action' is SET_IDEAL_EXP and the division is exact,
+ * trailing zeros are removed so that the exponent of the quotient comes as
+ * close as possible to the ideal exponent a->exp - b->exp.
+ */
+static void
+_mpd_qdiv(int action, mpd_t *q, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ MPD_NEW_STATIC(aligned,0,0,0,0);
+ mpd_uint_t ld;
+ mpd_ssize_t shift, exp, tz;
+ mpd_ssize_t newsize;
+ mpd_ssize_t ideal_exp;
+ mpd_uint_t rem;
+ uint8_t sign_a = mpd_sign(a);
+ uint8_t sign_b = mpd_sign(b);
+
+
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(q, a, b, ctx, status)) {
+ return;
+ }
+ _mpd_qdiv_inf(q, a, b, ctx, status);
+ return;
+ }
+ if (mpd_iszerocoeff(b)) {
+ if (mpd_iszerocoeff(a)) {
+ mpd_seterror(q, MPD_Division_undefined, status);
+ }
+ else {
+ mpd_setspecial(q, sign_a^sign_b, MPD_INF);
+ *status |= MPD_Division_by_zero;
+ }
+ return;
+ }
+ if (mpd_iszerocoeff(a)) {
+ exp = a->exp - b->exp;
+ _settriple(q, sign_a^sign_b, 0, exp);
+ mpd_qfinalize(q, ctx, status);
+ return;
+ }
+
+ shift = (b->digits - a->digits) + ctx->prec + 1;
+ ideal_exp = a->exp - b->exp;
+ exp = ideal_exp - shift;
+ if (shift > 0) {
+ if (!mpd_qshiftl(&aligned, a, shift, status)) {
+ mpd_seterror(q, MPD_Malloc_error, status);
+ goto finish;
+ }
+ a = &aligned;
+ }
+ else if (shift < 0) {
+ shift = -shift;
+ if (!mpd_qshiftl(&aligned, b, shift, status)) {
+ mpd_seterror(q, MPD_Malloc_error, status);
+ goto finish;
+ }
+ b = &aligned;
+ }
+
+
+ newsize = a->len - b->len + 1;
+ if ((q != b && q != a) || (q == b && newsize > b->len)) {
+ if (!mpd_qresize(q, newsize, status)) {
+ mpd_seterror(q, MPD_Malloc_error, status);
+ goto finish;
+ }
+ }
+
+
+ if (b->len == 1) {
+ rem = _mpd_shortdiv(q->data, a->data, a->len, b->data[0]);
+ }
+ else if (a->len < 2*MPD_NEWTONDIV_CUTOFF &&
+ b->len < MPD_NEWTONDIV_CUTOFF) {
+ int ret = _mpd_basedivmod(q->data, NULL, a->data, b->data,
+ a->len, b->len);
+ if (ret < 0) {
+ mpd_seterror(q, MPD_Malloc_error, status);
+ goto finish;
+ }
+ rem = ret;
+ }
+ else {
+ MPD_NEW_STATIC(r,0,0,0,0);
+ _mpd_qbarrett_divmod(q, &r, a, b, status);
+ if (mpd_isspecial(q) || mpd_isspecial(&r)) {
+ mpd_del(&r);
+ goto finish;
+ }
+ rem = !mpd_iszerocoeff(&r);
+ mpd_del(&r);
+ newsize = q->len;
+ }
+
+ newsize = _mpd_real_size(q->data, newsize);
+ /* resize to smaller cannot fail */
+ mpd_qresize(q, newsize, status);
+ mpd_set_flags(q, sign_a^sign_b);
+ q->len = newsize;
+ mpd_setdigits(q);
+
+ shift = ideal_exp - exp;
+ if (rem) {
+ ld = mpd_lsd(q->data[0]);
+ if (ld == 0 || ld == 5) {
+ q->data[0] += 1;
+ }
+ }
+ else if (action == SET_IDEAL_EXP && shift > 0) {
+ tz = mpd_trail_zeros(q);
+ shift = (tz > shift) ? shift : tz;
+ mpd_qshiftr_inplace(q, shift);
+ exp += shift;
+ }
+
+ q->exp = exp;
+
+
+finish:
+ mpd_del(&aligned);
+ mpd_qfinalize(q, ctx, status);
+}
+
+/* Divide a by b. */
+void
+mpd_qdiv(mpd_t *q, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ _mpd_qdiv(SET_IDEAL_EXP, q, a, b, ctx, status);
+}
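+
+/*
+ * Worked example for the division above, 1/3 at context precision 5
+ * (a sketch of the internal steps):
+ *
+ *     shift     = (digits(b) - digits(a)) + prec + 1 = (1-1)+5+1 = 6
+ *     ideal_exp = exp(a) - exp(b) = 0
+ *     a shifted left by 6 -> 1000000;  1000000 / 3 = 333333, remainder 1
+ *     remainder != 0 and lsd(q) == 3, so the last digit is not adjusted;
+ *     q = 333333E-6, finalized to 0.33333 with Inexact|Rounded.
+ *
+ * The last-digit adjustment (lsd 0 or 5 -> +1) only ensures that the final
+ * rounding in mpd_qfinalize() decides correctly when the discarded
+ * remainder is non-zero.
+ */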
+
+/* Internal function: integer division with remainder. Specials and zero
+ * divisors have been handled by the callers; the results are not finalized. */
+static void
+_mpd_qdivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ MPD_NEW_STATIC(aligned,0,0,0,0);
+ mpd_ssize_t qsize, rsize;
+ mpd_ssize_t ideal_exp, expdiff, shift;
+ uint8_t sign_a = mpd_sign(a);
+ uint8_t sign_ab = mpd_sign(a)^mpd_sign(b);
+
+
+ ideal_exp = (a->exp > b->exp) ? b->exp : a->exp;
+ if (mpd_iszerocoeff(a)) {
+ if (!mpd_qcopy(r, a, status)) {
+ goto nanresult; /* GCOV_NOT_REACHED */
+ }
+ r->exp = ideal_exp;
+ _settriple(q, sign_ab, 0, 0);
+ return;
+ }
+
+ expdiff = mpd_adjexp(a) - mpd_adjexp(b);
+ if (expdiff < 0) {
+ if (a->exp > b->exp) {
+ /* positive and less than b->digits - a->digits */
+ shift = a->exp - b->exp;
+ if (!mpd_qshiftl(r, a, shift, status)) {
+ goto nanresult;
+ }
+ r->exp = ideal_exp;
+ }
+ else {
+ if (!mpd_qcopy(r, a, status)) {
+ goto nanresult;
+ }
+ }
+ _settriple(q, sign_ab, 0, 0);
+ return;
+ }
+ if (expdiff > ctx->prec) {
+ *status |= MPD_Division_impossible;
+ goto nanresult;
+ }
+
+
+ /*
+ * At this point we have:
+ * (1) 0 <= a->exp + a->digits - b->exp - b->digits <= prec
+ * (2) a->exp - b->exp >= b->digits - a->digits
+ * (3) a->exp - b->exp <= prec + b->digits - a->digits
+ */
+ if (a->exp != b->exp) {
+ shift = a->exp - b->exp;
+ if (shift > 0) {
+ /* by (3), after the shift a->digits <= prec + b->digits */
+ if (!mpd_qshiftl(&aligned, a, shift, status)) {
+ goto nanresult;
+ }
+ a = &aligned;
+ }
+ else {
+ shift = -shift;
+ /* by (2), after the shift b->digits <= a->digits */
+ if (!mpd_qshiftl(&aligned, b, shift, status)) {
+ goto nanresult;
+ }
+ b = &aligned;
+ }
+ }
+
+
+ qsize = a->len - b->len + 1;
+ if (!(q == a && qsize < a->len) && !(q == b && qsize < b->len)) {
+ if (!mpd_qresize(q, qsize, status)) {
+ goto nanresult;
+ }
+ }
+
+ rsize = b->len;
+ if (!(r == a && rsize < a->len)) {
+ if (!mpd_qresize(r, rsize, status)) {
+ goto nanresult;
+ }
+ }
+
+ if (b->len == 1) {
+ if (a->len == 1) {
+ _mpd_div_word(&q->data[0], &r->data[0], a->data[0], b->data[0]);
+ }
+ else {
+ r->data[0] = _mpd_shortdiv(q->data, a->data, a->len, b->data[0]);
+ }
+ }
+ else if (a->len < 2*MPD_NEWTONDIV_CUTOFF &&
+ b->len < MPD_NEWTONDIV_CUTOFF) {
+ int ret;
+ ret = _mpd_basedivmod(q->data, r->data, a->data, b->data,
+ a->len, b->len);
+ if (ret == -1) {
+ *status |= MPD_Malloc_error;
+ goto nanresult;
+ }
+ }
+ else {
+ _mpd_qbarrett_divmod(q, r, a, b, status);
+ if (mpd_isspecial(q) || mpd_isspecial(r)) {
+ goto nanresult;
+ }
+ if (mpd_isinfinite(q) || q->digits > ctx->prec) {
+ *status |= MPD_Division_impossible;
+ goto nanresult;
+ }
+ qsize = q->len;
+ rsize = r->len;
+ }
+
+ qsize = _mpd_real_size(q->data, qsize);
+ /* resize to smaller cannot fail */
+ mpd_qresize(q, qsize, status);
+ q->len = qsize;
+ mpd_setdigits(q);
+ mpd_set_flags(q, sign_ab);
+ q->exp = 0;
+ if (q->digits > ctx->prec) {
+ *status |= MPD_Division_impossible;
+ goto nanresult;
+ }
+
+ rsize = _mpd_real_size(r->data, rsize);
+ /* resize to smaller cannot fail */
+ mpd_qresize(r, rsize, status);
+ r->len = rsize;
+ mpd_setdigits(r);
+ mpd_set_flags(r, sign_a);
+ r->exp = ideal_exp;
+
+out:
+ mpd_del(&aligned);
+ return;
+
+nanresult:
+ mpd_setspecial(q, MPD_POS, MPD_NAN);
+ mpd_setspecial(r, MPD_POS, MPD_NAN);
+ goto out;
+}
+
+/* Integer division with remainder. */
+void
+mpd_qdivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ uint8_t sign = mpd_sign(a)^mpd_sign(b);
+
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(q, a, b, ctx, status)) {
+ mpd_qcopy(r, q, status);
+ return;
+ }
+ if (mpd_isinfinite(a)) {
+ if (mpd_isinfinite(b)) {
+ mpd_setspecial(q, MPD_POS, MPD_NAN);
+ }
+ else {
+ mpd_setspecial(q, sign, MPD_INF);
+ }
+ mpd_setspecial(r, MPD_POS, MPD_NAN);
+ *status |= MPD_Invalid_operation;
+ return;
+ }
+ if (mpd_isinfinite(b)) {
+ if (!mpd_qcopy(r, a, status)) {
+ mpd_seterror(q, MPD_Malloc_error, status);
+ return;
+ }
+ mpd_qfinalize(r, ctx, status);
+ _settriple(q, sign, 0, 0);
+ return;
+ }
+ /* debug */
+ abort(); /* GCOV_NOT_REACHED */
+ }
+ if (mpd_iszerocoeff(b)) {
+ if (mpd_iszerocoeff(a)) {
+ mpd_setspecial(q, MPD_POS, MPD_NAN);
+ mpd_setspecial(r, MPD_POS, MPD_NAN);
+ *status |= MPD_Division_undefined;
+ }
+ else {
+ mpd_setspecial(q, sign, MPD_INF);
+ mpd_setspecial(r, MPD_POS, MPD_NAN);
+ *status |= (MPD_Division_by_zero|MPD_Invalid_operation);
+ }
+ return;
+ }
+
+ _mpd_qdivmod(q, r, a, b, ctx, status);
+ mpd_qfinalize(q, ctx, status);
+ mpd_qfinalize(r, ctx, status);
+}
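+
+/*
+ * Worked examples for divmod (integer quotient and remainder; the remainder
+ * keeps the ideal exponent min(exp(a), exp(b))):
+ *
+ *     divmod(7, 3)       ->  q = 2,   r = 1
+ *     divmod(7.75, 0.5)  ->  q = 15,  r = 0.25
+ *     divmod(1E+2, 3)    ->  q = 33,  r = 1
+ *     divmod(1, 0)       ->  q = Infinity, r = NaN,
+ *                            MPD_Division_by_zero|MPD_Invalid_operation
+ */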
+
+void
+mpd_qdivint(mpd_t *q, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ MPD_NEW_STATIC(r,0,0,0,0);
+ uint8_t sign = mpd_sign(a)^mpd_sign(b);
+
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(q, a, b, ctx, status)) {
+ return;
+ }
+ if (mpd_isinfinite(a) && mpd_isinfinite(b)) {
+ mpd_seterror(q, MPD_Invalid_operation, status);
+ return;
+ }
+ if (mpd_isinfinite(a)) {
+ mpd_setspecial(q, sign, MPD_INF);
+ return;
+ }
+ if (mpd_isinfinite(b)) {
+ _settriple(q, sign, 0, 0);
+ return;
+ }
+ /* debug */
+ abort(); /* GCOV_NOT_REACHED */
+ }
+ if (mpd_iszerocoeff(b)) {
+ if (mpd_iszerocoeff(a)) {
+ mpd_seterror(q, MPD_Division_undefined, status);
+ }
+ else {
+ mpd_setspecial(q, sign, MPD_INF);
+ *status |= MPD_Division_by_zero;
+ }
+ return;
+ }
+
+
+ _mpd_qdivmod(q, &r, a, b, ctx, status);
+ mpd_del(&r);
+ mpd_qfinalize(q, ctx, status);
+}
+
+/* Divide decimal by mpd_ssize_t. */
+void
+mpd_qdiv_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t maxcontext;
+ MPD_NEW_STATIC(bb,0,0,0,0);
+
+ mpd_maxcontext(&maxcontext);
+ mpd_qsset_ssize(&bb, b, &maxcontext, status);
+ mpd_qdiv(result, a, &bb, ctx, status);
+ mpd_del(&bb);
+}
+
+/* Divide decimal by mpd_uint_t. */
+void
+mpd_qdiv_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t maxcontext;
+ MPD_NEW_STATIC(bb,0,0,0,0);
+
+ mpd_maxcontext(&maxcontext);
+ mpd_qsset_uint(&bb, b, &maxcontext, status);
+ mpd_qdiv(result, a, &bb, ctx, status);
+ mpd_del(&bb);
+}
+
+/* Divide decimal by int32_t. */
+void
+mpd_qdiv_i32(mpd_t *result, const mpd_t *a, int32_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qdiv_ssize(result, a, b, ctx, status);
+}
+
+/* Divide decimal by uint32_t. */
+void
+mpd_qdiv_u32(mpd_t *result, const mpd_t *a, uint32_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qdiv_uint(result, a, b, ctx, status);
+}
+
+#ifdef CONFIG_64
+/* Divide decimal by int64_t. */
+void
+mpd_qdiv_i64(mpd_t *result, const mpd_t *a, int64_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qdiv_ssize(result, a, b, ctx, status);
+}
+
+/* Divide decimal by uint64_t. */
+void
+mpd_qdiv_u64(mpd_t *result, const mpd_t *a, uint64_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qdiv_uint(result, a, b, ctx, status);
+}
+#endif
+
+#if defined(_MSC_VER)
+ /* conversion from 'double' to 'mpd_ssize_t', possible loss of data */
+ #pragma warning(disable:4244)
+#endif
+/*
+ * Get the number of iterations for the Horner scheme in _mpd_qexp().
+ */
+static inline mpd_ssize_t
+_mpd_get_exp_iterations(const mpd_t *a, mpd_ssize_t prec)
+{
+ mpd_uint_t dummy;
+ mpd_uint_t msdigits;
+ double f;
+
+ /* 9 is MPD_RDIGITS for 32 bit platforms */
+ _mpd_get_msdigits(&dummy, &msdigits, a, 9);
+ f = ((double)msdigits + 1) / mpd_pow10[mpd_word_digits(msdigits)];
+
+#ifdef CONFIG_64
+ #ifdef USE_80BIT_LONG_DOUBLE
+ return ceill((1.435*(long double)prec - 1.182)
+ / log10l((long double)prec/f));
+ #else
+ /* prec > floor((1ULL<<53) / 1.435) */
+ if (prec > 6276793905742851LL) {
+ return MPD_SSIZE_MAX;
+ }
+ return ceil((1.435*(double)prec - 1.182) / log10((double)prec/f));
+ #endif
+#else /* CONFIG_32 */
+ return ceil((1.435*(double)prec - 1.182) / log10((double)prec/f));
+ #if defined(_MSC_VER)
+ #pragma warning(default:4244)
+ #endif
+#endif
+}
+
+/*
+ * Internal function, specials have been dealt with.
+ *
+ * The algorithm is from Hull & Abrham, Variable Precision Exponential Function,
+ * ACM Transactions on Mathematical Software, Vol. 12, No. 2, June 1986.
+ *
+ * Main differences:
+ *
+ * - The number of iterations for the Horner scheme is calculated using the
+ * C log10() function.
+ *
+ * - The analysis for early termination has been adapted to the mpd_t
+ *   ranges.
+ */
+static void
+_mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_context_t workctx;
+ MPD_NEW_STATIC(tmp,0,0,0,0);
+ MPD_NEW_STATIC(sum,0,0,0,0);
+ MPD_NEW_CONST(word,0,0,0,1,1,1);
+ mpd_ssize_t j, n, t;
+
+ assert(!mpd_isspecial(a));
+
+ /*
+ * We are calculating e^x = e^(r*10^t) = (e^r)^(10^t), where r < 1 and t >= 0.
+ *
+ * If t > 0, we have:
+ *
+ * (1) 0.1 <= r < 1, so e^r >= e^0.1. Overflow in the final power operation
+ * will occur when (e^0.1)^(10^t) > 10^(emax+1). If we consider MAX_EMAX,
+ * this will happen for t > 10 (32 bit) or (t > 19) (64 bit).
+ *
+ * (2) -1 < r <= -0.1, so e^r > e^-1. Underflow in the final power operation
+ * will occur when (e^-1)^(10^t) < 10^(etiny-1). If we consider MIN_ETINY,
+ * this will also happen for t > 10 (32 bit) or (t > 19) (64 bit).
+ */
+#if defined(CONFIG_64)
+ #define MPD_EXP_MAX_T 19
+#elif defined(CONFIG_32)
+ #define MPD_EXP_MAX_T 10
+#endif
+ t = a->digits + a->exp;
+ t = (t > 0) ? t : 0;
+ if (t > MPD_EXP_MAX_T) {
+ if (mpd_ispositive(a)) {
+ mpd_setspecial(result, MPD_POS, MPD_INF);
+ *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
+ }
+ else {
+ _settriple(result, MPD_POS, 0, mpd_etiny(ctx));
+ *status |= (MPD_Inexact|MPD_Rounded|MPD_Subnormal|
+ MPD_Underflow|MPD_Clamped);
+ }
+ return;
+ }
+
+ mpd_maxcontext(&workctx);
+ workctx.prec = ctx->prec + t + 2;
+ workctx.prec = (workctx.prec < 9) ? 9 : workctx.prec;
+ workctx.round = MPD_ROUND_HALF_EVEN;
+
+ if ((n = _mpd_get_exp_iterations(a, workctx.prec)) == MPD_SSIZE_MAX) {
+ mpd_seterror(result, MPD_Invalid_operation, status); /* GCOV_UNLIKELY */
+ goto finish; /* GCOV_UNLIKELY */
+ }
+
+ if (!mpd_qcopy(result, a, status)) {
+ goto finish;
+ }
+ result->exp -= t;
+
+ _settriple(&sum, MPD_POS, 1, 0);
+
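+ /*
+ * The following loop evaluates the truncated Taylor series of e^r by
+ * Horner's scheme, where r = a * 10^-t ('result' at this point):
+ *
+ * sum = 1
+ * for j = n-1 .. 1: sum = 1 + (r/j) * sum
+ *
+ * i.e. sum = 1 + r + r^2/2! + ... + r^(n-1)/(n-1)!. The power step below
+ * then raises sum to 10^t to undo the argument reduction.
+ */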
+ for (j = n-1; j >= 1; j--) {
+ word.data[0] = j;
+ mpd_setdigits(&word);
+ mpd_qdiv(&tmp, result, &word, &workctx, &workctx.status);
+ mpd_qmul(&sum, &sum, &tmp, &workctx, &workctx.status);
+ mpd_qadd(&sum, &sum, &one, &workctx, &workctx.status);
+ }
+
+#ifdef CONFIG_64
+ _mpd_qpow_uint(result, &sum, mpd_pow10[t], MPD_POS, &workctx, status);
+#else
+ if (t <= MPD_MAX_POW10) {
+ _mpd_qpow_uint(result, &sum, mpd_pow10[t], MPD_POS, &workctx, status);
+ }
+ else {
+ t -= MPD_MAX_POW10;
+ _mpd_qpow_uint(&tmp, &sum, mpd_pow10[MPD_MAX_POW10], MPD_POS,
+ &workctx, status);
+ _mpd_qpow_uint(result, &tmp, mpd_pow10[t], MPD_POS, &workctx, status);
+ }
+#endif
+
+
+finish:
+ mpd_del(&tmp);
+ mpd_del(&sum);
+ *status |= (workctx.status&MPD_Errors);
+ *status |= (MPD_Inexact|MPD_Rounded);
+}
+
+/* exp(a) */
+void
+mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_context_t workctx;
+
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ if (mpd_isnegative(a)) {
+ _settriple(result, MPD_POS, 0, 0);
+ }
+ else {
+ mpd_setspecial(result, MPD_POS, MPD_INF);
+ }
+ return;
+ }
+ if (mpd_iszerocoeff(a)) {
+ _settriple(result, MPD_POS, 1, 0);
+ return;
+ }
+
+ workctx = *ctx;
+ workctx.round = MPD_ROUND_HALF_EVEN;
+
+ if (ctx->allcr) {
+ MPD_NEW_STATIC(t1, 0,0,0,0);
+ MPD_NEW_STATIC(t2, 0,0,0,0);
+ MPD_NEW_STATIC(ulp, 0,0,0,0);
+ MPD_NEW_STATIC(aa, 0,0,0,0);
+ mpd_ssize_t prec;
+
+ if (result == a) {
+ if (!mpd_qcopy(&aa, a, status)) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return;
+ }
+ a = &aa;
+ }
+
+ workctx.clamp = 0;
+ prec = ctx->prec + 3;
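+ /*
+ * Correct-rounding loop (in the spirit of Ziv's strategy): compute the
+ * result with a few extra digits, then check whether result+ulp and
+ * result-ulp still round to the same value at the target precision.
+ * If not, retry with a larger working precision.
+ */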
+ while (1) {
+ workctx.prec = prec;
+ _mpd_qexp(result, a, &workctx, status);
+ _ssettriple(&ulp, MPD_POS, 1,
+ result->exp + result->digits-workctx.prec-1);
+
+ workctx.prec = ctx->prec;
+ mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
+ mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
+ if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
+ mpd_qcmp(&t1, &t2, status) == 0) {
+ workctx.clamp = ctx->clamp;
+ mpd_check_underflow(result, &workctx, status);
+ mpd_qfinalize(result, &workctx, status);
+ break;
+ }
+ prec += MPD_RDIGITS;
+ }
+ mpd_del(&t1);
+ mpd_del(&t2);
+ mpd_del(&ulp);
+ mpd_del(&aa);
+ }
+ else {
+ _mpd_qexp(result, a, &workctx, status);
+ mpd_check_underflow(result, &workctx, status);
+ mpd_qfinalize(result, &workctx, status);
+ }
+}
+
+/* Fused multiply-add: (a * b) + c, with a single final rounding. */
+void
+mpd_qfma(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_t *c,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ uint32_t workstatus = 0;
+ mpd_t *cc = (mpd_t *)c;
+
+ if (result == c) {
+ if ((cc = mpd_qncopy(c)) == NULL) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return;
+ }
+ }
+
+ _mpd_qmul(result, a, b, ctx, &workstatus);
+ if (!(workstatus&MPD_Invalid_operation)) {
+ mpd_qadd(result, result, cc, ctx, &workstatus);
+ }
+
+ if (cc != c) mpd_del(cc);
+ *status |= workstatus;
+}
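+
+/*
+ * A small illustration of why the fused operation matters (context
+ * precision 5):
+ *
+ *     fma(10001, 10001, -100020001)  ->  0
+ *
+ * whereas rounding the product to 5 digits first (1.0002E+8) and then
+ * adding c would give -1. The internal _mpd_qmul() keeps the full product,
+ * so only the final addition is rounded.
+ */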
+
+static inline int
+ln_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2], mpd_ssize_t maxprec,
+ mpd_ssize_t initprec)
+{
+ mpd_ssize_t k;
+ int i;
+
+ assert(maxprec >= 2 && initprec >= 2);
+ if (maxprec <= initprec) return -1;
+
+ i = 0; k = maxprec;
+ do {
+ k = (k+2) / 2;
+ klist[i++] = k;
+ } while (k > initprec);
+
+ return i-1;
+}
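+
+/*
+ * Worked example for the precision schedule (maxprec=30, initprec=2):
+ *
+ *     klist = {16, 9, 5, 3, 2},  return value 4
+ *
+ * The callers walk klist from the returned index down to 0, i.e. from the
+ * lowest precision up to roughly maxprec, so the working precision roughly
+ * doubles in each pass of the ln()/ln10() loops.
+ */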
+
+#ifdef CONFIG_64
+#if MPD_RDIGITS != 19
+ #error "mpdecimal.c: MPD_RDIGITS must be 19."
+#endif
+static const mpd_uint_t mpd_ln10_data[MPD_MINALLOC_MAX] = {
+ 6983716328982174407ULL, 9089704281976336583ULL, 1515961135648465461ULL,
+ 4416816335727555703ULL, 2900988039194170265ULL, 2307925037472986509ULL,
+ 107598438319191292ULL, 3466624107184669231ULL, 4450099781311469159ULL,
+ 9807828059751193854ULL, 7713456862091670584ULL, 1492198849978748873ULL,
+ 6528728696511086257ULL, 2385392051446341972ULL, 8692180205189339507ULL,
+ 6518769751037497088ULL, 2375253577097505395ULL, 9095610299291824318ULL,
+ 982748238504564801ULL, 5438635917781170543ULL, 7547331541421808427ULL,
+ 752371033310119785ULL, 3171643095059950878ULL, 9785265383207606726ULL,
+ 2932258279850258550ULL, 5497347726624257094ULL, 2976979522110718264ULL,
+ 9221477656763693866ULL, 1979650047149510504ULL, 6674183485704422507ULL,
+ 9702766860595249671ULL, 9278096762712757753ULL, 9314848524948644871ULL,
+ 6826928280848118428ULL, 754403708474699401ULL, 230105703089634572ULL,
+ 1929203337658714166ULL, 7589402567763113569ULL, 4208241314695689016ULL,
+ 2922455440575892572ULL, 9356734206705811364ULL, 2684916746550586856ULL,
+ 644507064800027750ULL, 9476834636167921018ULL, 5659121373450747856ULL,
+ 2835522011480466371ULL, 6470806855677432162ULL, 7141748003688084012ULL,
+ 9619404400222105101ULL, 5504893431493939147ULL, 6674744042432743651ULL,
+ 2287698219886746543ULL, 7773262884616336622ULL, 1985283935053089653ULL,
+ 4680843799894826233ULL, 8168948290720832555ULL, 8067566662873690987ULL,
+ 6248633409525465082ULL, 9829834196778404228ULL, 3524802359972050895ULL,
+ 3327900967572609677ULL, 110148862877297603ULL, 179914546843642076ULL,
+ 2302585092994045684ULL
+};
+#else
+#if MPD_RDIGITS != 9
+ #error "mpdecimal.c: MPD_RDIGITS must be 9."
+#endif
+static const mpd_uint_t mpd_ln10_data[MPD_MINALLOC_MAX] = {
+ 401682692UL, 708474699UL, 720754403UL, 30896345UL, 602301057UL, 765871416UL,
+ 192920333UL, 763113569UL, 589402567UL, 956890167UL, 82413146UL, 589257242UL,
+ 245544057UL, 811364292UL, 734206705UL, 868569356UL, 167465505UL, 775026849UL,
+ 706480002UL, 18064450UL, 636167921UL, 569476834UL, 734507478UL, 156591213UL,
+ 148046637UL, 283552201UL, 677432162UL, 470806855UL, 880840126UL, 417480036UL,
+ 210510171UL, 940440022UL, 939147961UL, 893431493UL, 436515504UL, 440424327UL,
+ 654366747UL, 821988674UL, 622228769UL, 884616336UL, 537773262UL, 350530896UL,
+ 319852839UL, 989482623UL, 468084379UL, 720832555UL, 168948290UL, 736909878UL,
+ 675666628UL, 546508280UL, 863340952UL, 404228624UL, 834196778UL, 508959829UL,
+ 23599720UL, 967735248UL, 96757260UL, 603332790UL, 862877297UL, 760110148UL,
+ 468436420UL, 401799145UL, 299404568UL, 230258509UL
+};
+#endif
+/* _mpd_ln10 is used directly for precisions smaller than MINALLOC_MAX*RDIGITS.
+ Otherwise, it serves as the initial approximation for calculating ln(10). */
+static const mpd_t _mpd_ln10 = {
+ MPD_STATIC|MPD_CONST_DATA, -(MPD_MINALLOC_MAX*MPD_RDIGITS-1),
+ MPD_MINALLOC_MAX*MPD_RDIGITS, MPD_MINALLOC_MAX, MPD_MINALLOC_MAX,
+ (mpd_uint_t *)mpd_ln10_data
+};
+
+/* Set 'result' to ln(10), with 'prec' digits, using ROUND_HALF_EVEN. */
+void
+mpd_qln10(mpd_t *result, mpd_ssize_t prec, uint32_t *status)
+{
+ mpd_context_t varcontext, maxcontext;
+ MPD_NEW_STATIC(tmp, 0,0,0,0);
+ MPD_NEW_CONST(static10, 0,0,2,1,1,10);
+ mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
+ mpd_uint_t rnd;
+ mpd_ssize_t shift;
+ int i;
+
+ assert(prec >= 1);
+
+ shift = MPD_MINALLOC_MAX*MPD_RDIGITS-prec;
+ shift = shift < 0 ? 0 : shift;
+
+ rnd = mpd_qshiftr(result, &_mpd_ln10, shift, status);
+ if (rnd == MPD_UINT_MAX) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return;
+ }
+ result->exp = -(result->digits-1);
+
+ mpd_maxcontext(&maxcontext);
+ if (prec < MPD_MINALLOC_MAX*MPD_RDIGITS) {
+ maxcontext.prec = prec;
+ _mpd_apply_round_excess(result, rnd, &maxcontext, status);
+ *status |= (MPD_Inexact|MPD_Rounded);
+ return;
+ }
+
+ mpd_maxcontext(&varcontext);
+ varcontext.round = MPD_ROUND_TRUNC;
+
+ i = ln_schedule_prec(klist, prec+2, result->digits);
+ for (; i >= 0; i--) {
+ varcontext.prec = 2*klist[i]+3;
+ result->flags ^= MPD_NEG;
+ _mpd_qexp(&tmp, result, &varcontext, status);
+ result->flags ^= MPD_NEG;
+ mpd_qmul(&tmp, &static10, &tmp, &varcontext, status);
+ mpd_qsub(&tmp, &tmp, &one, &maxcontext, status);
+ mpd_qadd(result, result, &tmp, &maxcontext, status);
+ if (mpd_isspecial(result)) {
+ break;
+ }
+ }
+
+ mpd_del(&tmp);
+ maxcontext.prec = prec;
+ mpd_qfinalize(result, &maxcontext, status);
+}
+
+/* Initial approximations for the ln() iteration */
+static const uint16_t lnapprox[900] = {
+ /* index 0 - 400: log((i+100)/100) * 1000 */
+ 0, 10, 20, 30, 39, 49, 58, 68, 77, 86, 95, 104, 113, 122, 131, 140, 148, 157,
+ 166, 174, 182, 191, 199, 207, 215, 223, 231, 239, 247, 255, 262, 270, 278,
+ 285, 293, 300, 308, 315, 322, 329, 336, 344, 351, 358, 365, 372, 378, 385,
+ 392, 399, 406, 412, 419, 425, 432, 438, 445, 451, 457, 464, 470, 476, 482,
+ 489, 495, 501, 507, 513, 519, 525, 531, 536, 542, 548, 554, 560, 565, 571,
+ 577, 582, 588, 593, 599, 604, 610, 615, 621, 626, 631, 637, 642, 647, 652,
+ 658, 663, 668, 673, 678, 683, 688, 693, 698, 703, 708, 713, 718, 723, 728,
+ 732, 737, 742, 747, 751, 756, 761, 766, 770, 775, 779, 784, 788, 793, 798,
+ 802, 806, 811, 815, 820, 824, 829, 833, 837, 842, 846, 850, 854, 859, 863,
+ 867, 871, 876, 880, 884, 888, 892, 896, 900, 904, 908, 912, 916, 920, 924,
+ 928, 932, 936, 940, 944, 948, 952, 956, 959, 963, 967, 971, 975, 978, 982,
+ 986, 990, 993, 997, 1001, 1004, 1008, 1012, 1015, 1019, 1022, 1026, 1030,
+ 1033, 1037, 1040, 1044, 1047, 1051, 1054, 1058, 1061, 1065, 1068, 1072, 1075,
+ 1078, 1082, 1085, 1089, 1092, 1095, 1099, 1102, 1105, 1109, 1112, 1115, 1118,
+ 1122, 1125, 1128, 1131, 1135, 1138, 1141, 1144, 1147, 1151, 1154, 1157, 1160,
+ 1163, 1166, 1169, 1172, 1176, 1179, 1182, 1185, 1188, 1191, 1194, 1197, 1200,
+ 1203, 1206, 1209, 1212, 1215, 1218, 1221, 1224, 1227, 1230, 1233, 1235, 1238,
+ 1241, 1244, 1247, 1250, 1253, 1256, 1258, 1261, 1264, 1267, 1270, 1273, 1275,
+ 1278, 1281, 1284, 1286, 1289, 1292, 1295, 1297, 1300, 1303, 1306, 1308, 1311,
+ 1314, 1316, 1319, 1322, 1324, 1327, 1330, 1332, 1335, 1338, 1340, 1343, 1345,
+ 1348, 1351, 1353, 1356, 1358, 1361, 1364, 1366, 1369, 1371, 1374, 1376, 1379,
+ 1381, 1384, 1386, 1389, 1391, 1394, 1396, 1399, 1401, 1404, 1406, 1409, 1411,
+ 1413, 1416, 1418, 1421, 1423, 1426, 1428, 1430, 1433, 1435, 1437, 1440, 1442,
+ 1445, 1447, 1449, 1452, 1454, 1456, 1459, 1461, 1463, 1466, 1468, 1470, 1472,
+ 1475, 1477, 1479, 1482, 1484, 1486, 1488, 1491, 1493, 1495, 1497, 1500, 1502,
+ 1504, 1506, 1509, 1511, 1513, 1515, 1517, 1520, 1522, 1524, 1526, 1528, 1530,
+ 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1548, 1550, 1552, 1554, 1556, 1558,
+ 1560, 1562, 1564, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1581, 1583, 1585,
+ 1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609,
+ /* index 401 - 899: -log((i+100)/1000) * 1000 */
+ 691, 689, 687, 685, 683, 681, 679, 677, 675, 673, 671, 669, 668, 666, 664,
+ 662, 660, 658, 656, 654, 652, 650, 648, 646, 644, 642, 641, 639, 637, 635,
+ 633, 631, 629, 627, 626, 624, 622, 620, 618, 616, 614, 612, 611, 609, 607,
+ 605, 603, 602, 600, 598, 596, 594, 592, 591, 589, 587, 585, 583, 582, 580,
+ 578, 576, 574, 573, 571, 569, 567, 566, 564, 562, 560, 559, 557, 555, 553,
+ 552, 550, 548, 546, 545, 543, 541, 540, 538, 536, 534, 533, 531, 529, 528,
+ 526, 524, 523, 521, 519, 518, 516, 514, 512, 511, 509, 508, 506, 504, 502,
+ 501, 499, 498, 496, 494, 493, 491, 489, 488, 486, 484, 483, 481, 480, 478,
+ 476, 475, 473, 472, 470, 468, 467, 465, 464, 462, 460, 459, 457, 456, 454,
+ 453, 451, 449, 448, 446, 445, 443, 442, 440, 438, 437, 435, 434, 432, 431,
+ 429, 428, 426, 425, 423, 422, 420, 419, 417, 416, 414, 412, 411, 410, 408,
+ 406, 405, 404, 402, 400, 399, 398, 396, 394, 393, 392, 390, 389, 387, 386,
+ 384, 383, 381, 380, 378, 377, 375, 374, 372, 371, 370, 368, 367, 365, 364,
+ 362, 361, 360, 358, 357, 355, 354, 352, 351, 350, 348, 347, 345, 344, 342,
+ 341, 340, 338, 337, 336, 334, 333, 331, 330, 328, 327, 326, 324, 323, 322,
+ 320, 319, 318, 316, 315, 313, 312, 311, 309, 308, 306, 305, 304, 302, 301,
+ 300, 298, 297, 296, 294, 293, 292, 290, 289, 288, 286, 285, 284, 282, 281,
+ 280, 278, 277, 276, 274, 273, 272, 270, 269, 268, 267, 265, 264, 263, 261,
+ 260, 259, 258, 256, 255, 254, 252, 251, 250, 248, 247, 246, 245, 243, 242,
+ 241, 240, 238, 237, 236, 234, 233, 232, 231, 229, 228, 227, 226, 224, 223,
+ 222, 221, 219, 218, 217, 216, 214, 213, 212, 211, 210, 208, 207, 206, 205,
+ 203, 202, 201, 200, 198, 197, 196, 195, 194, 192, 191, 190, 189, 188, 186,
+ 185, 184, 183, 182, 180, 179, 178, 177, 176, 174, 173, 172, 171, 170, 168,
+ 167, 166, 165, 164, 162, 161, 160, 159, 158, 157, 156, 154, 153, 152, 151,
+ 150, 148, 147, 146, 145, 144, 143, 142, 140, 139, 138, 137, 136, 135, 134,
+ 132, 131, 130, 129, 128, 127, 126, 124, 123, 122, 121, 120, 119, 118, 116,
+ 115, 114, 113, 112, 111, 110, 109, 108, 106, 105, 104, 103, 102, 101, 100,
+ 99, 98, 97, 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 84, 83, 82, 81, 80, 79,
+ 78, 77, 76, 75, 74, 73, 72, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59,
+ 58, 57, 56, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39,
+ 38, 37, 36, 35, 34, 33, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19,
+ 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
+};
+
+/* Internal ln() function that does not check for specials, zero or one. */
+static void
+_mpd_qln(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_context_t varcontext, maxcontext;
+ mpd_t *z = (mpd_t *) result;
+ MPD_NEW_STATIC(v,0,0,0,0);
+ MPD_NEW_STATIC(vtmp,0,0,0,0);
+ MPD_NEW_STATIC(tmp,0,0,0,0);
+ mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
+ mpd_ssize_t maxprec, shift, t;
+ mpd_ssize_t a_digits, a_exp;
+ mpd_uint_t dummy, x;
+ int i;
+
+ assert(!mpd_isspecial(a) && !mpd_iszerocoeff(a));
+
+ /*
+ * We are calculating ln(a) = ln(v * 10^t) = ln(v) + t*ln(10),
+ * where 0.5 < v <= 5.
+ */
+ if (!mpd_qcopy(&v, a, status)) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ goto finish;
+ }
+
+ /* Initial approximation: we have at least one non-zero digit */
+ _mpd_get_msdigits(&dummy, &x, &v, 3);
+ if (x < 10) x *= 10;
+ if (x < 100) x *= 10;
+ x -= 100;
+
+ /* a may equal z */
+ a_digits = a->digits;
+ a_exp = a->exp;
+
+ mpd_minalloc(z);
+ mpd_clear_flags(z);
+ z->data[0] = lnapprox[x];
+ z->len = 1;
+ z->exp = -3;
+ mpd_setdigits(z);
+
+ if (x <= 400) {
+ v.exp = -(a_digits - 1);
+ t = a_exp + a_digits - 1;
+ }
+ else {
+ v.exp = -a_digits;
+ t = a_exp + a_digits;
+ mpd_set_negative(z);
+ }
+
+ mpd_maxcontext(&maxcontext);
+ mpd_maxcontext(&varcontext);
+ varcontext.round = MPD_ROUND_TRUNC;
+
+ maxprec = ctx->prec + 2;
+ if (x <= 10 || x >= 805) {
+ /* v is close to 1: Estimate the magnitude of the logarithm.
+ * If v = 1 or ln(v) will underflow, skip the loop. Otherwise,
+ * adjust the precision upwards in order to obtain a sufficient
+ * number of significant digits.
+ *
+ * 1) x/(1+x) < ln(1+x) < x, for x > -1, x != 0
+ *
+ * 2) (v-1)/v < ln(v) < v-1
+ */
+ mpd_t *lower = &tmp;
+ mpd_t *upper = &vtmp;
+ int cmp = _mpd_cmp(&v, &one);
+
+ varcontext.round = MPD_ROUND_CEILING;
+ varcontext.prec = maxprec;
+ mpd_qsub(upper, &v, &one, &varcontext, &varcontext.status);
+ varcontext.round = MPD_ROUND_FLOOR;
+ mpd_qdiv(lower, upper, &v, &varcontext, &varcontext.status);
+ varcontext.round = MPD_ROUND_TRUNC;
+
+ if (cmp < 0) {
+ _mpd_ptrswap(&upper, &lower);
+ }
+ if (mpd_adjexp(upper) < mpd_etiny(ctx)) {
+ _settriple(z, (cmp<0), 1, mpd_etiny(ctx)-1);
+ goto postloop;
+ }
+ /* XXX optimization: t == 0 && mpd_adjexp(lower) < 0 */
+ if (mpd_adjexp(lower) < 0) {
+ maxprec = maxprec - mpd_adjexp(lower);
+ }
+ }
+
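+ /* Newton iteration for ln(v): z' = z + (v*exp(-z) - 1). Each step
+ * roughly doubles the number of correct digits (schedule in klist). */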
+ i = ln_schedule_prec(klist, maxprec, 2);
+ for (; i >= 0; i--) {
+ varcontext.prec = 2*klist[i]+3;
+ z->flags ^= MPD_NEG;
+ _mpd_qexp(&tmp, z, &varcontext, status);
+ z->flags ^= MPD_NEG;
+
+ if (v.digits > varcontext.prec) {
+ shift = v.digits - varcontext.prec;
+ mpd_qshiftr(&vtmp, &v, shift, status);
+ vtmp.exp += shift;
+ mpd_qmul(&tmp, &vtmp, &tmp, &varcontext, status);
+ }
+ else {
+ mpd_qmul(&tmp, &v, &tmp, &varcontext, status);
+ }
+
+ mpd_qsub(&tmp, &tmp, &one, &maxcontext, status);
+ mpd_qadd(z, z, &tmp, &maxcontext, status);
+ if (mpd_isspecial(z)) {
+ break;
+ }
+ }
+
+postloop:
+ mpd_qln10(&v, maxprec+2, status);
+ mpd_qmul_ssize(&tmp, &v, t, &maxcontext, status);
+ varcontext.prec = maxprec+2;
+ mpd_qadd(result, &tmp, z, &varcontext, status);
+
+
+finish:
+ mpd_del(&v);
+ mpd_del(&vtmp);
+ mpd_del(&tmp);
+}
+
+/* ln(a) */
+void
+mpd_qln(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_context_t workctx;
+ mpd_ssize_t adjexp, t;
+
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ if (mpd_isnegative(a)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ mpd_setspecial(result, MPD_POS, MPD_INF);
+ return;
+ }
+ if (mpd_iszerocoeff(a)) {
+ mpd_setspecial(result, MPD_NEG, MPD_INF);
+ return;
+ }
+ if (mpd_isnegative(a)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (_mpd_cmp(a, &one) == 0) {
+ _settriple(result, MPD_POS, 0, 0);
+ return;
+ }
+ /* Check if the result will overflow.
+ *
+ * 1) adjexp(a) + 1 > log10(a) >= adjexp(a)
+ *
+ * 2) |log10(a)| >= adjexp(a), if adjexp(a) >= 0
+ * |log10(a)| > -adjexp(a)-1, if adjexp(a) < 0
+ *
+ * 3) |log(a)| > 2*|log10(a)|
+ */
+ adjexp = mpd_adjexp(a);
+ t = (adjexp < 0) ? -adjexp-1 : adjexp;
+ t *= 2;
+ if (mpd_exp_digits(t)-1 > ctx->emax) {
+ *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
+ mpd_setspecial(result, (adjexp<0), MPD_INF);
+ return;
+ }
+
+ workctx = *ctx;
+ workctx.round = MPD_ROUND_HALF_EVEN;
+
+ if (ctx->allcr) {
+ MPD_NEW_STATIC(t1, 0,0,0,0);
+ MPD_NEW_STATIC(t2, 0,0,0,0);
+ MPD_NEW_STATIC(ulp, 0,0,0,0);
+ MPD_NEW_STATIC(aa, 0,0,0,0);
+ mpd_ssize_t prec;
+
+ if (result == a) {
+ if (!mpd_qcopy(&aa, a, status)) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return;
+ }
+ a = &aa;
+ }
+
+ workctx.clamp = 0;
+ prec = ctx->prec + 3;
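+ /* Ziv's strategy for correct rounding: compute with increasing
+ * working precision until result-ulp and result+ulp round to the
+ * same number at the target precision. */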
+ while (1) {
+ workctx.prec = prec;
+ _mpd_qln(result, a, &workctx, status);
+ _ssettriple(&ulp, MPD_POS, 1,
+ result->exp + result->digits-workctx.prec-1);
+
+ workctx.prec = ctx->prec;
+ mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
+ mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
+ if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
+ mpd_qcmp(&t1, &t2, status) == 0) {
+ workctx.clamp = ctx->clamp;
+ mpd_check_underflow(result, &workctx, status);
+ mpd_qfinalize(result, &workctx, status);
+ break;
+ }
+ prec += MPD_RDIGITS;
+ }
+ mpd_del(&t1);
+ mpd_del(&t2);
+ mpd_del(&ulp);
+ mpd_del(&aa);
+ }
+ else {
+ _mpd_qln(result, a, &workctx, status);
+ mpd_check_underflow(result, &workctx, status);
+ mpd_qfinalize(result, &workctx, status);
+ }
+}
+
+/* Internal log10() function that does not check for specials, zero, ... */
+static void
+_mpd_qlog10(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_context_t workctx;
+ MPD_NEW_STATIC(ln10,0,0,0,0);
+
+ mpd_maxcontext(&workctx);
+ workctx.prec = ctx->prec + 3;
+ _mpd_qln(result, a, &workctx, status);
+ mpd_qln10(&ln10, workctx.prec, status);
+
+ workctx = *ctx;
+ workctx.round = MPD_ROUND_HALF_EVEN;
+ _mpd_qdiv(NO_IDEAL_EXP, result, result, &ln10, &workctx, status);
+
+ mpd_del(&ln10);
+}
+
+/* log10(a) */
+void
+mpd_qlog10(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_context_t workctx;
+ mpd_ssize_t adjexp, t;
+
+ workctx = *ctx;
+ workctx.round = MPD_ROUND_HALF_EVEN;
+
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ if (mpd_isnegative(a)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ mpd_setspecial(result, MPD_POS, MPD_INF);
+ return;
+ }
+ if (mpd_iszerocoeff(a)) {
+ mpd_setspecial(result, MPD_NEG, MPD_INF);
+ return;
+ }
+ if (mpd_isnegative(a)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (mpd_coeff_ispow10(a)) {
+ uint8_t sign = 0;
+ adjexp = mpd_adjexp(a);
+ if (adjexp < 0) {
+ sign = 1;
+ adjexp = -adjexp;
+ }
+ _settriple(result, sign, adjexp, 0);
+ mpd_qfinalize(result, &workctx, status);
+ return;
+ }
+ /* Check if the result will overflow.
+ *
+ * 1) adjexp(a) + 1 > log10(a) >= adjexp(a)
+ *
+ * 2) |log10(a)| >= adjexp(a), if adjexp(a) >= 0
+ * |log10(a)| > -adjexp(a)-1, if adjexp(a) < 0
+ */
+ adjexp = mpd_adjexp(a);
+ t = (adjexp < 0) ? -adjexp-1 : adjexp;
+ if (mpd_exp_digits(t)-1 > ctx->emax) {
+ *status |= MPD_Overflow|MPD_Inexact|MPD_Rounded;
+ mpd_setspecial(result, (adjexp<0), MPD_INF);
+ return;
+ }
+
+ if (ctx->allcr) {
+ MPD_NEW_STATIC(t1, 0,0,0,0);
+ MPD_NEW_STATIC(t2, 0,0,0,0);
+ MPD_NEW_STATIC(ulp, 0,0,0,0);
+ MPD_NEW_STATIC(aa, 0,0,0,0);
+ mpd_ssize_t prec;
+
+ if (result == a) {
+ if (!mpd_qcopy(&aa, a, status)) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return;
+ }
+ a = &aa;
+ }
+
+ workctx.clamp = 0;
+ prec = ctx->prec + 3;
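+ /* Correct rounding via Ziv's strategy, as in mpd_qln() above. */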
+ while (1) {
+ workctx.prec = prec;
+ _mpd_qlog10(result, a, &workctx, status);
+ _ssettriple(&ulp, MPD_POS, 1,
+ result->exp + result->digits-workctx.prec-1);
+
+ workctx.prec = ctx->prec;
+ mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
+ mpd_qsub(&t2, result, &ulp, &workctx, &workctx.status);
+ if (mpd_isspecial(result) || mpd_iszerocoeff(result) ||
+ mpd_qcmp(&t1, &t2, status) == 0) {
+ workctx.clamp = ctx->clamp;
+ mpd_check_underflow(result, &workctx, status);
+ mpd_qfinalize(result, &workctx, status);
+ break;
+ }
+ prec += MPD_RDIGITS;
+ }
+ mpd_del(&t1);
+ mpd_del(&t2);
+ mpd_del(&ulp);
+ mpd_del(&aa);
+ }
+ else {
+ _mpd_qlog10(result, a, &workctx, status);
+ mpd_check_underflow(result, &workctx, status);
+ }
+}
+
+/*
+ * Maximum of the two operands. Attention: If one operand is a quiet NaN and the
+ * other is numeric, the numeric operand is returned. This may not be what one
+ * expects.
+ */
+void
+mpd_qmax(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ int c;
+
+ if (mpd_isqnan(a) && !mpd_isnan(b)) {
+ mpd_qcopy(result, b, status);
+ }
+ else if (mpd_isqnan(b) && !mpd_isnan(a)) {
+ mpd_qcopy(result, a, status);
+ }
+ else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ return;
+ }
+ else {
+ c = _mpd_cmp(a, b);
+ if (c == 0) {
+ c = _mpd_cmp_numequal(a, b);
+ }
+
+ if (c < 0) {
+ mpd_qcopy(result, b, status);
+ }
+ else {
+ mpd_qcopy(result, a, status);
+ }
+ }
+
+ mpd_qfinalize(result, ctx, status);
+}
+
+/*
+ * Maximum magnitude: Same as mpd_max(), but compares the operands with their
+ * sign ignored.
+ */
+void
+mpd_qmax_mag(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ int c;
+
+ if (mpd_isqnan(a) && !mpd_isnan(b)) {
+ mpd_qcopy(result, b, status);
+ }
+ else if (mpd_isqnan(b) && !mpd_isnan(a)) {
+ mpd_qcopy(result, a, status);
+ }
+ else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ return;
+ }
+ else {
+ c = _mpd_cmp_abs(a, b);
+ if (c == 0) {
+ c = _mpd_cmp_numequal(a, b);
+ }
+
+ if (c < 0) {
+ mpd_qcopy(result, b, status);
+ }
+ else {
+ mpd_qcopy(result, a, status);
+ }
+ }
+
+ mpd_qfinalize(result, ctx, status);
+}
+
+/*
+ * Minimum of the two operands. Attention: If one operand is a quiet NaN and the
+ * other is numeric, the numeric operand is returned. This may not be what one
+ * expects.
+ */
+void
+mpd_qmin(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ int c;
+
+ if (mpd_isqnan(a) && !mpd_isnan(b)) {
+ mpd_qcopy(result, b, status);
+ }
+ else if (mpd_isqnan(b) && !mpd_isnan(a)) {
+ mpd_qcopy(result, a, status);
+ }
+ else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ return;
+ }
+ else {
+ c = _mpd_cmp(a, b);
+ if (c == 0) {
+ c = _mpd_cmp_numequal(a, b);
+ }
+
+ if (c < 0) {
+ mpd_qcopy(result, a, status);
+ }
+ else {
+ mpd_qcopy(result, b, status);
+ }
+ }
+
+ mpd_qfinalize(result, ctx, status);
+}
+
+/*
+ * Minimum magnitude: Same as mpd_min(), but compares the operands with their
+ * sign ignored.
+ */
+void
+mpd_qmin_mag(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ int c;
+
+ if (mpd_isqnan(a) && !mpd_isnan(b)) {
+ mpd_qcopy(result, b, status);
+ }
+ else if (mpd_isqnan(b) && !mpd_isnan(a)) {
+ mpd_qcopy(result, a, status);
+ }
+ else if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ return;
+ }
+ else {
+ c = _mpd_cmp_abs(a, b);
+ if (c == 0) {
+ c = _mpd_cmp_numequal(a, b);
+ }
+
+ if (c < 0) {
+ mpd_qcopy(result, a, status);
+ }
+ else {
+ mpd_qcopy(result, b, status);
+ }
+ }
+
+ mpd_qfinalize(result, ctx, status);
+}
+
+/* Minimum space needed for the result array in _karatsuba_rec(). */
+static inline mpd_size_t
+_kmul_resultsize(mpd_size_t la, mpd_size_t lb)
+{
+ mpd_size_t n, m;
+
+ n = add_size_t(la, lb);
+ n = add_size_t(n, 1);
+
+ m = (la+1)/2 + 1;
+ m = mul_size_t(m, 3);
+
+ return (m > n) ? m : n;
+}
+
+/* Work space needed in _karatsuba_rec(). lim >= 4 */
+static inline mpd_size_t
+_kmul_worksize(mpd_size_t n, mpd_size_t lim)
+{
+ mpd_size_t m;
+
+ if (n <= lim) {
+ return 0;
+ }
+
+ m = (n+1)/2 + 1;
+
+ return add_size_t(mul_size_t(m, 2), _kmul_worksize(m, lim));
+}
+
+
+#define MPD_KARATSUBA_BASECASE 16 /* must be >= 4 */
+
+/*
+ * Add the product of a and b to c.
+ * c must be _kmul_resultsize(la, lb) in size.
+ * w is used as a work array and must be _kmul_worksize(la, lim) in size.
+ * Roman E. Maeder, Storage Allocation for the Karatsuba Integer Multiplication
+ * Algorithm. In "Design and implementation of symbolic computation systems",
+ * Springer, 1993, ISBN 354057235X, 9783540572350.
+ */
+static void
+_karatsuba_rec(mpd_uint_t *c, const mpd_uint_t *a, const mpd_uint_t *b,
+ mpd_uint_t *w, mpd_size_t la, mpd_size_t lb)
+{
+ mpd_size_t m, lt;
+
+ assert(la >= lb && lb > 0);
+ assert(la <= MPD_KARATSUBA_BASECASE || w != NULL);
+
+ if (la <= MPD_KARATSUBA_BASECASE) {
+ _mpd_basemul(c, a, b, la, lb);
+ return;
+ }
+
+ m = (la+1)/2; // ceil(la/2)
+
+ /* lb <= m < la */
+ if (lb <= m) {
+
+ /* lb can now be larger than la-m */
+ if (lb > la-m) {
+ lt = lb + lb + 1; // space needed for result array
+ mpd_uint_zero(w, lt); // clear result array
+ _karatsuba_rec(w, b, a+m, w+lt, lb, la-m); // b*ah
+ }
+ else {
+ lt = (la-m) + (la-m) + 1; // space needed for result array
+ mpd_uint_zero(w, lt); // clear result array
+ _karatsuba_rec(w, a+m, b, w+lt, la-m, lb); // ah*b
+ }
+ _mpd_baseaddto(c+m, w, (la-m)+lb); // add ah*b*B**m
+
+ lt = m + m + 1; // space needed for the result array
+ mpd_uint_zero(w, lt); // clear result array
+ _karatsuba_rec(w, a, b, w+lt, m, lb); // al*b
+ _mpd_baseaddto(c, w, m+lb); // add al*b
+
+ return;
+ }
+
+ /* la >= lb > m */
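+ /* Full Karatsuba step with a = ah*B**m + al and b = bh*B**m + bl:
+ * compute (al+ah)*(bl+bh), ah*bh and al*bl, then combine as
+ * a*b = ah*bh*B**2m + ((al+ah)*(bl+bh) - ah*bh - al*bl)*B**m + al*bl. */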
+ memcpy(w, a, m * sizeof *w);
+ w[m] = 0;
+ _mpd_baseaddto(w, a+m, la-m);
+
+ memcpy(w+(m+1), b, m * sizeof *w);
+ w[m+1+m] = 0;
+ _mpd_baseaddto(w+(m+1), b+m, lb-m);
+
+ _karatsuba_rec(c+m, w, w+(m+1), w+2*(m+1), m+1, m+1);
+
+ lt = (la-m) + (la-m) + 1;
+ mpd_uint_zero(w, lt);
+
+ _karatsuba_rec(w, a+m, b+m, w+lt, la-m, lb-m);
+
+ _mpd_baseaddto(c+2*m, w, (la-m) + (lb-m));
+ _mpd_basesubfrom(c+m, w, (la-m) + (lb-m));
+
+ lt = m + m + 1;
+ mpd_uint_zero(w, lt);
+
+ _karatsuba_rec(w, a, b, w+lt, m, m);
+ _mpd_baseaddto(c, w, m+m);
+ _mpd_basesubfrom(c+m, w, m+m);
+
+ return;
+}
+
+/*
+ * Multiply u and v, using Karatsuba multiplication. Returns a pointer
+ * to the result or NULL in case of failure (malloc error).
+ * Conditions: ulen >= vlen, ulen >= 4
+ */
+mpd_uint_t *
+_mpd_kmul(const mpd_uint_t *u, const mpd_uint_t *v,
+ mpd_size_t ulen, mpd_size_t vlen,
+ mpd_size_t *rsize)
+{
+ mpd_uint_t *result = NULL, *w = NULL;
+ mpd_size_t m;
+
+ assert(ulen >= 4);
+ assert(ulen >= vlen);
+
+ *rsize = _kmul_resultsize(ulen, vlen);
+ if ((result = mpd_calloc(*rsize, sizeof *result)) == NULL) {
+ return NULL;
+ }
+
+ m = _kmul_worksize(ulen, MPD_KARATSUBA_BASECASE);
+ if (m && ((w = mpd_calloc(m, sizeof *w)) == NULL)) {
+ mpd_free(result);
+ return NULL;
+ }
+
+ _karatsuba_rec(result, u, v, w, ulen, vlen);
+
+
+ if (w) mpd_free(w);
+ return result;
+}
+
+
+/* Determine the minimum length for the number theoretic transform. */
+static inline mpd_size_t
+_mpd_get_transform_len(mpd_size_t rsize)
+{
+ mpd_size_t log2rsize;
+ mpd_size_t x, step;
+
+ assert(rsize >= 4);
+ log2rsize = mpd_bsr(rsize);
+
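+ /* Supported transform lengths are powers of two and, above 1024, also
+ * 3*2**n. The largest lengths are MPD_MAXTRANSFORM_2N+MPD_MAXTRANSFORM_2N/2
+ * and 3*MPD_MAXTRANSFORM_2N; anything larger returns MPD_SIZE_MAX. */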
+ if (rsize <= 1024) {
+ x = ((mpd_size_t)1)<<log2rsize;
+ return (rsize == x) ? x : x<<1;
+ }
+ else if (rsize <= MPD_MAXTRANSFORM_2N) {
+ x = ((mpd_size_t)1)<<log2rsize;
+ if (rsize == x) return x;
+ step = x>>1;
+ x += step;
+ return (rsize <= x) ? x : x + step;
+ }
+ else if (rsize <= MPD_MAXTRANSFORM_2N+MPD_MAXTRANSFORM_2N/2) {
+ return MPD_MAXTRANSFORM_2N+MPD_MAXTRANSFORM_2N/2;
+ }
+ else if (rsize <= 3*MPD_MAXTRANSFORM_2N) {
+ return 3*MPD_MAXTRANSFORM_2N;
+ }
+ else {
+ return MPD_SIZE_MAX;
+ }
+}
+
+#ifdef PPRO
+#ifndef _MSC_VER
+static inline unsigned short
+_mpd_get_control87(void)
+{
+ unsigned short cw;
+
+ __asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
+ return cw;
+}
+
+static inline void
+_mpd_set_control87(unsigned short cw)
+{
+ __asm__ __volatile__ ("fldcw %0" : : "m" (cw));
+}
+#endif
+
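+/*
+ * Set the x87 floating point control word for the PPRO modular
+ * multiplication: all exceptions masked, round towards zero, 64-bit
+ * (extended) precision. Returns the previous control word, which can be
+ * restored with mpd_restore_fenv(). See literature/mulmod-ppro.txt.
+ */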
+unsigned int
+mpd_set_fenv(void)
+{
+ unsigned int cw;
+#ifdef _MSC_VER
+ unsigned int flags =
+ _EM_INVALID|_EM_DENORMAL|_EM_ZERODIVIDE|_EM_OVERFLOW|
+ _EM_UNDERFLOW|_EM_INEXACT|_RC_CHOP|_PC_64;
+ unsigned int mask = _MCW_EM|_MCW_RC|_MCW_PC;
+ unsigned int dummy;
+
+ __control87_2(0, 0, &cw, NULL);
+ __control87_2(flags, mask, &dummy, NULL);
+#else
+ cw = _mpd_get_control87();
+ _mpd_set_control87(cw|0xF3F);
+#endif
+ return cw;
+}
+
+void
+mpd_restore_fenv(unsigned int cw)
+{
+#ifdef _MSC_VER
+ unsigned int mask = _MCW_EM|_MCW_RC|_MCW_PC;
+ unsigned int dummy;
+
+ __control87_2(cw, mask, &dummy, NULL);
+#else
+ _mpd_set_control87((unsigned short)cw);
+#endif
+}
+#endif /* PPRO */
+
+/*
+ * Multiply u and v, using the fast number theoretic transform. Returns
+ * a pointer to the result or NULL in case of failure (malloc error).
+ */
+mpd_uint_t *
+_mpd_fntmul(const mpd_uint_t *u, const mpd_uint_t *v,
+ mpd_size_t ulen, mpd_size_t vlen,
+ mpd_size_t *rsize)
+{
+ mpd_uint_t *c1 = NULL, *c2 = NULL, *c3 = NULL, *vtmp = NULL;
+ mpd_size_t n;
+
+#ifdef PPRO
+ unsigned int cw;
+ cw = mpd_set_fenv();
+#endif
+
+ *rsize = add_size_t(ulen, vlen);
+ if ((n = _mpd_get_transform_len(*rsize)) == MPD_SIZE_MAX) {
+ goto malloc_error;
+ }
+
+ if ((c1 = mpd_calloc(sizeof *c1, n)) == NULL) {
+ goto malloc_error;
+ }
+ if ((c2 = mpd_calloc(sizeof *c2, n)) == NULL) {
+ goto malloc_error;
+ }
+ if ((c3 = mpd_calloc(sizeof *c3, n)) == NULL) {
+ goto malloc_error;
+ }
+
+ memcpy(c1, u, ulen * (sizeof *c1));
+ memcpy(c2, u, ulen * (sizeof *c2));
+ memcpy(c3, u, ulen * (sizeof *c3));
+
+ if (u == v) {
+ if (!fnt_autoconvolute(c1, n, P1) ||
+ !fnt_autoconvolute(c2, n, P2) ||
+ !fnt_autoconvolute(c3, n, P3)) {
+ goto malloc_error;
+ }
+ }
+ else {
+ if ((vtmp = mpd_calloc(sizeof *vtmp, n)) == NULL) {
+ goto malloc_error;
+ }
+
+ memcpy(vtmp, v, vlen * (sizeof *vtmp));
+ if (!fnt_convolute(c1, vtmp, n, P1)) {
+ mpd_free(vtmp);
+ goto malloc_error;
+ }
+
+ memcpy(vtmp, v, vlen * (sizeof *vtmp));
+ mpd_uint_zero(vtmp+vlen, n-vlen);
+ if (!fnt_convolute(c2, vtmp, n, P2)) {
+ mpd_free(vtmp);
+ goto malloc_error;
+ }
+
+ memcpy(vtmp, v, vlen * (sizeof *vtmp));
+ mpd_uint_zero(vtmp+vlen, n-vlen);
+ if (!fnt_convolute(c3, vtmp, n, P3)) {
+ mpd_free(vtmp);
+ goto malloc_error;
+ }
+
+ mpd_free(vtmp);
+ }
+
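+ /* c1, c2 and c3 hold the convolution modulo the primes P1, P2 and P3;
+ * crt3() recombines them with the Chinese Remainder Theorem, leaving
+ * the final product in c1. */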
+ crt3(c1, c2, c3, *rsize);
+
+out:
+#ifdef PPRO
+ mpd_restore_fenv(cw);
+#endif
+ if (c2) mpd_free(c2);
+ if (c3) mpd_free(c3);
+ return c1;
+
+malloc_error:
+ if (c1) mpd_free(c1);
+ c1 = NULL;
+ goto out;
+}
+
+
+/*
+ * Karatsuba multiplication with FNT/basemul as the base case.
+ */
+static int
+_karatsuba_rec_fnt(mpd_uint_t *c, const mpd_uint_t *a, const mpd_uint_t *b,
+ mpd_uint_t *w, mpd_size_t la, mpd_size_t lb)
+{
+ mpd_size_t m, lt;
+
+ assert(la >= lb && lb > 0);
+ assert(la <= 3*(MPD_MAXTRANSFORM_2N/2) || w != NULL);
+
+ if (la <= 3*(MPD_MAXTRANSFORM_2N/2)) {
+
+ if (lb <= 192) {
+ _mpd_basemul(c, b, a, lb, la);
+ }
+ else {
+ mpd_uint_t *result;
+ mpd_size_t dummy;
+
+ if ((result = _mpd_fntmul(a, b, la, lb, &dummy)) == NULL) {
+ return 0;
+ }
+ memcpy(c, result, (la+lb) * (sizeof *result));
+ mpd_free(result);
+ }
+ return 1;
+ }
+
+ m = (la+1)/2; // ceil(la/2)
+
+ /* lb <= m < la */
+ if (lb <= m) {
+
+ /* lb can now be larger than la-m */
+ if (lb > la-m) {
+ lt = lb + lb + 1; // space needed for result array
+ mpd_uint_zero(w, lt); // clear result array
+ if (!_karatsuba_rec_fnt(w, b, a+m, w+lt, lb, la-m)) { // b*ah
+ return 0; /* GCOV_UNLIKELY */
+ }
+ }
+ else {
+ lt = (la-m) + (la-m) + 1; // space needed for result array
+ mpd_uint_zero(w, lt); // clear result array
+ if (!_karatsuba_rec_fnt(w, a+m, b, w+lt, la-m, lb)) { // ah*b
+ return 0; /* GCOV_UNLIKELY */
+ }
+ }
+ _mpd_baseaddto(c+m, w, (la-m)+lb); // add ah*b*B**m
+
+ lt = m + m + 1; // space needed for the result array
+ mpd_uint_zero(w, lt); // clear result array
+ if (!_karatsuba_rec_fnt(w, a, b, w+lt, m, lb)) { // al*b
+ return 0; /* GCOV_UNLIKELY */
+ }
+ _mpd_baseaddto(c, w, m+lb); // add al*b
+
+ return 1;
+ }
+
+ /* la >= lb > m */
+ memcpy(w, a, m * sizeof *w);
+ w[m] = 0;
+ _mpd_baseaddto(w, a+m, la-m);
+
+ memcpy(w+(m+1), b, m * sizeof *w);
+ w[m+1+m] = 0;
+ _mpd_baseaddto(w+(m+1), b+m, lb-m);
+
+ if (!_karatsuba_rec_fnt(c+m, w, w+(m+1), w+2*(m+1), m+1, m+1)) {
+ return 0; /* GCOV_UNLIKELY */
+ }
+
+ lt = (la-m) + (la-m) + 1;
+ mpd_uint_zero(w, lt);
+
+ if (!_karatsuba_rec_fnt(w, a+m, b+m, w+lt, la-m, lb-m)) {
+ return 0; /* GCOV_UNLIKELY */
+ }
+
+ _mpd_baseaddto(c+2*m, w, (la-m) + (lb-m));
+ _mpd_basesubfrom(c+m, w, (la-m) + (lb-m));
+
+ lt = m + m + 1;
+ mpd_uint_zero(w, lt);
+
+ if (!_karatsuba_rec_fnt(w, a, b, w+lt, m, m)) {
+ return 0; /* GCOV_UNLIKELY */
+ }
+ _mpd_baseaddto(c, w, m+m);
+ _mpd_basesubfrom(c+m, w, m+m);
+
+ return 1;
+}
+
+/*
+ * Multiply u and v, using Karatsuba multiplication with the FNT as the
+ * base case. Returns a pointer to the result or NULL in case of failure
+ * (malloc error). Conditions: ulen >= vlen, ulen >= 4.
+ */
+mpd_uint_t *
+_mpd_kmul_fnt(const mpd_uint_t *u, const mpd_uint_t *v,
+ mpd_size_t ulen, mpd_size_t vlen,
+ mpd_size_t *rsize)
+{
+ mpd_uint_t *result = NULL, *w = NULL;
+ mpd_size_t m;
+
+ assert(ulen >= 4);
+ assert(ulen >= vlen);
+
+ *rsize = _kmul_resultsize(ulen, vlen);
+ if ((result = mpd_calloc(*rsize, sizeof *result)) == NULL) {
+ return NULL;
+ }
+
+ m = _kmul_worksize(ulen, 3*(MPD_MAXTRANSFORM_2N/2));
+ if (m && ((w = mpd_calloc(m, sizeof *w)) == NULL)) {
+ mpd_free(result); /* GCOV_UNLIKELY */
+ return NULL; /* GCOV_UNLIKELY */
+ }
+
+ if (!_karatsuba_rec_fnt(result, u, v, w, ulen, vlen)) {
+ mpd_free(result);
+ result = NULL;
+ }
+
+
+ if (w) mpd_free(w);
+ return result;
+}
+
+
+/* Deal with the special cases of multiplying infinities. */
+static void
+_mpd_qmul_inf(mpd_t *result, const mpd_t *a, const mpd_t *b, uint32_t *status)
+{
+ if (mpd_isinfinite(a)) {
+ if (mpd_iszero(b)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ }
+ else {
+ mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
+ }
+ return;
+ }
+ assert(mpd_isinfinite(b));
+ if (mpd_iszero(a)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ }
+ else {
+ mpd_setspecial(result, mpd_sign(a)^mpd_sign(b), MPD_INF);
+ }
+}
+
+/*
+ * Internal function: Multiply a and b. _mpd_qmul deals with specials but
+ * does NOT finalize the result. This is for use in mpd_fma().
+ */
+static inline void
+_mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_t *big = (mpd_t *)a, *small = (mpd_t *)b;
+ mpd_uint_t *rdata = NULL;
+ mpd_uint_t rbuf[MPD_MINALLOC_MAX];
+ mpd_size_t rsize, i;
+
+
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ return;
+ }
+ _mpd_qmul_inf(result, a, b, status);
+ return;
+ }
+
+ if (small->len > big->len) {
+ _mpd_ptrswap(&big, &small);
+ }
+
+ rsize = big->len + small->len;
+
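+ /* Select the multiplication algorithm by operand and result size:
+ * single word, small fixed buffer, short multiplication, Karatsuba,
+ * FNT, and finally Karatsuba with the FNT as the base case. */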
+ if (big->len == 1) {
+ _mpd_singlemul(result->data, big->data[0], small->data[0]);
+ goto finish;
+ }
+ if (rsize <= (mpd_size_t)MPD_MINALLOC_MAX) {
+ if (big->len == 2) {
+ _mpd_mul_2_le2(rbuf, big->data, small->data, small->len);
+ }
+ else {
+ mpd_uint_zero(rbuf, rsize);
+ if (small->len == 1) {
+ _mpd_shortmul(rbuf, big->data, big->len, small->data[0]);
+ }
+ else {
+ _mpd_basemul(rbuf, small->data, big->data, small->len, big->len);
+ }
+ }
+ if (!mpd_qresize(result, rsize, status)) {
+ return;
+ }
+ for(i = 0; i < rsize; i++) {
+ result->data[i] = rbuf[i];
+ }
+ goto finish;
+ }
+
+
+ if (small->len == 1) {
+ if ((rdata = mpd_calloc(rsize, sizeof *rdata)) == NULL) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return;
+ }
+ _mpd_shortmul(rdata, big->data, big->len, small->data[0]);
+ }
+ else if (rsize <= 1024) {
+ rdata = _mpd_kmul(big->data, small->data, big->len, small->len, &rsize);
+ if (rdata == NULL) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return;
+ }
+ }
+ else if (rsize <= 3*MPD_MAXTRANSFORM_2N) {
+ rdata = _mpd_fntmul(big->data, small->data, big->len, small->len, &rsize);
+ if (rdata == NULL) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return;
+ }
+ }
+ else {
+ rdata = _mpd_kmul_fnt(big->data, small->data, big->len, small->len, &rsize);
+ if (rdata == NULL) {
+ mpd_seterror(result, MPD_Malloc_error, status); /* GCOV_UNLIKELY */
+ return; /* GCOV_UNLIKELY */
+ }
+ }
+
+ if (mpd_isdynamic_data(result)) {
+ mpd_free(result->data);
+ }
+ result->data = rdata;
+ result->alloc = rsize;
+ mpd_set_dynamic_data(result);
+
+
+finish:
+ mpd_set_flags(result, mpd_sign(a)^mpd_sign(b));
+ result->exp = big->exp + small->exp;
+ result->len = _mpd_real_size(result->data, rsize);
+ /* resize to smaller cannot fail */
+ mpd_qresize(result, result->len, status);
+ mpd_setdigits(result);
+}
+
+/* Multiply a and b. */
+void
+mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ _mpd_qmul(result, a, b, ctx, status);
+ mpd_qfinalize(result, ctx, status);
+}
+
+/* Multiply decimal and mpd_ssize_t. */
+void
+mpd_qmul_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t maxcontext;
+ MPD_NEW_STATIC(bb,0,0,0,0);
+
+ mpd_maxcontext(&maxcontext);
+ mpd_qsset_ssize(&bb, b, &maxcontext, status);
+ mpd_qmul(result, a, &bb, ctx, status);
+ mpd_del(&bb);
+}
+
+/* Multiply decimal and mpd_uint_t. */
+void
+mpd_qmul_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t maxcontext;
+ MPD_NEW_STATIC(bb,0,0,0,0);
+
+ mpd_maxcontext(&maxcontext);
+ mpd_qsset_uint(&bb, b, &maxcontext, status);
+ mpd_qmul(result, a, &bb, ctx, status);
+ mpd_del(&bb);
+}
+
+void
+mpd_qmul_i32(mpd_t *result, const mpd_t *a, int32_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qmul_ssize(result, a, b, ctx, status);
+}
+
+void
+mpd_qmul_u32(mpd_t *result, const mpd_t *a, uint32_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qmul_uint(result, a, b, ctx, status);
+}
+
+#ifdef CONFIG_64
+void
+mpd_qmul_i64(mpd_t *result, const mpd_t *a, int64_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qmul_ssize(result, a, b, ctx, status);
+}
+
+void
+mpd_qmul_u64(mpd_t *result, const mpd_t *a, uint64_t b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_qmul_uint(result, a, b, ctx, status);
+}
+#endif
+
+/* Like the minus operator. */
+void
+mpd_qminus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ }
+
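+ /* An exact zero result is positive in all rounding modes except
+ * ROUND_FLOOR. */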
+ if (mpd_iszero(a) && ctx->round != MPD_ROUND_FLOOR) {
+ mpd_qcopy_abs(result, a, status);
+ }
+ else {
+ mpd_qcopy_negate(result, a, status);
+ }
+
+ mpd_qfinalize(result, ctx, status);
+}
+
+/* Like the plus operator. */
+void
+mpd_qplus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ }
+
+ if (mpd_iszero(a) && ctx->round != MPD_ROUND_FLOOR) {
+ mpd_qcopy_abs(result, a, status);
+ }
+ else {
+ mpd_qcopy(result, a, status);
+ }
+
+ mpd_qfinalize(result, ctx, status);
+}
+
+/* The largest representable number that is smaller than the operand. */
+void
+mpd_qnext_minus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_context_t workctx; /* function context */
+ MPD_NEW_CONST(tiny,MPD_POS,mpd_etiny(ctx)-1,1,1,1,1);
+
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ if (mpd_isinfinite(a)) {
+ if (mpd_isnegative(a)) {
+ mpd_qcopy(result, a, status);
+ return;
+ }
+ else {
+ mpd_clear_flags(result);
+ mpd_qmaxcoeff(result, ctx, status);
+ if (mpd_isnan(result)) {
+ return;
+ }
+ result->exp = ctx->emax - ctx->prec + 1;
+ return;
+ }
+ }
+ /* debug */
+ abort(); /* GCOV_NOT_REACHED */
+ }
+
+ mpd_workcontext(&workctx, ctx);
+ workctx.round = MPD_ROUND_FLOOR;
+
+ if (!mpd_qcopy(result, a, status)) {
+ return;
+ }
+
+ mpd_qfinalize(result, &workctx, &workctx.status);
+ if (workctx.status&(MPD_Inexact|MPD_Errors)) {
+ *status |= (workctx.status&MPD_Errors);
+ return;
+ }
+
+ workctx.status = 0;
+ mpd_qsub(result, a, &tiny, &workctx, &workctx.status);
+ *status |= (workctx.status&MPD_Errors);
+}
+
+/* The smallest representable number that is larger than the operand. */
+void
+mpd_qnext_plus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_context_t workctx;
+ MPD_NEW_CONST(tiny,MPD_POS,mpd_etiny(ctx)-1,1,1,1,1);
+
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ if (mpd_isinfinite(a)) {
+ if (mpd_ispositive(a)) {
+ mpd_qcopy(result, a, status);
+ }
+ else {
+ mpd_clear_flags(result);
+ mpd_qmaxcoeff(result, ctx, status);
+ if (mpd_isnan(result)) {
+ return;
+ }
+ mpd_set_flags(result, MPD_NEG);
+ result->exp = mpd_etop(ctx);
+ }
+ return;
+ }
+ }
+
+ mpd_workcontext(&workctx, ctx);
+ workctx.round = MPD_ROUND_CEILING;
+
+ if (!mpd_qcopy(result, a, status)) {
+ return;
+ }
+
+ mpd_qfinalize(result, &workctx, &workctx.status);
+ if (workctx.status & (MPD_Inexact|MPD_Errors)) {
+ *status |= (workctx.status&MPD_Errors);
+ return;
+ }
+
+ workctx.status = 0;
+ mpd_qadd(result, a, &tiny, &workctx, &workctx.status);
+ *status |= (workctx.status&MPD_Errors);
+}
+
+/*
+ * The number closest to the first operand in the direction of the
+ * second operand.
+ */
+void
+mpd_qnext_toward(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ int c;
+
+ if (mpd_isnan(a) || mpd_isnan(b)) {
+ if (mpd_qcheck_nans(result, a, b, ctx, status))
+ return;
+ }
+
+ c = _mpd_cmp(a, b);
+ if (c == 0) {
+ mpd_qcopy_sign(result, a, b, status);
+ return;
+ }
+
+ if (c < 0) {
+ mpd_qnext_plus(result, a, ctx, status);
+ }
+ else {
+ mpd_qnext_minus(result, a, ctx, status);
+ }
+
+ if (mpd_isinfinite(result)) {
+ *status |= (MPD_Overflow|MPD_Rounded|MPD_Inexact);
+ }
+ else if (mpd_adjexp(result) < ctx->emin) {
+ *status |= (MPD_Underflow|MPD_Subnormal|MPD_Rounded|MPD_Inexact);
+ if (mpd_iszero(result)) {
+ *status |= MPD_Clamped;
+ }
+ }
+}
+
+/*
+ * Internal function: Integer power with mpd_uint_t exponent, base is modified!
+ * Function can fail with MPD_Malloc_error.
+ */
+static inline void
+_mpd_qpow_uint(mpd_t *result, mpd_t *base, mpd_uint_t exp, uint8_t resultsign,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ uint32_t workstatus = 0;
+ mpd_uint_t n;
+
+ if (exp == 0) {
+ _settriple(result, resultsign, 1, 0); /* GCOV_NOT_REACHED */
+ return; /* GCOV_NOT_REACHED */
+ }
+
+ if (!mpd_qcopy(result, base, status)) {
+ return;
+ }
+
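+ /* Left-to-right binary exponentiation: result already holds base for
+ * the most significant bit; square for each remaining bit and multiply
+ * by base whenever the bit is set. */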
+ n = mpd_bits[mpd_bsr(exp)];
+ while (n >>= 1) {
+ mpd_qmul(result, result, result, ctx, &workstatus);
+ if (exp & n) {
+ mpd_qmul(result, result, base, ctx, &workstatus);
+ }
+ if (workstatus & (MPD_Overflow|MPD_Clamped)) {
+ break;
+ }
+ }
+
+ *status |= workstatus;
+ mpd_set_sign(result, resultsign);
+}
+
+/*
+ * Internal function: Integer power with mpd_t exponent, tbase and texp
+ * are modified!! Function can fail with MPD_Malloc_error.
+ */
+static inline void
+_mpd_qpow_mpd(mpd_t *result, mpd_t *tbase, mpd_t *texp, uint8_t resultsign,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ uint32_t workstatus = 0;
+ mpd_context_t maxctx;
+ MPD_NEW_CONST(two,0,0,1,1,1,2);
+
+
+ mpd_maxcontext(&maxctx);
+
+ /* resize to smaller cannot fail */
+ mpd_qcopy(result, &one, status);
+
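+ /* Right-to-left binary exponentiation: multiply into the result while
+ * the exponent is odd, then square the base and halve the exponent. */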
+ while (!mpd_iszero(texp)) {
+ if (mpd_isodd(texp)) {
+ mpd_qmul(result, result, tbase, ctx, &workstatus);
+ *status |= workstatus;
+ if (workstatus & (MPD_Overflow|MPD_Clamped)) {
+ break;
+ }
+ }
+ mpd_qmul(tbase, tbase, tbase, ctx, &workstatus);
+ mpd_qdivint(texp, texp, &two, &maxctx, &workstatus);
+ if (mpd_isnan(tbase) || mpd_isnan(texp)) {
+ mpd_seterror(result, workstatus&MPD_Errors, status);
+ return;
+ }
+ }
+ mpd_set_sign(result, resultsign);
+}
+
+/*
+ * The power function for integer exponents.
+ */
+static void
+_mpd_qpow_int(mpd_t *result, const mpd_t *base, const mpd_t *exp,
+ uint8_t resultsign,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t workctx;
+ MPD_NEW_STATIC(tbase,0,0,0,0);
+ MPD_NEW_STATIC(texp,0,0,0,0);
+ mpd_ssize_t n;
+
+
+ mpd_workcontext(&workctx, ctx);
+ workctx.prec += (exp->digits + exp->exp + 2);
+ workctx.round = MPD_ROUND_HALF_EVEN;
+ workctx.clamp = 0;
+ if (mpd_isnegative(exp)) {
+ mpd_qdiv(&tbase, &one, base, &workctx, status);
+ if (*status&MPD_Errors) {
+ mpd_setspecial(result, MPD_POS, MPD_NAN);
+ goto finish;
+ }
+ }
+ else {
+ if (!mpd_qcopy(&tbase, base, status)) {
+ mpd_setspecial(result, MPD_POS, MPD_NAN);
+ goto finish;
+ }
+ }
+
+ n = mpd_qabs_uint(exp, &workctx.status);
+ if (workctx.status&MPD_Invalid_operation) {
+ if (!mpd_qcopy(&texp, exp, status)) {
+ mpd_setspecial(result, MPD_POS, MPD_NAN); /* GCOV_UNLIKELY */
+ goto finish; /* GCOV_UNLIKELY */
+ }
+ _mpd_qpow_mpd(result, &tbase, &texp, resultsign, &workctx, status);
+ }
+ else {
+ _mpd_qpow_uint(result, &tbase, n, resultsign, &workctx, status);
+ }
+
+ if (mpd_isinfinite(result)) {
+ /* for ROUND_DOWN, ROUND_FLOOR, etc. */
+ _settriple(result, resultsign, 1, MPD_EXP_INF);
+ }
+
+finish:
+ mpd_del(&tbase);
+ mpd_del(&texp);
+ mpd_qfinalize(result, ctx, status);
+}
+
+/*
+ * If base equals one, set result to the power-of-one result for an
+ * infinite exponent: a one with a coefficient of ctx->prec digits,
+ * flagged Inexact and Rounded. Otherwise, result is undefined.
+ * Return the value of the comparison of base against one.
+ *
+ * This is an internal function that does not check for NaNs.
+ */
+static int
+_qcheck_pow_one_inf(mpd_t *result, const mpd_t *base, uint8_t resultsign,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_ssize_t shift;
+ int cmp;
+
+ if ((cmp = _mpd_cmp(base, &one)) == 0) {
+ shift = ctx->prec-1;
+ mpd_qshiftl(result, &one, shift, status);
+ result->exp = -shift;
+ mpd_set_flags(result, resultsign);
+ *status |= (MPD_Inexact|MPD_Rounded);
+ }
+
+ return cmp;
+}
+
+/*
+ * If base equals one, calculate the correct power of one result.
+ * Otherwise, result is undefined. Return the value of the comparison
+ * against 1.
+ *
+ * This is an internal function that does not check for specials.
+ */
+static int
+_qcheck_pow_one(mpd_t *result, const mpd_t *base, const mpd_t *exp,
+ uint8_t resultsign,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ uint32_t workstatus = 0;
+ mpd_ssize_t shift;
+ int cmp;
+
+ if ((cmp = _mpd_cmp_abs(base, &one)) == 0) {
+ if (_mpd_isint(exp)) {
+ if (mpd_isnegative(exp)) {
+ _settriple(result, resultsign, 1, 0);
+ return 0;
+ }
+ /* 1.000**3 = 1.000000000 */
+ mpd_qmul_ssize(result, exp, -base->exp, ctx, &workstatus);
+ if (workstatus&MPD_Errors) {
+ *status |= (workstatus&MPD_Errors);
+ return 0;
+ }
+ /* digits-1 after exponentiation */
+ shift = mpd_qget_ssize(result, &workstatus);
+ /* shift is MPD_SSIZE_MAX if result is too large */
+ if (shift > ctx->prec-1) {
+ shift = ctx->prec-1;
+ *status |= MPD_Rounded;
+ }
+ }
+ else if (mpd_ispositive(base)) {
+ shift = ctx->prec-1;
+ *status |= (MPD_Inexact|MPD_Rounded);
+ }
+ else {
+ return -2; /* GCOV_NOT_REACHED */
+ }
+ if (!mpd_qshiftl(result, &one, shift, status)) {
+ return 0;
+ }
+ result->exp = -shift;
+ mpd_set_flags(result, resultsign);
+ }
+
+ return cmp;
+}
+
+/*
+ * Detect certain over/underflow of x**y.
+ * ACL2 proof: pow_bounds.lisp.
+ *
+ * Symbols:
+ *
+ * e: EXP_INF or EXP_CLAMP
+ * x: base
+ * y: exponent
+ *
+ * omega(e) = log10(abs(e))
+ * zeta(x) = log10(abs(log10(x)))
+ * theta(y) = log10(abs(y))
+ *
+ * Upper and lower bounds:
+ *
+ * ub_omega(e) = ceil(log10(abs(e)))
+ * lb_theta(y) = floor(log10(abs(y)))
+ *
+ * | floor(log10(floor(abs(log10(x))))) if x < 1/10 or x >= 10
+ * lb_zeta(x) = | floor(log10(abs(x-1)/10)) if 1/10 <= x < 1
+ * | floor(log10(abs((x-1)/100))) if 1 < x < 10
+ *
+ * ub_omega(e) and lb_theta(y) are obviously upper and lower bounds
+ * for omega(e) and theta(y).
+ *
+ * lb_zeta is a lower bound for zeta(x):
+ *
+ * x < 1/10 or x >= 10:
+ *
+ * abs(log10(x)) >= 1, so the outer log10 is well defined. Since log10
+ * is strictly increasing, the end result is a lower bound.
+ *
+ * 1/10 <= x < 1:
+ *
+ * We use: log10(x) <= (x-1)/log(10)
+ * abs(log10(x)) >= abs(x-1)/log(10)
+ * abs(log10(x)) >= abs(x-1)/10
+ *
+ * 1 < x < 10:
+ *
+ * We use: (x-1)/(x*log(10)) < log10(x)
+ * abs((x-1)/100) < abs(log10(x))
+ *
+ * XXX: abs((x-1)/10) would work, need ACL2 proof.
+ *
+ *
+ * Let (0 < x < 1 and y < 0) or (x > 1 and y > 0). (H1)
+ * Let ub_omega(exp_inf) < lb_zeta(x) + lb_theta(y) (H2)
+ *
+ * Then:
+ * log10(abs(exp_inf)) < log10(abs(log10(x))) + log10(abs(y)). (1)
+ * exp_inf < log10(x) * y (2)
+ * 10**exp_inf < x**y (3)
+ *
+ * Let (0 < x < 1 and y > 0) or (x > 1 and y < 0). (H3)
+ * Let ub_omega(exp_clamp) < lb_zeta(x) + lb_theta(y) (H4)
+ *
+ * Then:
+ * log10(abs(exp_clamp)) < log10(abs(log10(x))) + log10(abs(y)). (4)
+ * log10(x) * y < exp_clamp (5)
+ * x**y < 10**exp_clamp (6)
+ *
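+ * Worked example (illustration only): for x = 2 and y = 10**30,
+ * lb_zeta(x) = floor(log10(1/100)) = -2 and lb_theta(y) = 30. So
+ * whenever ub_omega(exp_inf) < 28, hypotheses (H1) and (H2) hold and
+ * 10**exp_inf < 2**(10**30), i.e. the power certainly overflows.
+ *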
+ */
+static mpd_ssize_t
+_lower_bound_zeta(const mpd_t *x, uint32_t *status)
+{
+ mpd_context_t maxctx;
+ MPD_NEW_STATIC(scratch,0,0,0,0);
+ mpd_ssize_t t, u;
+
+ t = mpd_adjexp(x);
+ if (t > 0) {
+ /* x >= 10 -> floor(log10(floor(abs(log10(x))))) */
+ return mpd_exp_digits(t) - 1;
+ }
+ else if (t < -1) {
+ /* x < 1/10 -> floor(log10(floor(abs(log10(x))))) */
+ return mpd_exp_digits(t+1) - 1;
+ }
+ else {
+ mpd_maxcontext(&maxctx);
+ mpd_qsub(&scratch, x, &one, &maxctx, status);
+ if (mpd_isspecial(&scratch)) {
+ mpd_del(&scratch);
+ return MPD_SSIZE_MAX;
+ }
+ u = mpd_adjexp(&scratch);
+ mpd_del(&scratch);
+
+ /* t == -1, 1/10 <= x < 1 -> floor(log10(abs(x-1)/10))
+ * t == 0, 1 < x < 10 -> floor(log10(abs(x-1)/100)) */
+ return (t == 0) ? u-2 : u-1;
+ }
+}
+
+/*
+ * Detect cases of certain overflow/underflow in the power function.
+ * Assumptions: x != 1, y != 0. The proof above is for positive x.
+ * If x is negative and y is an odd integer, x**y == -(abs(x)**y),
+ * so the analysis does not change.
+ */
+static int
+_qcheck_pow_bounds(mpd_t *result, const mpd_t *x, const mpd_t *y,
+ uint8_t resultsign,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ MPD_NEW_SHARED(abs_x, x);
+ mpd_ssize_t ub_omega, lb_zeta, lb_theta;
+ uint8_t sign;
+
+ mpd_set_positive(&abs_x);
+
+ lb_theta = mpd_adjexp(y);
+ lb_zeta = _lower_bound_zeta(&abs_x, status);
+ if (lb_zeta == MPD_SSIZE_MAX) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return 1;
+ }
+
+ sign = (mpd_adjexp(&abs_x) < 0) ^ mpd_sign(y);
+ if (sign == 0) {
+ /* (0 < |x| < 1 and y < 0) or (|x| > 1 and y > 0) */
+ ub_omega = mpd_exp_digits(ctx->emax);
+ if (ub_omega < lb_zeta + lb_theta) {
+ _settriple(result, resultsign, 1, MPD_EXP_INF);
+ mpd_qfinalize(result, ctx, status);
+ return 1;
+ }
+ }
+ else {
+ /* (0 < |x| < 1 and y > 0) or (|x| > 1 and y < 0). */
+ ub_omega = mpd_exp_digits(mpd_etiny(ctx));
+ if (ub_omega < lb_zeta + lb_theta) {
+ _settriple(result, resultsign, 1, mpd_etiny(ctx)-1);
+ mpd_qfinalize(result, ctx, status);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * TODO: Implement algorithm for computing exact powers from decimal.py.
+ * In order to prevent infinite loops, this has to be called before
+ * using Ziv's strategy for correct rounding.
+ */
+/*
+static int
+_mpd_qpow_exact(mpd_t *result, const mpd_t *base, const mpd_t *exp,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ return 0;
+}
+*/
+
+/* The power function for real exponents. */
+static void
+_mpd_qpow_real(mpd_t *result, const mpd_t *base, const mpd_t *exp,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t workctx;
+ MPD_NEW_STATIC(texp,0,0,0,0);
+
+ if (!mpd_qcopy(&texp, exp, status)) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return;
+ }
+
+ mpd_maxcontext(&workctx);
+ workctx.prec = (base->digits > ctx->prec) ? base->digits : ctx->prec;
+ workctx.prec += (4 + MPD_EXPDIGITS);
+ workctx.round = MPD_ROUND_HALF_EVEN;
+ workctx.allcr = ctx->allcr;
+
+ mpd_qln(result, base, &workctx, &workctx.status);
+ mpd_qmul(result, result, &texp, &workctx, &workctx.status);
+ mpd_qexp(result, result, &workctx, status);
+
+ mpd_del(&texp);
+ *status |= (workctx.status&MPD_Errors);
+ *status |= (MPD_Inexact|MPD_Rounded);
+}
+
+/* The power function: base**exp */
+void
+mpd_qpow(mpd_t *result, const mpd_t *base, const mpd_t *exp,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ uint8_t resultsign = 0;
+ int intexp = 0;
+ int cmp;
+
+ if (mpd_isspecial(base) || mpd_isspecial(exp)) {
+ if (mpd_qcheck_nans(result, base, exp, ctx, status)) {
+ return;
+ }
+ }
+ if (mpd_isinteger(exp)) {
+ intexp = 1;
+ resultsign = mpd_isnegative(base) && mpd_isodd(exp);
+ }
+
+ if (mpd_iszero(base)) {
+ if (mpd_iszero(exp)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ }
+ else if (mpd_isnegative(exp)) {
+ mpd_setspecial(result, resultsign, MPD_INF);
+ }
+ else {
+ _settriple(result, resultsign, 0, 0);
+ }
+ return;
+ }
+ if (mpd_isnegative(base)) {
+ if (!intexp || mpd_isinfinite(exp)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ }
+ if (mpd_isinfinite(exp)) {
+ /* power of one */
+ cmp = _qcheck_pow_one_inf(result, base, resultsign, ctx, status);
+ if (cmp == 0) {
+ return;
+ }
+ else {
+ cmp *= mpd_arith_sign(exp);
+ if (cmp < 0) {
+ _settriple(result, resultsign, 0, 0);
+ }
+ else {
+ mpd_setspecial(result, resultsign, MPD_INF);
+ }
+ }
+ return;
+ }
+ if (mpd_isinfinite(base)) {
+ if (mpd_iszero(exp)) {
+ _settriple(result, resultsign, 1, 0);
+ }
+ else if (mpd_isnegative(exp)) {
+ _settriple(result, resultsign, 0, 0);
+ }
+ else {
+ mpd_setspecial(result, resultsign, MPD_INF);
+ }
+ return;
+ }
+ if (mpd_iszero(exp)) {
+ _settriple(result, resultsign, 1, 0);
+ return;
+ }
+ if (_qcheck_pow_one(result, base, exp, resultsign, ctx, status) == 0) {
+ return;
+ }
+ if (_qcheck_pow_bounds(result, base, exp, resultsign, ctx, status)) {
+ return;
+ }
+
+ if (intexp) {
+ _mpd_qpow_int(result, base, exp, resultsign, ctx, status);
+ }
+ else {
+ _mpd_qpow_real(result, base, exp, ctx, status);
+ if (!mpd_isspecial(result) && _mpd_cmp(result, &one) == 0) {
+ mpd_ssize_t shift = ctx->prec-1;
+ mpd_qshiftl(result, &one, shift, status);
+ result->exp = -shift;
+ }
+ if (mpd_isinfinite(result)) {
+ /* for ROUND_DOWN, ROUND_FLOOR, etc. */
+ _settriple(result, MPD_POS, 1, MPD_EXP_INF);
+ }
+ mpd_qfinalize(result, ctx, status);
+ }
+}
+
+/*
+ * Internal function: Integer powmod with mpd_uint_t exponent, base is modified!
+ * Function can fail with MPD_Malloc_error.
+ */
+static inline void
+_mpd_qpowmod_uint(mpd_t *result, mpd_t *base, mpd_uint_t exp,
+ mpd_t *mod, uint32_t *status)
+{
+ mpd_context_t maxcontext;
+
+ mpd_maxcontext(&maxcontext);
+
+ /* resize to smaller cannot fail */
+ mpd_qcopy(result, &one, status);
+
+ while (exp > 0) {
+ if (exp & 1) {
+ mpd_qmul(result, result, base, &maxcontext, status);
+ mpd_qrem(result, result, mod, &maxcontext, status);
+ }
+ mpd_qmul(base, base, base, &maxcontext, status);
+ mpd_qrem(base, base, mod, &maxcontext, status);
+ exp >>= 1;
+ }
+}
+
+/* The powmod function: (base**exp) % mod */
+void
+mpd_qpowmod(mpd_t *result, const mpd_t *base, const mpd_t *exp,
+ const mpd_t *mod,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t maxcontext;
+ MPD_NEW_STATIC(tbase,0,0,0,0);
+ MPD_NEW_STATIC(texp,0,0,0,0);
+ MPD_NEW_STATIC(tmod,0,0,0,0);
+ MPD_NEW_STATIC(tmp,0,0,0,0);
+ MPD_NEW_CONST(two,0,0,1,1,1,2);
+ mpd_ssize_t tbase_exp, texp_exp;
+ mpd_ssize_t i;
+ mpd_t t;
+ mpd_uint_t r;
+ uint8_t sign;
+
+
+ if (mpd_isspecial(base) || mpd_isspecial(exp) || mpd_isspecial(mod)) {
+ if (mpd_qcheck_3nans(result, base, exp, mod, ctx, status)) {
+ return;
+ }
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+
+ if (!_mpd_isint(base) || !_mpd_isint(exp) || !_mpd_isint(mod)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (mpd_iszerocoeff(mod)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (mod->digits+mod->exp > ctx->prec) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ sign = (mpd_isnegative(base)) && (mpd_isodd(exp));
+ if (mpd_iszerocoeff(exp)) {
+ if (mpd_iszerocoeff(base)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ r = (_mpd_cmp_abs(mod, &one)==0) ? 0 : 1;
+ _settriple(result, sign, r, 0);
+ return;
+ }
+ if (mpd_isnegative(exp)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (mpd_iszerocoeff(base)) {
+ _settriple(result, sign, 0, 0);
+ return;
+ }
+
+ if (!mpd_qcopy(&tmod, mod, status)) {
+ goto mpd_errors;
+ }
+ mpd_set_positive(&tmod);
+
+ mpd_maxcontext(&maxcontext);
+
+ mpd_qround_to_int(&tbase, base, &maxcontext, status);
+ mpd_qround_to_int(&texp, exp, &maxcontext, status);
+ mpd_qround_to_int(&tmod, &tmod, &maxcontext, status);
+
+ tbase_exp = tbase.exp;
+ tbase.exp = 0;
+ texp_exp = texp.exp;
+ texp.exp = 0;
+
+ /* base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo */
+ mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
+ _settriple(result, MPD_POS, 1, tbase_exp);
+ mpd_qrem(result, result, &tmod, &maxcontext, status);
+ mpd_qmul(&tbase, &tbase, result, &maxcontext, status);
+ mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
+ if (mpd_isspecial(&tbase) ||
+ mpd_isspecial(&texp) ||
+ mpd_isspecial(&tmod)) {
+ goto mpd_errors;
+ }
+
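+ /* exp == exp.int * 10**exp.exp: account for the exponent's power of
+ * ten by raising the base to the tenth power texp_exp times, reducing
+ * modulo tmod at each step. */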
+ for (i = 0; i < texp_exp; i++) {
+ _mpd_qpowmod_uint(&tmp, &tbase, 10, &tmod, status);
+ t = tmp;
+ tmp = tbase;
+ tbase = t;
+ }
+ if (mpd_isspecial(&tbase)) {
+ goto mpd_errors; /* GCOV_UNLIKELY */
+ }
+
+ /* resize to smaller cannot fail */
+ mpd_qcopy(result, &one, status);
+ while (mpd_isfinite(&texp) && !mpd_iszero(&texp)) {
+ if (mpd_isodd(&texp)) {
+ mpd_qmul(result, result, &tbase, &maxcontext, status);
+ mpd_qrem(result, result, &tmod, &maxcontext, status);
+ }
+ mpd_qmul(&tbase, &tbase, &tbase, &maxcontext, status);
+ mpd_qrem(&tbase, &tbase, &tmod, &maxcontext, status);
+ mpd_qdivint(&texp, &texp, &two, &maxcontext, status);
+ }
+ if (mpd_isspecial(&texp) || mpd_isspecial(&tbase) ||
+ mpd_isspecial(&tmod) || mpd_isspecial(result)) {
+ /* MPD_Malloc_error */
+ goto mpd_errors;
+ }
+ else {
+ mpd_set_sign(result, sign);
+ }
+
+out:
+ mpd_del(&tbase);
+ mpd_del(&texp);
+ mpd_del(&tmod);
+ mpd_del(&tmp);
+ mpd_qfinalize(result, ctx, status);
+ return;
+
+mpd_errors:
+ mpd_setspecial(result, MPD_POS, MPD_NAN);
+ goto out;
+}
+
+void
+mpd_qquantize(mpd_t *result, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ uint32_t workstatus = 0;
+ mpd_ssize_t b_exp = b->exp;
+ mpd_ssize_t expdiff, shift;
+ mpd_uint_t rnd;
+
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(result, a, b, ctx, status)) {
+ return;
+ }
+ if (mpd_isinfinite(a) && mpd_isinfinite(b)) {
+ mpd_qcopy(result, a, status);
+ return;
+ }
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ if (b->exp > ctx->emax || b->exp < mpd_etiny(ctx)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ if (mpd_iszero(a)) {
+ _settriple(result, mpd_sign(a), 0, b->exp);
+ mpd_qfinalize(result, ctx, status);
+ return;
+ }
+
+
+ expdiff = a->exp - b->exp;
+ if (a->digits + expdiff > ctx->prec) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ if (expdiff >= 0) {
+ shift = expdiff;
+ if (!mpd_qshiftl(result, a, shift, status)) {
+ return;
+ }
+ result->exp = b_exp;
+ }
+ else {
+ /* At this point expdiff < 0 and a->digits+expdiff <= prec,
+ * so the shift before an increment will fit in prec. */
+ shift = -expdiff;
+ rnd = mpd_qshiftr(result, a, shift, status);
+ if (rnd == MPD_UINT_MAX) {
+ return;
+ }
+ result->exp = b_exp;
+ if (!_mpd_apply_round_fit(result, rnd, ctx, status)) {
+ return;
+ }
+ workstatus |= MPD_Rounded;
+ if (rnd) {
+ workstatus |= MPD_Inexact;
+ }
+ }
+
+ if (mpd_adjexp(result) > ctx->emax ||
+ mpd_adjexp(result) < mpd_etiny(ctx)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ *status |= workstatus;
+ mpd_qfinalize(result, ctx, status);
+}
+
+void
+mpd_qreduce(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_ssize_t shift, maxexp, maxshift;
+ uint8_t sign_a = mpd_sign(a);
+
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ mpd_qcopy(result, a, status);
+ return;
+ }
+
+ if (!mpd_qcopy(result, a, status)) {
+ return;
+ }
+ mpd_qfinalize(result, ctx, status);
+ if (mpd_isspecial(result)) {
+ return;
+ }
+ if (mpd_iszero(result)) {
+ _settriple(result, sign_a, 0, 0);
+ return;
+ }
+
+ shift = mpd_trail_zeros(result);
+ maxexp = (ctx->clamp) ? mpd_etop(ctx) : ctx->emax;
+ /* After the finalizing above result->exp <= maxexp. */
+ maxshift = maxexp - result->exp;
+ shift = (shift > maxshift) ? maxshift : shift;
+
+ mpd_qshiftr_inplace(result, shift);
+ result->exp += shift;
+}
+
+void
+mpd_qrem(mpd_t *r, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ MPD_NEW_STATIC(q,0,0,0,0);
+
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(r, a, b, ctx, status)) {
+ return;
+ }
+ if (mpd_isinfinite(a)) {
+ mpd_seterror(r, MPD_Invalid_operation, status);
+ return;
+ }
+ if (mpd_isinfinite(b)) {
+ mpd_qcopy(r, a, status);
+ mpd_qfinalize(r, ctx, status);
+ return;
+ }
+ /* debug */
+ abort(); /* GCOV_NOT_REACHED */
+ }
+ if (mpd_iszerocoeff(b)) {
+ if (mpd_iszerocoeff(a)) {
+ mpd_seterror(r, MPD_Division_undefined, status);
+ }
+ else {
+ mpd_seterror(r, MPD_Invalid_operation, status);
+ }
+ return;
+ }
+
+ _mpd_qdivmod(&q, r, a, b, ctx, status);
+ mpd_del(&q);
+ mpd_qfinalize(r, ctx, status);
+}
+
+void
+mpd_qrem_near(mpd_t *r, const mpd_t *a, const mpd_t *b,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t workctx;
+ MPD_NEW_STATIC(btmp,0,0,0,0);
+ MPD_NEW_STATIC(q,0,0,0,0);
+ mpd_ssize_t expdiff, floordigits;
+ int cmp, isodd, allnine;
+
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ if (mpd_qcheck_nans(r, a, b, ctx, status)) {
+ return;
+ }
+ if (mpd_isinfinite(a)) {
+ mpd_seterror(r, MPD_Invalid_operation, status);
+ return;
+ }
+ if (mpd_isinfinite(b)) {
+ mpd_qcopy(r, a, status);
+ mpd_qfinalize(r, ctx, status);
+ return;
+ }
+ /* debug */
+ abort(); /* GCOV_NOT_REACHED */
+ }
+ if (mpd_iszerocoeff(b)) {
+ if (mpd_iszerocoeff(a)) {
+ mpd_seterror(r, MPD_Division_undefined, status);
+ }
+ else {
+ mpd_seterror(r, MPD_Invalid_operation, status);
+ }
+ return;
+ }
+
+ if (r == b) {
+ if (!mpd_qcopy(&btmp, b, status)) {
+ mpd_seterror(r, MPD_Malloc_error, status);
+ return;
+ }
+ b = &btmp;
+ }
+
+ workctx = *ctx;
+ workctx.prec = a->digits;
+ workctx.prec = (workctx.prec > ctx->prec) ? workctx.prec : ctx->prec;
+
+ _mpd_qdivmod(&q, r, a, b, &workctx, status);
+ if (mpd_isnan(&q) || mpd_isnan(r) || q.digits > ctx->prec) {
+ mpd_seterror(r, MPD_Division_impossible, status);
+ goto finish;
+ }
+ if (mpd_iszerocoeff(r)) {
+ goto finish;
+ }
+
+ /* Deal with cases like rmnx078:
+ * remaindernear 999999999.5 1 -> NaN Division_impossible */
+ expdiff = mpd_adjexp(b) - mpd_adjexp(r);
+ if (-1 <= expdiff && expdiff <= 1) {
+
+ mpd_qtrunc(&q, &q, &workctx, &workctx.status);
+ allnine = mpd_coeff_isallnine(&q);
+ floordigits = q.digits;
+ isodd = mpd_isodd(&q);
+
+ mpd_maxcontext(&workctx);
+ if (mpd_sign(a) == mpd_sign(b)) {
+ _mpd_qsub(&q, r, b, &workctx, &workctx.status);
+ if (workctx.status&MPD_Errors) {
+ mpd_seterror(r, workctx.status&MPD_Errors, status);
+ goto finish;
+ }
+ }
+ else {
+ _mpd_qadd(&q, r, b, &workctx, &workctx.status);
+ if (workctx.status&MPD_Errors) {
+ mpd_seterror(r, workctx.status&MPD_Errors, status);
+ goto finish;
+ }
+ }
+
+ cmp = mpd_cmp_total_mag(&q, r);
+ if (cmp < 0 || (cmp == 0 && isodd)) {
+ if (allnine && floordigits == ctx->prec) {
+ mpd_seterror(r, MPD_Division_impossible, status);
+ goto finish;
+ }
+ mpd_qcopy(r, &q, status);
+ *status &= ~MPD_Rounded;
+ }
+ }
+
+
+finish:
+ mpd_del(&btmp);
+ mpd_del(&q);
+ mpd_qfinalize(r, ctx, status);
+}
+
+static void
+_mpd_qrescale(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_ssize_t expdiff, shift;
+ mpd_uint_t rnd;
+
+ if (mpd_isspecial(a)) {
+ mpd_qcopy(result, a, status);
+ return;
+ }
+
+ if (mpd_iszero(a)) {
+ _settriple(result, mpd_sign(a), 0, exp);
+ return;
+ }
+
+ expdiff = a->exp - exp;
+ if (expdiff >= 0) {
+ shift = expdiff;
+ if (a->digits + shift > MPD_MAX_PREC+1) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (!mpd_qshiftl(result, a, shift, status)) {
+ return;
+ }
+ result->exp = exp;
+ }
+ else {
+ shift = -expdiff;
+ rnd = mpd_qshiftr(result, a, shift, status);
+ if (rnd == MPD_UINT_MAX) {
+ return;
+ }
+ result->exp = exp;
+ _mpd_apply_round_excess(result, rnd, ctx, status);
+ *status |= MPD_Rounded;
+ if (rnd) {
+ *status |= MPD_Inexact;
+ }
+ }
+
+ if (mpd_issubnormal(result, ctx)) {
+ *status |= MPD_Subnormal;
+ }
+}
+
+/*
+ * Rescale a number so that it has exponent 'exp'. Does not regard context
+ * precision, emax, emin, but uses the rounding mode. Special numbers are
+ * quietly copied. Restrictions:
+ *
+ * MPD_MIN_ETINY <= exp <= MPD_MAX_EMAX+1
+ * result->digits <= MPD_MAX_PREC+1
+ */
+void
+mpd_qrescale(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ if (exp > MPD_MAX_EMAX+1 || exp < MPD_MIN_ETINY) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ _mpd_qrescale(result, a, exp, ctx, status);
+}
+
+/*
+ * Same as mpd_qrescale, but with relaxed restrictions. The result of this
+ * function should only be used for formatting a number and never as input
+ * for other operations.
+ *
+ * MPD_MIN_ETINY-MPD_MAX_PREC <= exp <= MPD_MAX_EMAX+1
+ * result->digits <= MPD_MAX_PREC+1
+ */
+void
+mpd_qrescale_fmt(mpd_t *result, const mpd_t *a, mpd_ssize_t exp,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ if (exp > MPD_MAX_EMAX+1 || exp < MPD_MIN_ETINY-MPD_MAX_PREC) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ _mpd_qrescale(result, a, exp, ctx, status);
+}
+
+/* Round to an integer according to 'action' and ctx->round. */
+enum {TO_INT_EXACT, TO_INT_SILENT, TO_INT_TRUNC};
+static void
+_mpd_qround_to_integral(int action, mpd_t *result, const mpd_t *a,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_uint_t rnd;
+
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ mpd_qcopy(result, a, status);
+ return;
+ }
+ if (a->exp >= 0) {
+ mpd_qcopy(result, a, status);
+ return;
+ }
+ if (mpd_iszerocoeff(a)) {
+ _settriple(result, mpd_sign(a), 0, 0);
+ return;
+ }
+
+ rnd = mpd_qshiftr(result, a, -a->exp, status);
+ if (rnd == MPD_UINT_MAX) {
+ return;
+ }
+ result->exp = 0;
+
+ if (action == TO_INT_EXACT || action == TO_INT_SILENT) {
+ _mpd_apply_round_excess(result, rnd, ctx, status);
+ if (action == TO_INT_EXACT) {
+ *status |= MPD_Rounded;
+ if (rnd) {
+ *status |= MPD_Inexact;
+ }
+ }
+ }
+}
+
+void
+mpd_qround_to_intx(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ (void)_mpd_qround_to_integral(TO_INT_EXACT, result, a, ctx, status);
+}
+
+void
+mpd_qround_to_int(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a, ctx, status);
+}
+
+void
+mpd_qtrunc(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ (void)_mpd_qround_to_integral(TO_INT_TRUNC, result, a, ctx, status);
+}
+
+void
+mpd_qfloor(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_context_t workctx = *ctx;
+ workctx.round = MPD_ROUND_FLOOR;
+ (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a,
+ &workctx, status);
+}
+
+void
+mpd_qceil(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_context_t workctx = *ctx;
+ workctx.round = MPD_ROUND_CEILING;
+ (void)_mpd_qround_to_integral(TO_INT_SILENT, result, a,
+ &workctx, status);
+}
+
+int
+mpd_same_quantum(const mpd_t *a, const mpd_t *b)
+{
+ if (mpd_isspecial(a) || mpd_isspecial(b)) {
+ return ((mpd_isnan(a) && mpd_isnan(b)) ||
+ (mpd_isinfinite(a) && mpd_isinfinite(b)));
+ }
+
+ return a->exp == b->exp;
+}
+
+/* Schedule the increase in precision for the Newton iteration. */
+static inline int
+recpr_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2],
+ mpd_ssize_t maxprec, mpd_ssize_t initprec)
+{
+ mpd_ssize_t k;
+ int i;
+
+ assert(maxprec > 0 && initprec > 0);
+ if (maxprec <= initprec) return -1;
+
+ i = 0; k = maxprec;
+ do {
+ k = (k+1) / 2;
+ klist[i++] = k;
+ } while (k > initprec);
+
+ return i-1;
+}
+
+/* Initial approximation for the reciprocal. */
+static void
+_mpd_qreciprocal_approx(mpd_t *z, const mpd_t *v, uint32_t *status)
+{
+ mpd_uint_t p10data[2] = {0, mpd_pow10[MPD_RDIGITS-2]}; /* 10**(2*MPD_RDIGITS-2) */
+ mpd_uint_t dummy, word;
+ int n;
+
+ assert(v->exp == -v->digits);
+
+ _mpd_get_msdigits(&dummy, &word, v, MPD_RDIGITS);
+ n = mpd_word_digits(word);
+ word *= mpd_pow10[MPD_RDIGITS-n];
+
+ mpd_qresize(z, 2, status);
+ (void)_mpd_shortdiv(z->data, p10data, 2, word);
+
+ mpd_clear_flags(z);
+ z->exp = -(MPD_RDIGITS-2);
+ z->len = (z->data[1] == 0) ? 1 : 2;
+ mpd_setdigits(z);
+}
+
+/* Reciprocal, calculated with Newton's Method. Assumption: result != a. */
+static void
+_mpd_qreciprocal(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ mpd_context_t varcontext, maxcontext;
+ mpd_t *z = result; /* current approximation */
+ mpd_t *v; /* a, normalized to a number between 0.1 and 1 */
+ MPD_NEW_SHARED(vtmp, a); /* v shares data with a */
+ MPD_NEW_STATIC(s,0,0,0,0); /* temporary variable */
+ MPD_NEW_STATIC(t,0,0,0,0); /* temporary variable */
+ MPD_NEW_CONST(two,0,0,1,1,1,2); /* const 2 */
+ mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
+ mpd_ssize_t adj, maxprec, initprec;
+ uint8_t sign = mpd_sign(a);
+ int i;
+
+ assert(result != a);
+
+ v = &vtmp;
+ mpd_clear_flags(v);
+ adj = v->digits + v->exp;
+ v->exp = -v->digits;
+
+ /* initial approximation */
+ _mpd_qreciprocal_approx(z, v, status);
+
+ mpd_maxcontext(&varcontext);
+ mpd_maxcontext(&maxcontext);
+ varcontext.round = MPD_ROUND_TRUNC;
+ maxcontext.round = MPD_ROUND_TRUNC;
+
+ maxprec = (v->digits > ctx->prec) ? v->digits : ctx->prec;
+ maxprec += 2;
+ initprec = MPD_RDIGITS-3;
+
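+ /* Newton iteration for 1/v: z' = 2*z - v*z*z, roughly doubling the
+ * number of correct digits in each step. */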
+ i = recpr_schedule_prec(klist, maxprec, initprec);
+ for (; i >= 0; i--) {
+ mpd_qmul(&s, z, z, &maxcontext, status);
+ varcontext.prec = 2*klist[i] + 5;
+ if (v->digits > varcontext.prec) {
+ mpd_qshiftr(&t, v, v->digits-varcontext.prec, status);
+ t.exp = -varcontext.prec;
+ mpd_qmul(&t, &t, &s, &varcontext, status);
+ }
+ else {
+ mpd_qmul(&t, v, &s, &varcontext, status);
+ }
+ mpd_qmul(&s, z, &two, &maxcontext, status);
+ mpd_qsub(z, &s, &t, &maxcontext, status);
+ }
+
+ if (!mpd_isspecial(z)) {
+ z->exp -= adj;
+ mpd_set_flags(z, sign);
+ }
+
+ mpd_del(&s);
+ mpd_del(&t);
+ mpd_qfinalize(z, ctx, status);
+}
+
+/*
+ * Integer division with remainder of the coefficients: coeff(a) / coeff(b).
+ * This function is for large numbers where it is faster to divide by
+ * multiplying the dividend by the reciprocal of the divisor.
+ * The inexact result is fixed by a small loop, which should not take
+ * more than 2 iterations.
+ */
+static void
+_mpd_qbarrett_divmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b,
+ uint32_t *status)
+{
+ mpd_context_t workctx;
+ mpd_t *qq = q, *rr = r;
+ mpd_t aa, bb;
+ int k;
+
+ mpd_maxcontext(&workctx);
+ _mpd_copy_shared(&aa, a);
+ _mpd_copy_shared(&bb, b);
+
+ mpd_set_positive(&aa);
+ mpd_set_positive(&bb);
+ aa.exp = 0;
+ bb.exp = 0;
+
+ if (q == a || q == b) {
+ if ((qq = mpd_qnew()) == NULL) {
+ *status |= MPD_Malloc_error;
+ goto nanresult;
+ }
+ }
+ if (r == a || r == b) {
+ if ((rr = mpd_qnew()) == NULL) {
+ *status |= MPD_Malloc_error;
+ goto nanresult;
+ }
+ }
+
+ /* maximum length of q + 3 digits */
+ workctx.prec = aa.digits - bb.digits + 1 + 3;
+ /* we get the reciprocal with precision maxlen(q) + 3 */
+ _mpd_qreciprocal(rr, &bb, &workctx, &workctx.status);
+
+ mpd_qmul(qq, &aa, rr, &workctx, &workctx.status);
+ mpd_qtrunc(qq, qq, &workctx, &workctx.status);
+
+ workctx.prec = aa.digits + 3;
+ /* get the remainder */
+ mpd_qmul(rr, &bb, qq, &workctx, &workctx.status);
+ mpd_qsub(rr, &aa, rr, &workctx, &workctx.status);
+
+ /* Fix the result. Algorithm from: Karl Hasselstrom, Fast Division of Large Integers */
+ for (k = 0;; k++) {
+ if (mpd_isspecial(rr)) {
+ *status |= (workctx.status&MPD_Errors);
+ goto nanresult;
+ }
+ if (k > 2) {
+ mpd_err_warn("libmpdec: internal error in " /* GCOV_NOT_REACHED */
+ "_mpd_qbarrett_divmod: please report"); /* GCOV_NOT_REACHED */
+ *status |= MPD_Invalid_operation; /* GCOV_NOT_REACHED */
+ goto nanresult; /* GCOV_NOT_REACHED */
+ }
+ else if (_mpd_cmp(&zero, rr) == 1) {
+ mpd_qadd(rr, rr, &bb, &workctx, &workctx.status);
+ mpd_qadd(qq, qq, &minus_one, &workctx, &workctx.status);
+ }
+ else if (_mpd_cmp(rr, &bb) == -1) {
+ break;
+ }
+ else {
+ mpd_qsub(rr, rr, &bb, &workctx, &workctx.status);
+ mpd_qadd(qq, qq, &one, &workctx, &workctx.status);
+ }
+ }
+
+ if (qq != q) {
+ if (!mpd_qcopy(q, qq, status)) {
+ goto nanresult; /* GCOV_UNLIKELY */
+ }
+ mpd_del(qq);
+ }
+ if (rr != r) {
+ if (!mpd_qcopy(r, rr, status)) {
+ goto nanresult; /* GCOV_UNLIKELY */
+ }
+ mpd_del(rr);
+ }
+
+ *status |= (workctx.status&MPD_Errors);
+ return;
+
+
+nanresult:
+ if (qq && qq != q) mpd_del(qq);
+ if (rr && rr != r) mpd_del(rr);
+ mpd_setspecial(q, MPD_POS, MPD_NAN);
+ mpd_setspecial(r, MPD_POS, MPD_NAN);
+}
+
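+/* Schedule the precision increase for the inverse square root iteration. */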
+static inline int
+invroot_schedule_prec(mpd_ssize_t klist[MPD_MAX_PREC_LOG2],
+ mpd_ssize_t maxprec, mpd_ssize_t initprec)
+{
+ mpd_ssize_t k;
+ int i;
+
+ assert(maxprec >= 3 && initprec >= 3);
+ if (maxprec <= initprec) return -1;
+
+ i = 0; k = maxprec;
+ do {
+ k = (k+3) / 2;
+ klist[i++] = k;
+ } while (k > initprec);
+
+ return i-1;
+}
+
+/*
+ * Initial approximation for the inverse square root.
+ *
+ * Input:
+ * v := 7 or 8 decimal digits with an implicit exponent of 10**-6,
+ * representing a number 1 <= x < 100.
+ *
+ * Output:
+ * An approximation to 1/sqrt(v)
+ */
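+/*
+ * Example: for x = 2 the input is v = 2000000.  The binary search finds
+ * a = floor(sqrt(v)) = 1414 and z is set to 10**9/1414 = 707213 with an
+ * exponent of -6, i.e. 0.707213 =~ 1/sqrt(2), accurate to about four
+ * significant digits (the iteration schedule starts at initprec=3).
+ */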
+static inline void
+_invroot_init_approx(mpd_t *z, mpd_uint_t v)
+{
+ mpd_uint_t lo = 1000;
+ mpd_uint_t hi = 10000;
+ mpd_uint_t a, sq;
+
+ assert(v >= lo*lo && v < (hi+1)*(hi+1));
+
+ for(;;) {
+ a = (lo + hi) / 2;
+ sq = a * a;
+ if (v >= sq) {
+ if (v < sq + 2*a + 1) {
+ break;
+ }
+ lo = a + 1;
+ }
+ else {
+ hi = a - 1;
+ }
+ }
+
+ /* At this point a/1000 is an approximation to sqrt(v). */
+ mpd_minalloc(z);
+ mpd_clear_flags(z);
+ z->data[0] = 1000000000UL / a;
+ z->len = 1;
+ z->exp = -6;
+ mpd_setdigits(z);
+}
+
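+/*
+ * Inverse square root, calculated with Newton's Method for
+ * f(z) = 1/z**2 - v:
+ *
+ *     z_{k+1} = z_k * (3 - v*z_k**2) / 2
+ *
+ * a is first normalized to 1 <= v < 100; the exponent is restored at the
+ * end via 'adj'.
+ */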
+static void
+_mpd_qinvroot(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ uint32_t workstatus = 0;
+ mpd_context_t varcontext, maxcontext;
+ mpd_t *z = result; /* current approximation */
+ mpd_t *v; /* a, normalized to a number between 1 and 100 */
+ MPD_NEW_SHARED(vtmp, a); /* by default v will share data with a */
+ MPD_NEW_STATIC(s,0,0,0,0); /* temporary variable */
+ MPD_NEW_STATIC(t,0,0,0,0); /* temporary variable */
+ MPD_NEW_CONST(one_half,0,-1,1,1,1,5);
+ MPD_NEW_CONST(three,0,0,1,1,1,3);
+ mpd_ssize_t klist[MPD_MAX_PREC_LOG2];
+ mpd_ssize_t ideal_exp, shift;
+ mpd_ssize_t adj, tz;
+ mpd_ssize_t maxprec, fracdigits;
+ mpd_uint_t x, dummy;
+ int i, n;
+
+
+ ideal_exp = -(a->exp - (a->exp & 1)) / 2;
+
+ v = &vtmp;
+ if (result == a) {
+ if ((v = mpd_qncopy(a)) == NULL) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return;
+ }
+ }
+
+ /* normalize a to 1 <= v < 100 */
+ if ((v->digits+v->exp) & 1) {
+ fracdigits = v->digits - 1;
+ v->exp = -fracdigits;
+ n = (v->digits > 7) ? 7 : (int)v->digits;
+ _mpd_get_msdigits(&dummy, &x, v, n);
+ if (n < 7) {
+ x *= mpd_pow10[7-n];
+ }
+ }
+ else {
+ fracdigits = v->digits - 2;
+ v->exp = -fracdigits;
+ n = (v->digits > 8) ? 8 : (int)v->digits;
+ _mpd_get_msdigits(&dummy, &x, v, n);
+ if (n < 8) {
+ x *= mpd_pow10[8-n];
+ }
+ }
+ adj = (a->exp-v->exp) / 2;
+
+ /* initial approximation */
+ _invroot_init_approx(z, x);
+
+ mpd_maxcontext(&maxcontext);
+ mpd_maxcontext(&varcontext);
+ varcontext.round = MPD_ROUND_TRUNC;
+ maxprec = ctx->prec + 2;
+
+ i = invroot_schedule_prec(klist, maxprec, 3);
+ for (; i >= 0; i--) {
+ varcontext.prec = 2*klist[i]+2;
+ mpd_qmul(&s, z, z, &maxcontext, &workstatus);
+ if (v->digits > varcontext.prec) {
+ shift = v->digits - varcontext.prec;
+ mpd_qshiftr(&t, v, shift, &workstatus);
+ t.exp += shift;
+ mpd_qmul(&t, &t, &s, &varcontext, &workstatus);
+ }
+ else {
+ mpd_qmul(&t, v, &s, &varcontext, &workstatus);
+ }
+ mpd_qsub(&t, &three, &t, &maxcontext, &workstatus);
+ mpd_qmul(z, z, &t, &varcontext, &workstatus);
+ mpd_qmul(z, z, &one_half, &maxcontext, &workstatus);
+ }
+
+ z->exp -= adj;
+
+ tz = mpd_trail_zeros(result);
+ shift = ideal_exp - result->exp;
+ shift = (tz > shift) ? shift : tz;
+ if (shift > 0) {
+ mpd_qshiftr_inplace(result, shift);
+ result->exp += shift;
+ }
+
+
+ mpd_del(&s);
+ mpd_del(&t);
+ if (v != &vtmp) mpd_del(v);
+ *status |= (workstatus&MPD_Errors);
+ varcontext = *ctx;
+ varcontext.round = MPD_ROUND_HALF_EVEN;
+ mpd_qfinalize(result, &varcontext, status);
+}
+
+void
+mpd_qinvroot(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ if (mpd_isnegative(a)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ /* positive infinity */
+ _settriple(result, MPD_POS, 0, mpd_etiny(ctx));
+ *status |= MPD_Clamped;
+ return;
+ }
+ if (mpd_iszero(a)) {
+ mpd_setspecial(result, mpd_sign(a), MPD_INF);
+ *status |= MPD_Division_by_zero;
+ return;
+ }
+ if (mpd_isnegative(a)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ _mpd_qinvroot(result, a, ctx, status);
+}
+
+/*
+ * Ensure correct rounding. Algorithm after Hull & Abrham, "Properly Rounded
+ * Variable Precision Square Root", ACM Transactions on Mathematical Software,
+ * Vol. 11, No. 3.
+ */
+static void
+_mpd_fix_sqrt(mpd_t *result, const mpd_t *a, mpd_t *tmp,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_context_t maxctx;
+ MPD_NEW_CONST(u,0,0,1,1,1,5);
+
+ mpd_maxcontext(&maxctx);
+ u.exp = u.digits - ctx->prec + result->exp - 1;
+
+ _mpd_qsub(tmp, result, &u, &maxctx, status);
+ if (*status&MPD_Errors) goto nanresult;
+
+ _mpd_qmul(tmp, tmp, tmp, &maxctx, status);
+ if (*status&MPD_Errors) goto nanresult;
+
+ if (_mpd_cmp(tmp, a) == 1) {
+ u.exp += 1;
+ u.data[0] = 1;
+ _mpd_qsub(result, result, &u, &maxctx, status);
+ }
+ else {
+ _mpd_qadd(tmp, result, &u, &maxctx, status);
+ if (*status&MPD_Errors) goto nanresult;
+
+ _mpd_qmul(tmp, tmp, tmp, &maxctx, status);
+ if (*status&MPD_Errors) goto nanresult;
+
+ if (_mpd_cmp(tmp, a) == -1) {
+ u.exp += 1;
+ u.data[0] = 1;
+ _mpd_qadd(result, result, &u, &maxctx, status);
+ }
+ }
+
+ return;
+
+nanresult:
+ mpd_setspecial(result, MPD_POS, MPD_NAN);
+}
+
+void
+mpd_qsqrt(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
+ uint32_t *status)
+{
+ uint32_t workstatus = 0;
+ mpd_context_t varcontext;
+ mpd_t *z = result; /* current approximation */
+ MPD_NEW_STATIC(v,0,0,0,0); /* a, normalized to a number between 1 and 100 */
+ MPD_NEW_STATIC(vtmp,0,0,0,0);
+ MPD_NEW_STATIC(tmp,0,0,0,0);
+ mpd_ssize_t ideal_exp, shift;
+ mpd_ssize_t target_prec, fracdigits;
+ mpd_ssize_t a_exp, a_digits;
+ mpd_ssize_t adj, tz;
+ mpd_uint_t dummy, t;
+ int exact = 0;
+
+
+ varcontext = *ctx;
+ varcontext.round = MPD_ROUND_HALF_EVEN;
+ ideal_exp = (a->exp - (a->exp & 1)) / 2;
+
+ if (mpd_isspecial(a)) {
+ if (mpd_qcheck_nan(result, a, ctx, status)) {
+ return;
+ }
+ if (mpd_isnegative(a)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ mpd_setspecial(result, MPD_POS, MPD_INF);
+ return;
+ }
+ if (mpd_iszero(a)) {
+ _settriple(result, mpd_sign(a), 0, ideal_exp);
+ mpd_qfinalize(result, ctx, status);
+ return;
+ }
+ if (mpd_isnegative(a)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+
+ if (!mpd_qcopy(&v, a, status)) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ goto finish;
+ }
+
+ a_exp = a->exp;
+ a_digits = a->digits;
+
+ /* normalize a to 1 <= v < 100 */
+ if ((v.digits+v.exp) & 1) {
+ fracdigits = v.digits - 1;
+ v.exp = -fracdigits;
+ _mpd_get_msdigits(&dummy, &t, &v, 3);
+ t = t < 100 ? t*10 : t;
+ t = t < 100 ? t*10 : t;
+ }
+ else {
+ fracdigits = v.digits - 2;
+ v.exp = -fracdigits;
+ _mpd_get_msdigits(&dummy, &t, &v, 4);
+ t = t < 1000 ? t*10 : t;
+ t = t < 1000 ? t*10 : t;
+ t = t < 1000 ? t*10 : t;
+ }
+ adj = (a_exp-v.exp) / 2;
+
+
+ /* use excess digits */
+ target_prec = (a_digits > ctx->prec) ? a_digits : ctx->prec;
+ target_prec += 2;
+ varcontext.prec = target_prec + 3;
+
+ /* invroot is much faster for large numbers */
+ _mpd_qinvroot(&tmp, &v, &varcontext, &workstatus);
+
+ varcontext.prec = target_prec;
+ _mpd_qdiv(NO_IDEAL_EXP, z, &one, &tmp, &varcontext, &workstatus);
+
+
+ tz = mpd_trail_zeros(result);
+ if ((result->digits-tz)*2-1 <= v.digits) {
+ _mpd_qmul(&tmp, result, result, &varcontext, &workstatus);
+ if (workstatus&MPD_Errors) {
+ mpd_seterror(result, workstatus&MPD_Errors, status);
+ goto finish;
+ }
+ exact = (_mpd_cmp(&tmp, &v) == 0);
+ }
+ *status |= (workstatus&MPD_Errors);
+
+ if (!exact && !mpd_isspecial(result) && !mpd_iszero(result)) {
+ _mpd_fix_sqrt(result, &v, &tmp, &varcontext, status);
+ if (mpd_isspecial(result)) goto finish;
+ *status |= (MPD_Rounded|MPD_Inexact);
+ }
+
+ result->exp += adj;
+ if (exact) {
+ shift = ideal_exp - result->exp;
+ shift = (tz > shift) ? shift : tz;
+ if (shift > 0) {
+ mpd_qshiftr_inplace(result, shift);
+ result->exp += shift;
+ }
+ }
+
+
+finish:
+ mpd_del(&v);
+ mpd_del(&vtmp);
+ mpd_del(&tmp);
+ varcontext.prec = ctx->prec;
+ mpd_qfinalize(result, &varcontext, status);
+}
+
+
+/******************************************************************************/
+/* Base conversions */
+/******************************************************************************/
+
+/*
+ * Returns the space needed to represent an integer mpd_t in base 'base'.
+ * The result is undefined for non-integers.
+ *
+ * Max space needed:
+ *
+ * base^n >= 10^(digits+exp)
+ * n >= log10(10^(digits+exp))/log10(base) = (digits+exp) / log10(base)
+ */
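+/*
+ * Example: for a 10-digit integer (digits+exp == 10) and base == 2,
+ * 10/log10(2) =~ 33.2, so the function returns 36 (the estimate plus a
+ * safety margin of 3).
+ */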
+size_t
+mpd_sizeinbase(mpd_t *a, uint32_t base)
+{
+ size_t x;
+
+ assert(mpd_isinteger(a));
+ if (mpd_iszero(a)) {
+ return 1;
+ }
+
+ x = a->digits+a->exp;
+
+#ifdef CONFIG_64
+ #ifdef USE_80BIT_LONG_DOUBLE
+ return (long double)x / log10(base) + 3;
+ #else
+ /* x > floor(((1ULL<<53)-3) * log10(2)) */
+ if (x > 2711437152599294ULL) {
+ return SIZE_MAX;
+ }
+ return (double)x / log10(base) + 3;
+ #endif
+#else /* CONFIG_32 */
+{
+ double y = x / log10(base) + 3;
+ return (y > SIZE_MAX) ? SIZE_MAX : (size_t)y;
+}
+#endif
+}
+
+/*
+ * Returns the space needed to import a base 'base' integer of length 'srclen'.
+ */
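+/*
+ * Example: importing 100 words of base 2**32 on a 64-bit build
+ * (MPD_RDIGITS == 19) needs about 100*log10(2**32)/19 =~ 50.7 coefficient
+ * words; the function returns 53 (the estimate plus a margin of 3).
+ */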
+static inline mpd_ssize_t
+_mpd_importsize(size_t srclen, uint32_t base)
+{
+#if SIZE_MAX == UINT64_MAX
+ #ifdef USE_80BIT_LONG_DOUBLE
+ long double x = (long double)srclen * (log10(base)/MPD_RDIGITS) + 3;
+ #else
+ double x;
+ if (srclen > (1ULL<<53)) {
+ return MPD_SSIZE_MAX;
+ }
+ x = (double)srclen * (log10(base)/MPD_RDIGITS) + 3;
+ #endif
+#else
+ double x = srclen * (log10(base)/MPD_RDIGITS) + 3;
+#endif
+ return (x > MPD_MAXIMPORT) ? MPD_SSIZE_MAX : (mpd_ssize_t)x;
+}
+
+
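+/*
+ * Repeated short division: each call to _mpd_shortdiv() divides the
+ * coefficient in place by wbase and returns the remainder, which is the
+ * next base-'wbase' word of the result (least significant first).
+ */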
+static inline size_t
+_to_base_u16(uint16_t *w, size_t wlen, mpd_uint_t wbase,
+ mpd_uint_t *u, mpd_ssize_t ulen)
+{
+ size_t n = 0;
+
+ assert(wlen > 0 && ulen > 0);
+
+ do {
+ w[n++] = (uint16_t)_mpd_shortdiv(u, u, ulen, wbase);
+ /* ulen will be at least 1. u[ulen-1] can only be zero if ulen == 1 */
+ ulen = _mpd_real_size(u, ulen);
+
+ } while (u[ulen-1] != 0 && n < wlen);
+
+ /* proper termination condition */
+ assert(u[ulen-1] == 0);
+
+ return n;
+}
+
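+/*
+ * Horner evaluation: start with the most significant source word and
+ * repeatedly multiply the accumulated result by ubase and add the next
+ * word, i.e. w = (..(u[n-1]*B + u[n-2])*B + ..)*B + u[0] with B = ubase.
+ */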
+static inline void
+_from_base_u16(mpd_uint_t *w, mpd_ssize_t wlen,
+ const mpd_uint_t *u, size_t ulen, uint32_t ubase)
+{
+ mpd_ssize_t m = 1;
+ mpd_uint_t carry;
+
+ assert(wlen > 0 && ulen > 0);
+
+ w[0] = u[--ulen];
+ while (--ulen != SIZE_MAX && m < wlen) {
+ _mpd_shortmul(w, w, m, ubase);
+ m = _mpd_real_size(w, m+1);
+ carry = _mpd_shortadd(w, m, u[ulen]);
+ if (carry) w[m++] = carry;
+ }
+
+ /* proper termination condition */
+ assert(ulen == SIZE_MAX);
+}
+
+/* target base wbase <= source base ubase */
+static inline size_t
+_baseconv_to_smaller(uint32_t *w, size_t wlen, mpd_uint_t wbase,
+ mpd_uint_t *u, mpd_ssize_t ulen, mpd_uint_t ubase)
+{
+ size_t n = 0;
+
+ assert(wlen > 0 && ulen > 0);
+
+ do {
+ w[n++] = (uint32_t)_mpd_shortdiv_b(u, u, ulen, wbase, ubase);
+ /* ulen will be at least 1. u[ulen-1] can only be zero if ulen == 1 */
+ ulen = _mpd_real_size(u, ulen);
+
+ } while (u[ulen-1] != 0 && n < wlen);
+
+ /* proper termination condition */
+ assert(u[ulen-1] == 0);
+
+ return n;
+}
+
+/* target base wbase >= source base ubase */
+static inline void
+_baseconv_to_larger(mpd_uint_t *w, mpd_ssize_t wlen, mpd_uint_t wbase,
+ const mpd_uint_t *u, size_t ulen, mpd_uint_t ubase)
+{
+ mpd_ssize_t m = 1;
+ mpd_uint_t carry;
+
+ assert(wlen > 0 && ulen > 0);
+
+ w[0] = u[--ulen];
+ while (--ulen != SIZE_MAX && m < wlen) {
+ _mpd_shortmul_b(w, w, m, ubase, wbase);
+ m = _mpd_real_size(w, m+1);
+ carry = _mpd_shortadd_b(w, m, u[ulen], wbase);
+ if (carry) w[m++] = carry;
+ }
+
+ /* proper termination condition */
+ assert(ulen == SIZE_MAX);
+}
+
+
+/*
+ * Converts an integer mpd_t to a multiprecision integer with
+ * base <= UINT16_MAX+1. The least significant word of the result
+ * is rdata[0].
+ */
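+/*
+ * Example: exporting the integer 123456789 (== 0x075BCD15) with
+ * rbase == 65536 yields rdata[0] = 0xCD15, rdata[1] = 0x075B and a
+ * return value of 2.
+ */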
+size_t
+mpd_qexport_u16(uint16_t *rdata, size_t rlen, uint32_t rbase,
+ const mpd_t *src, uint32_t *status)
+{
+ mpd_t *tsrc;
+ size_t n;
+
+ assert(rbase <= (1U<<16));
+ assert(rlen <= SIZE_MAX/(sizeof *rdata));
+
+ if (mpd_isspecial(src) || !_mpd_isint(src)) {
+ *status |= MPD_Invalid_operation;
+ return SIZE_MAX;
+ }
+
+ memset(rdata, 0, rlen * (sizeof *rdata));
+
+ if (mpd_iszero(src)) {
+ return 1;
+ }
+
+ if ((tsrc = mpd_qnew()) == NULL) {
+ *status |= MPD_Malloc_error;
+ return SIZE_MAX;
+ }
+
+ if (src->exp >= 0) {
+ if (!mpd_qshiftl(tsrc, src, src->exp, status)) {
+ mpd_del(tsrc);
+ return SIZE_MAX;
+ }
+ }
+ else {
+ if (mpd_qshiftr(tsrc, src, -src->exp, status) == MPD_UINT_MAX) {
+ mpd_del(tsrc);
+ return SIZE_MAX;
+ }
+ }
+
+ n = _to_base_u16(rdata, rlen, rbase, tsrc->data, tsrc->len);
+
+ mpd_del(tsrc);
+ return n;
+}
+
+/*
+ * Converts an integer mpd_t to a multiprecision integer with
+ * base <= UINT32_MAX. The least significant word of the result
+ * is rdata[0].
+ */
+size_t
+mpd_qexport_u32(uint32_t *rdata, size_t rlen, uint32_t rbase,
+ const mpd_t *src, uint32_t *status)
+{
+ mpd_t *tsrc;
+ size_t n;
+
+ if (mpd_isspecial(src) || !_mpd_isint(src)) {
+ *status |= MPD_Invalid_operation;
+ return SIZE_MAX;
+ }
+#if MPD_SIZE_MAX < SIZE_MAX
+ if (rlen > MPD_SSIZE_MAX) {
+ *status |= MPD_Invalid_operation;
+ return SIZE_MAX;
+ }
+#endif
+
+ assert(rlen <= SIZE_MAX/(sizeof *rdata));
+ memset(rdata, 0, rlen * (sizeof *rdata));
+
+ if (mpd_iszero(src)) {
+ return 1;
+ }
+
+ if ((tsrc = mpd_qnew()) == NULL) {
+ *status |= MPD_Malloc_error;
+ return SIZE_MAX;
+ }
+
+ if (src->exp >= 0) {
+ if (!mpd_qshiftl(tsrc, src, src->exp, status)) {
+ mpd_del(tsrc);
+ return SIZE_MAX;
+ }
+ }
+ else {
+ if (mpd_qshiftr(tsrc, src, -src->exp, status) == MPD_UINT_MAX) {
+ mpd_del(tsrc);
+ return SIZE_MAX;
+ }
+ }
+
+#ifdef CONFIG_64
+ n = _baseconv_to_smaller(rdata, rlen, rbase,
+ tsrc->data, tsrc->len, MPD_RADIX);
+#else
+ if (rbase <= MPD_RADIX) {
+ n = _baseconv_to_smaller(rdata, rlen, rbase,
+ tsrc->data, tsrc->len, MPD_RADIX);
+ }
+ else {
+ _baseconv_to_larger(rdata, (mpd_ssize_t)rlen, rbase,
+ tsrc->data, tsrc->len, MPD_RADIX);
+ n = _mpd_real_size(rdata, (mpd_ssize_t)rlen);
+ }
+#endif
+
+ mpd_del(tsrc);
+ return n;
+}
+
+
+/*
+ * Converts a multiprecision integer with base <= UINT16_MAX+1 to an mpd_t.
+ * The least significant word of the source is srcdata[0].
+ */
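+/*
+ * Example: importing srcdata = {0xCD15, 0x075B}, srclen == 2,
+ * srcsign == MPD_POS, srcbase == 65536 sets result to 123456789,
+ * provided ctx->prec is large enough to hold the value exactly.
+ */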
+void
+mpd_qimport_u16(mpd_t *result,
+ const uint16_t *srcdata, size_t srclen,
+ uint8_t srcsign, uint32_t srcbase,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_uint_t *usrc; /* uint16_t src copied to an mpd_uint_t array */
+ mpd_ssize_t rlen; /* length of the result */
+ size_t n = 0;
+
+ assert(srclen > 0);
+ assert(srcbase <= (1U<<16));
+
+ if ((rlen = _mpd_importsize(srclen, srcbase)) == MPD_SSIZE_MAX) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (srclen > MPD_SIZE_MAX/(sizeof *usrc)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if ((usrc = mpd_alloc((mpd_size_t)srclen, sizeof *usrc)) == NULL) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return;
+ }
+ for (n = 0; n < srclen; n++) {
+ usrc[n] = srcdata[n];
+ }
+
+ /* result->data is initialized to zero */
+ if (!mpd_qresize_zero(result, rlen, status)) {
+ goto finish;
+ }
+
+ _from_base_u16(result->data, rlen, usrc, srclen, srcbase);
+
+ mpd_set_flags(result, srcsign);
+ result->exp = 0;
+ result->len = _mpd_real_size(result->data, rlen);
+ mpd_setdigits(result);
+
+ mpd_qresize(result, result->len, status);
+ mpd_qfinalize(result, ctx, status);
+
+
+finish:
+ mpd_free(usrc);
+}
+
+/*
+ * Converts a multiprecision integer with base <= UINT32_MAX to an mpd_t.
+ * The least significant word of the source is srcdata[0].
+ */
+void
+mpd_qimport_u32(mpd_t *result,
+ const uint32_t *srcdata, size_t srclen,
+ uint8_t srcsign, uint32_t srcbase,
+ const mpd_context_t *ctx, uint32_t *status)
+{
+ mpd_uint_t *usrc; /* uint32_t src copied to an mpd_uint_t array */
+ mpd_ssize_t rlen; /* length of the result */
+ size_t n = 0;
+
+ assert(srclen > 0);
+
+ if ((rlen = _mpd_importsize(srclen, srcbase)) == MPD_SSIZE_MAX) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if (srclen > MPD_SIZE_MAX/(sizeof *usrc)) {
+ mpd_seterror(result, MPD_Invalid_operation, status);
+ return;
+ }
+ if ((usrc = mpd_alloc((mpd_size_t)srclen, sizeof *usrc)) == NULL) {
+ mpd_seterror(result, MPD_Malloc_error, status);
+ return;
+ }
+ for (n = 0; n < srclen; n++) {
+ usrc[n] = srcdata[n];
+ }
+
+ /* result->data is initialized to zero */
+ if (!mpd_qresize_zero(result, rlen, status)) {
+ goto finish;
+ }
+
+#ifdef CONFIG_64
+ _baseconv_to_larger(result->data, rlen, MPD_RADIX,
+ usrc, srclen, srcbase);
+#else
+ if (srcbase <= MPD_RADIX) {
+ _baseconv_to_larger(result->data, rlen, MPD_RADIX,
+ usrc, srclen, srcbase);
+ }
+ else {
+ _baseconv_to_smaller(result->data, rlen, MPD_RADIX,
+ usrc, (mpd_ssize_t)srclen, srcbase);
+ }
+#endif
+
+ mpd_set_flags(result, srcsign);
+ result->exp = 0;
+ result->len = _mpd_real_size(result->data, rlen);
+ mpd_setdigits(result);
+
+ mpd_qresize(result, result->len, status);
+ mpd_qfinalize(result, ctx, status);
+
+
+finish:
+ mpd_free(usrc);
+}
+
+
+
diff --git a/Modules/_decimal/libmpdec/mpdecimal.h b/Modules/_decimal/libmpdec/mpdecimal.h
new file mode 100644
index 0000000000..f3f32ac601
--- /dev/null
+++ b/Modules/_decimal/libmpdec/mpdecimal.h
@@ -0,0 +1,800 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef MPDECIMAL_H
+#define MPDECIMAL_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#define __STDC_LIMIT_MACROS
+#endif
+
+
+#ifndef _MSC_VER
+ #include "pyconfig.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <assert.h>
+
+#ifdef _MSC_VER
+ #include "vccompat.h"
+ #ifndef UNUSED
+ #define UNUSED
+ #endif
+ #define EXTINLINE extern inline
+#else
+ #ifdef HAVE_STDINT_H
+ #include <stdint.h>
+ #endif
+ #ifdef HAVE_INTTYPES_H
+ #include <inttypes.h>
+ #endif
+ #ifndef __GNUC_STDC_INLINE__
+ #define __GNUC_STDC_INLINE__
+ #endif
+ #if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+ #define UNUSED __attribute__((unused))
+ #else
+ #define UNUSED
+ #endif
+ #define EXTINLINE
+#endif
+
+
+#if !defined(LEGACY_COMPILER)
+ #if !defined(UINT64_MAX)
+ /* The following #error is just a warning. If the compiler indeed does
+ * not have uint64_t, it is perfectly safe to comment out the #error. */
+ #error "Warning: Compiler without uint64_t. Comment out this line."
+ #define LEGACY_COMPILER
+ #endif
+#endif
+
+
+/******************************************************************************/
+/* Configuration */
+/******************************************************************************/
+
+#if defined(UNIVERSAL)
+ #if defined(CONFIG_64) || defined(CONFIG_32)
+ #error "cannot use CONFIG_64 or CONFIG_32 with UNIVERSAL."
+ #endif
+ #if defined(__ppc__)
+ #define CONFIG_32
+ #define ANSI
+ #elif defined(__ppc64__)
+ #define CONFIG_64
+ #define ANSI
+ #elif defined(__i386__)
+ #define CONFIG_32
+ #define ANSI
+ #elif defined(__x86_64__)
+ #define CONFIG_64
+ #define ASM
+ #else
+ #error "unknown architecture for universal build."
+ #endif
+#endif
+
+
+/* BEGIN CONFIG_64 */
+#if defined(CONFIG_64)
+/* types for modular and base arithmetic */
+#define MPD_UINT_MAX UINT64_MAX
+#define MPD_BITS_PER_UINT 64
+typedef uint64_t mpd_uint_t; /* unsigned mod type */
+
+#define MPD_SIZE_MAX SIZE_MAX
+typedef size_t mpd_size_t; /* unsigned size type */
+
+/* type for exp, digits, len, prec */
+#define MPD_SSIZE_MAX INT64_MAX
+#define MPD_SSIZE_MIN INT64_MIN
+typedef int64_t mpd_ssize_t;
+#define _mpd_strtossize strtoll
+
+/* decimal arithmetic */
+#define MPD_RADIX 10000000000000000000ULL /* 10**19 */
+#define MPD_RDIGITS 19
+#define MPD_MAX_POW10 19
+#define MPD_EXPDIGITS 19 /* MPD_EXPDIGITS <= MPD_RDIGITS+1 */
+
+#define MPD_MAXTRANSFORM_2N 4294967296ULL /* 2**32 */
+#define MPD_MAX_PREC 999999999999999999LL
+#define MPD_MAX_PREC_LOG2 64
+#define MPD_ELIMIT 1000000000000000000LL
+#define MPD_MAX_EMAX 999999999999999999LL /* ELIMIT-1 */
+#define MPD_MIN_EMIN (-999999999999999999LL) /* -EMAX */
+#define MPD_MIN_ETINY (MPD_MIN_EMIN-(MPD_MAX_PREC-1))
+#define MPD_EXP_INF 2000000000000000001LL
+#define MPD_EXP_CLAMP (-4000000000000000001LL)
+#define MPD_MAXIMPORT 105263157894736842L /* ceil((2*MPD_MAX_PREC)/MPD_RDIGITS) */
+
+/* conversion specifiers */
+#define PRI_mpd_uint_t PRIu64
+#define PRI_mpd_ssize_t PRIi64
+/* END CONFIG_64 */
+
+
+/* BEGIN CONFIG_32 */
+#elif defined(CONFIG_32)
+/* types for modular and base arithmetic */
+#define MPD_UINT_MAX UINT32_MAX
+#define MPD_BITS_PER_UINT 32
+typedef uint32_t mpd_uint_t; /* unsigned mod type */
+
+#ifndef LEGACY_COMPILER
+#define MPD_UUINT_MAX UINT64_MAX
+typedef uint64_t mpd_uuint_t; /* double width unsigned mod type */
+#endif
+
+#define MPD_SIZE_MAX SIZE_MAX
+typedef size_t mpd_size_t; /* unsigned size type */
+
+/* type for dec->len, dec->exp, ctx->prec */
+#define MPD_SSIZE_MAX INT32_MAX
+#define MPD_SSIZE_MIN INT32_MIN
+typedef int32_t mpd_ssize_t;
+#define _mpd_strtossize strtol
+
+/* decimal arithmetic */
+#define MPD_RADIX 1000000000UL /* 10**9 */
+#define MPD_RDIGITS 9
+#define MPD_MAX_POW10 9
+#define MPD_EXPDIGITS 10 /* MPD_EXPDIGITS <= MPD_RDIGITS+1 */
+
+#define MPD_MAXTRANSFORM_2N 33554432UL /* 2**25 */
+#define MPD_MAX_PREC 425000000L
+#define MPD_MAX_PREC_LOG2 32
+#define MPD_ELIMIT 425000001L
+#define MPD_MAX_EMAX 425000000L /* ELIMIT-1 */
+#define MPD_MIN_EMIN (-425000000L) /* -EMAX */
+#define MPD_MIN_ETINY (MPD_MIN_EMIN-(MPD_MAX_PREC-1))
+#define MPD_EXP_INF 1000000001L /* allows for emax=999999999 in the tests */
+#define MPD_EXP_CLAMP (-2000000001L) /* allows for emin=-999999999 in the tests */
+#define MPD_MAXIMPORT 94444445L /* ceil((2*MPD_MAX_PREC)/MPD_RDIGITS) */
+
+/* conversion specifiers */
+#define PRI_mpd_uint_t PRIu32
+#define PRI_mpd_ssize_t PRIi32
+/* END CONFIG_32 */
+
+#else
+ #error "define CONFIG_64 or CONFIG_32"
+#endif
+/* END CONFIG */
+
+
+#if MPD_SIZE_MAX != MPD_UINT_MAX
+ #error "unsupported platform: need mpd_size_t == mpd_uint_t"
+#endif
+
+
+/******************************************************************************/
+/* Context */
+/******************************************************************************/
+
+enum {
+ MPD_ROUND_UP, /* round away from 0 */
+ MPD_ROUND_DOWN, /* round toward 0 (truncate) */
+ MPD_ROUND_CEILING, /* round toward +infinity */
+ MPD_ROUND_FLOOR, /* round toward -infinity */
+ MPD_ROUND_HALF_UP, /* 0.5 is rounded up */
+ MPD_ROUND_HALF_DOWN, /* 0.5 is rounded down */
+ MPD_ROUND_HALF_EVEN, /* 0.5 is rounded to even */
+ MPD_ROUND_05UP, /* round zero or five away from 0 */
+ MPD_ROUND_TRUNC, /* truncate, but set infinity */
+ MPD_ROUND_GUARD
+};
+
+enum { MPD_CLAMP_DEFAULT, MPD_CLAMP_IEEE_754, MPD_CLAMP_GUARD };
+
+extern const char *mpd_round_string[MPD_ROUND_GUARD];
+extern const char *mpd_clamp_string[MPD_CLAMP_GUARD];
+
+
+typedef struct {
+ mpd_ssize_t prec; /* precision */
+ mpd_ssize_t emax; /* max positive exp */
+ mpd_ssize_t emin; /* min negative exp */
+ uint32_t traps; /* status events that should be trapped */
+ uint32_t status; /* status flags */
+ uint32_t newtrap; /* set by mpd_addstatus_raise() */
+ int round; /* rounding mode */
+ int clamp; /* clamp mode */
+ int allcr; /* all functions correctly rounded */
+} mpd_context_t;
+
+
+/* Status flags */
+#define MPD_Clamped 0x00000001U
+#define MPD_Conversion_syntax 0x00000002U
+#define MPD_Division_by_zero 0x00000004U
+#define MPD_Division_impossible 0x00000008U
+#define MPD_Division_undefined 0x00000010U
+#define MPD_Fpu_error 0x00000020U
+#define MPD_Inexact 0x00000040U
+#define MPD_Invalid_context 0x00000080U
+#define MPD_Invalid_operation 0x00000100U
+#define MPD_Malloc_error 0x00000200U
+#define MPD_Not_implemented 0x00000400U
+#define MPD_Overflow 0x00000800U
+#define MPD_Rounded 0x00001000U
+#define MPD_Subnormal 0x00002000U
+#define MPD_Underflow 0x00004000U
+#define MPD_Max_status (0x00008000U-1U)
+
+/* Conditions that result in an IEEE 754 exception */
+#define MPD_IEEE_Invalid_operation (MPD_Conversion_syntax | \
+ MPD_Division_impossible | \
+ MPD_Division_undefined | \
+ MPD_Fpu_error | \
+ MPD_Invalid_context | \
+ MPD_Invalid_operation | \
+ MPD_Malloc_error) \
+
+/* Errors that require the result of an operation to be set to NaN */
+#define MPD_Errors (MPD_IEEE_Invalid_operation | \
+ MPD_Division_by_zero)
+
+/* Default traps */
+#define MPD_Traps (MPD_IEEE_Invalid_operation | \
+ MPD_Division_by_zero | \
+ MPD_Overflow | \
+ MPD_Underflow)
+
+/* Official name */
+#define MPD_Insufficient_storage MPD_Malloc_error
+
+/* IEEE 754 interchange format contexts */
+#define MPD_IEEE_CONTEXT_MAX_BITS 512 /* 16*(log2(MPD_MAX_EMAX / 3)-3) */
+#define MPD_DECIMAL32 32
+#define MPD_DECIMAL64 64
+#define MPD_DECIMAL128 128
+
+
+#define MPD_MINALLOC_MIN 2
+#define MPD_MINALLOC_MAX 64
+extern mpd_ssize_t MPD_MINALLOC;
+extern void (* mpd_traphandler)(mpd_context_t *);
+void mpd_dflt_traphandler(mpd_context_t *);
+
+void mpd_setminalloc(mpd_ssize_t n);
+void mpd_init(mpd_context_t *ctx, mpd_ssize_t prec);
+
+void mpd_maxcontext(mpd_context_t *ctx);
+void mpd_defaultcontext(mpd_context_t *ctx);
+void mpd_basiccontext(mpd_context_t *ctx);
+int mpd_ieee_context(mpd_context_t *ctx, int bits);
+
+mpd_ssize_t mpd_getprec(const mpd_context_t *ctx);
+mpd_ssize_t mpd_getemax(const mpd_context_t *ctx);
+mpd_ssize_t mpd_getemin(const mpd_context_t *ctx);
+int mpd_getround(const mpd_context_t *ctx);
+uint32_t mpd_gettraps(const mpd_context_t *ctx);
+uint32_t mpd_getstatus(const mpd_context_t *ctx);
+int mpd_getclamp(const mpd_context_t *ctx);
+int mpd_getcr(const mpd_context_t *ctx);
+
+int mpd_qsetprec(mpd_context_t *ctx, mpd_ssize_t prec);
+int mpd_qsetemax(mpd_context_t *ctx, mpd_ssize_t emax);
+int mpd_qsetemin(mpd_context_t *ctx, mpd_ssize_t emin);
+int mpd_qsetround(mpd_context_t *ctx, int newround);
+int mpd_qsettraps(mpd_context_t *ctx, uint32_t flags);
+int mpd_qsetstatus(mpd_context_t *ctx, uint32_t flags);
+int mpd_qsetclamp(mpd_context_t *ctx, int c);
+int mpd_qsetcr(mpd_context_t *ctx, int c);
+void mpd_addstatus_raise(mpd_context_t *ctx, uint32_t flags);
+
+
+/******************************************************************************/
+/* Decimal Arithmetic */
+/******************************************************************************/
+
+/* mpd_t flags */
+#define MPD_POS ((uint8_t)0)
+#define MPD_NEG ((uint8_t)1)
+#define MPD_INF ((uint8_t)2)
+#define MPD_NAN ((uint8_t)4)
+#define MPD_SNAN ((uint8_t)8)
+#define MPD_SPECIAL (MPD_INF|MPD_NAN|MPD_SNAN)
+#define MPD_STATIC ((uint8_t)16)
+#define MPD_STATIC_DATA ((uint8_t)32)
+#define MPD_SHARED_DATA ((uint8_t)64)
+#define MPD_CONST_DATA ((uint8_t)128)
+#define MPD_DATAFLAGS (MPD_STATIC_DATA|MPD_SHARED_DATA|MPD_CONST_DATA)
+
+/* mpd_t */
+typedef struct {
+ uint8_t flags;
+ mpd_ssize_t exp;
+ mpd_ssize_t digits;
+ mpd_ssize_t len;
+ mpd_ssize_t alloc;
+ mpd_uint_t *data;
+} mpd_t;
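+
+/*
+ * The coefficient is stored in 'data' as an array of words in base
+ * 10**MPD_RDIGITS, least significant word first.  'len' words are in
+ * use, 'alloc' words are allocated and 'digits' is the number of decimal
+ * digits in the coefficient.  The value is (-1)**sign * coeff * 10**exp.
+ */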
+
+
+typedef unsigned char uchar;
+
+
+/******************************************************************************/
+/* Quiet, thread-safe functions */
+/******************************************************************************/
+
+/* format specification */
+typedef struct {
+ mpd_ssize_t min_width; /* minimum field width */
+ mpd_ssize_t prec; /* fraction digits or significant digits */
+ char type; /* conversion specifier */
+ char align; /* alignment */
+ char sign; /* sign printing/alignment */
+ char fill[5]; /* fill character */
+ const char *dot; /* decimal point */
+ const char *sep; /* thousands separator */
+ const char *grouping; /* grouping of digits */
+} mpd_spec_t;
+
+/* output to a string */
+char *mpd_to_sci(const mpd_t *dec, int fmt);
+char *mpd_to_eng(const mpd_t *dec, int fmt);
+mpd_ssize_t mpd_to_sci_size(char **res, const mpd_t *dec, int fmt);
+mpd_ssize_t mpd_to_eng_size(char **res, const mpd_t *dec, int fmt);
+int mpd_validate_lconv(mpd_spec_t *spec);
+int mpd_parse_fmt_str(mpd_spec_t *spec, const char *fmt, int caps);
+char * mpd_qformat_spec(const mpd_t *dec, const mpd_spec_t *spec, const mpd_context_t *ctx, uint32_t *status);
+char *mpd_qformat(const mpd_t *dec, const char *fmt, const mpd_context_t *ctx, uint32_t *status);
+
+#define MPD_NUM_FLAGS 15
+#define MPD_MAX_FLAG_STRING 208
+#define MPD_MAX_FLAG_LIST (MPD_MAX_FLAG_STRING+18)
+#define MPD_MAX_SIGNAL_LIST 121
+int mpd_snprint_flags(char *dest, int nmemb, uint32_t flags);
+int mpd_lsnprint_flags(char *dest, int nmemb, uint32_t flags, const char *flag_string[]);
+int mpd_lsnprint_signals(char *dest, int nmemb, uint32_t flags, const char *signal_string[]);
+
+/* output to a file */
+void mpd_fprint(FILE *file, const mpd_t *dec);
+void mpd_print(const mpd_t *dec);
+
+/* assignment from a string */
+void mpd_qset_string(mpd_t *dec, const char *s, const mpd_context_t *ctx, uint32_t *status);
+
+/* set to NaN with error flags */
+void mpd_seterror(mpd_t *result, uint32_t flags, uint32_t *status);
+/* set a special with sign and type */
+void mpd_setspecial(mpd_t *dec, uint8_t sign, uint8_t type);
+/* set coefficient to zero or all nines */
+void mpd_zerocoeff(mpd_t *result);
+void mpd_qmaxcoeff(mpd_t *result, const mpd_context_t *ctx, uint32_t *status);
+
+/* quietly assign a C integer type to an mpd_t */
+void mpd_qset_ssize(mpd_t *result, mpd_ssize_t a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qset_i32(mpd_t *result, int32_t a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qset_uint(mpd_t *result, mpd_uint_t a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qset_u32(mpd_t *result, uint32_t a, const mpd_context_t *ctx, uint32_t *status);
+#ifndef LEGACY_COMPILER
+void mpd_qset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx, uint32_t *status);
+#endif
+
+/* quietly assign a C integer type to an mpd_t with a static coefficient */
+void mpd_qsset_ssize(mpd_t *result, mpd_ssize_t a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qsset_i32(mpd_t *result, int32_t a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qsset_uint(mpd_t *result, mpd_uint_t a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qsset_u32(mpd_t *result, uint32_t a, const mpd_context_t *ctx, uint32_t *status);
+
+/* quietly get a C integer type from an mpd_t */
+mpd_ssize_t mpd_qget_ssize(const mpd_t *dec, uint32_t *status);
+mpd_uint_t mpd_qget_uint(const mpd_t *dec, uint32_t *status);
+mpd_uint_t mpd_qabs_uint(const mpd_t *dec, uint32_t *status);
+
+
+/* quiet functions */
+int mpd_qcheck_nan(mpd_t *nanresult, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+int mpd_qcheck_nans(mpd_t *nanresult, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qfinalize(mpd_t *result, const mpd_context_t *ctx, uint32_t *status);
+
+const char * mpd_class(const mpd_t *a, const mpd_context_t *ctx);
+
+int mpd_qcopy(mpd_t *result, const mpd_t *a, uint32_t *status);
+mpd_t *mpd_qncopy(const mpd_t *a);
+int mpd_qcopy_abs(mpd_t *result, const mpd_t *a, uint32_t *status);
+int mpd_qcopy_negate(mpd_t *result, const mpd_t *a, uint32_t *status);
+int mpd_qcopy_sign(mpd_t *result, const mpd_t *a, const mpd_t *b, uint32_t *status);
+
+void mpd_qand(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qinvert(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qlogb(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qor(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qscaleb(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qxor(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+int mpd_same_quantum(const mpd_t *a, const mpd_t *b);
+
+void mpd_qrotate(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+int mpd_qshiftl(mpd_t *result, const mpd_t *a, mpd_ssize_t n, uint32_t *status);
+mpd_uint_t mpd_qshiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n, uint32_t *status);
+mpd_uint_t mpd_qshiftr_inplace(mpd_t *result, mpd_ssize_t n);
+void mpd_qshift(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qshiftn(mpd_t *result, const mpd_t *a, mpd_ssize_t n, const mpd_context_t *ctx, uint32_t *status);
+
+int mpd_qcmp(const mpd_t *a, const mpd_t *b, uint32_t *status);
+int mpd_qcompare(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+int mpd_qcompare_signal(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+int mpd_cmp_total(const mpd_t *a, const mpd_t *b);
+int mpd_cmp_total_mag(const mpd_t *a, const mpd_t *b);
+int mpd_compare_total(mpd_t *result, const mpd_t *a, const mpd_t *b);
+int mpd_compare_total_mag(mpd_t *result, const mpd_t *a, const mpd_t *b);
+
+void mpd_qround_to_intx(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qround_to_int(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qtrunc(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qfloor(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qceil(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+
+void mpd_qabs(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qmax(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qmax_mag(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qmin(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qmin_mag(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qminus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qplus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qnext_minus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qnext_plus(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qnext_toward(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qquantize(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qrescale(mpd_t *result, const mpd_t *a, mpd_ssize_t exp, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qrescale_fmt(mpd_t *result, const mpd_t *a, mpd_ssize_t exp, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qreduce(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qadd(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qadd_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qadd_i32(mpd_t *result, const mpd_t *a, int32_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qadd_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qadd_u32(mpd_t *result, const mpd_t *a, uint32_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qsub(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qsub_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qsub_i32(mpd_t *result, const mpd_t *a, int32_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qsub_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qsub_u32(mpd_t *result, const mpd_t *a, uint32_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qmul(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qmul_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qmul_i32(mpd_t *result, const mpd_t *a, int32_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qmul_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qmul_u32(mpd_t *result, const mpd_t *a, uint32_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qfma(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_t *c, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qdiv(mpd_t *q, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qdiv_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qdiv_i32(mpd_t *result, const mpd_t *a, int32_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qdiv_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qdiv_u32(mpd_t *result, const mpd_t *a, uint32_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qdivint(mpd_t *q, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qrem(mpd_t *r, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qrem_near(mpd_t *r, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qdivmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qpow(mpd_t *result, const mpd_t *base, const mpd_t *exp, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qpowmod(mpd_t *result, const mpd_t *base, const mpd_t *exp, const mpd_t *mod, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qln10(mpd_t *result, mpd_ssize_t prec, uint32_t *status);
+void mpd_qln(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qlog10(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qsqrt(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qinvroot(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx, uint32_t *status);
+
+
+size_t mpd_sizeinbase(mpd_t *a, uint32_t base);
+void mpd_qimport_u16(mpd_t *result, const uint16_t *srcdata, size_t srclen,
+ uint8_t srcsign, uint32_t srcbase,
+ const mpd_context_t *ctx, uint32_t *status);
+void mpd_qimport_u32(mpd_t *result, const uint32_t *srcdata, size_t srclen,
+ uint8_t srcsign, uint32_t srcbase,
+ const mpd_context_t *ctx, uint32_t *status);
+size_t mpd_qexport_u16(uint16_t *rdata, size_t rlen, uint32_t base,
+ const mpd_t *src, uint32_t *status);
+size_t mpd_qexport_u32(uint32_t *rdata, size_t rlen, uint32_t base,
+ const mpd_t *src, uint32_t *status);
+
+
+/******************************************************************************/
+/* Signalling functions */
+/******************************************************************************/
+
+char * mpd_format(const mpd_t *dec, const char *fmt, mpd_context_t *ctx);
+void mpd_import_u16(mpd_t *result, const uint16_t *srcdata, size_t srclen, uint8_t srcsign, uint32_t base, mpd_context_t *ctx);
+void mpd_import_u32(mpd_t *result, const uint32_t *srcdata, size_t srclen, uint8_t srcsign, uint32_t base, mpd_context_t *ctx);
+size_t mpd_export_u16(uint16_t *rdata, size_t rlen, uint32_t base, const mpd_t *src, mpd_context_t *ctx);
+size_t mpd_export_u32(uint32_t *rdata, size_t rlen, uint32_t base, const mpd_t *src, mpd_context_t *ctx);
+void mpd_finalize(mpd_t *result, mpd_context_t *ctx);
+int mpd_check_nan(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+int mpd_check_nans(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_set_string(mpd_t *result, const char *s, mpd_context_t *ctx);
+void mpd_maxcoeff(mpd_t *result, mpd_context_t *ctx);
+void mpd_sset_ssize(mpd_t *result, mpd_ssize_t a, mpd_context_t *ctx);
+void mpd_sset_i32(mpd_t *result, int32_t a, mpd_context_t *ctx);
+void mpd_sset_uint(mpd_t *result, mpd_uint_t a, mpd_context_t *ctx);
+void mpd_sset_u32(mpd_t *result, uint32_t a, mpd_context_t *ctx);
+void mpd_set_ssize(mpd_t *result, mpd_ssize_t a, mpd_context_t *ctx);
+void mpd_set_i32(mpd_t *result, int32_t a, mpd_context_t *ctx);
+void mpd_set_uint(mpd_t *result, mpd_uint_t a, mpd_context_t *ctx);
+void mpd_set_u32(mpd_t *result, uint32_t a, mpd_context_t *ctx);
+#ifndef LEGACY_COMPILER
+void mpd_set_i64(mpd_t *result, int64_t a, mpd_context_t *ctx);
+void mpd_set_u64(mpd_t *result, uint64_t a, mpd_context_t *ctx);
+#endif
+mpd_ssize_t mpd_get_ssize(const mpd_t *a, mpd_context_t *ctx);
+mpd_uint_t mpd_get_uint(const mpd_t *a, mpd_context_t *ctx);
+mpd_uint_t mpd_abs_uint(const mpd_t *a, mpd_context_t *ctx);
+void mpd_and(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_copy(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_canonical(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_copy_abs(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_copy_negate(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_copy_sign(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_invert(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_logb(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_or(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_rotate(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_scaleb(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_shiftl(mpd_t *result, const mpd_t *a, mpd_ssize_t n, mpd_context_t *ctx);
+mpd_uint_t mpd_shiftr(mpd_t *result, const mpd_t *a, mpd_ssize_t n, mpd_context_t *ctx);
+void mpd_shiftn(mpd_t *result, const mpd_t *a, mpd_ssize_t n, mpd_context_t *ctx);
+void mpd_shift(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_xor(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_abs(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+int mpd_cmp(const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+int mpd_compare(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+int mpd_compare_signal(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_add(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_add_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b, mpd_context_t *ctx);
+void mpd_add_i32(mpd_t *result, const mpd_t *a, int32_t b, mpd_context_t *ctx);
+void mpd_add_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b, mpd_context_t *ctx);
+void mpd_add_u32(mpd_t *result, const mpd_t *a, uint32_t b, mpd_context_t *ctx);
+void mpd_sub(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_sub_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b, mpd_context_t *ctx);
+void mpd_sub_i32(mpd_t *result, const mpd_t *a, int32_t b, mpd_context_t *ctx);
+void mpd_sub_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b, mpd_context_t *ctx);
+void mpd_sub_u32(mpd_t *result, const mpd_t *a, uint32_t b, mpd_context_t *ctx);
+void mpd_div(mpd_t *q, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_div_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b, mpd_context_t *ctx);
+void mpd_div_i32(mpd_t *result, const mpd_t *a, int32_t b, mpd_context_t *ctx);
+void mpd_div_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b, mpd_context_t *ctx);
+void mpd_div_u32(mpd_t *result, const mpd_t *a, uint32_t b, mpd_context_t *ctx);
+void mpd_divmod(mpd_t *q, mpd_t *r, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_divint(mpd_t *q, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_exp(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_fma(mpd_t *result, const mpd_t *a, const mpd_t *b, const mpd_t *c, mpd_context_t *ctx);
+void mpd_ln(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_log10(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_max(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_max_mag(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_min(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_min_mag(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_minus(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_mul(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_mul_ssize(mpd_t *result, const mpd_t *a, mpd_ssize_t b, mpd_context_t *ctx);
+void mpd_mul_i32(mpd_t *result, const mpd_t *a, int32_t b, mpd_context_t *ctx);
+void mpd_mul_uint(mpd_t *result, const mpd_t *a, mpd_uint_t b, mpd_context_t *ctx);
+void mpd_mul_u32(mpd_t *result, const mpd_t *a, uint32_t b, mpd_context_t *ctx);
+void mpd_next_minus(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_next_plus(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_next_toward(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_plus(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_pow(mpd_t *result, const mpd_t *base, const mpd_t *exp, mpd_context_t *ctx);
+void mpd_powmod(mpd_t *result, const mpd_t *base, const mpd_t *exp, const mpd_t *mod, mpd_context_t *ctx);
+void mpd_quantize(mpd_t *result, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_rescale(mpd_t *result, const mpd_t *a, mpd_ssize_t exp, mpd_context_t *ctx);
+void mpd_reduce(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_rem(mpd_t *r, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_rem_near(mpd_t *r, const mpd_t *a, const mpd_t *b, mpd_context_t *ctx);
+void mpd_round_to_intx(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_round_to_int(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_trunc(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_floor(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_ceil(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_sqrt(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+void mpd_invroot(mpd_t *result, const mpd_t *a, mpd_context_t *ctx);
+
+
+/******************************************************************************/
+/* Configuration specific */
+/******************************************************************************/
+
+#ifdef CONFIG_64
+void mpd_qsset_i64(mpd_t *result, int64_t a, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qsset_u64(mpd_t *result, uint64_t a, const mpd_context_t *ctx, uint32_t *status);
+int64_t mpd_qget_i64(const mpd_t *dec, uint32_t *status);
+uint64_t mpd_qget_u64(const mpd_t *dec, uint32_t *status);
+
+void mpd_qadd_i64(mpd_t *result, const mpd_t *a, int64_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qadd_u64(mpd_t *result, const mpd_t *a, uint64_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qsub_i64(mpd_t *result, const mpd_t *a, int64_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qsub_u64(mpd_t *result, const mpd_t *a, uint64_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qmul_i64(mpd_t *result, const mpd_t *a, int64_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qmul_u64(mpd_t *result, const mpd_t *a, uint64_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qdiv_i64(mpd_t *result, const mpd_t *a, int64_t b, const mpd_context_t *ctx, uint32_t *status);
+void mpd_qdiv_u64(mpd_t *result, const mpd_t *a, uint64_t b, const mpd_context_t *ctx, uint32_t *status);
+
+void mpd_sset_i64(mpd_t *result, int64_t a, mpd_context_t *ctx);
+void mpd_sset_u64(mpd_t *result, uint64_t a, mpd_context_t *ctx);
+int64_t mpd_get_i64(const mpd_t *a, mpd_context_t *ctx);
+uint64_t mpd_get_u64(const mpd_t *a, mpd_context_t *ctx);
+
+void mpd_add_i64(mpd_t *result, const mpd_t *a, int64_t b, mpd_context_t *ctx);
+void mpd_add_u64(mpd_t *result, const mpd_t *a, uint64_t b, mpd_context_t *ctx);
+void mpd_sub_i64(mpd_t *result, const mpd_t *a, int64_t b, mpd_context_t *ctx);
+void mpd_sub_u64(mpd_t *result, const mpd_t *a, uint64_t b, mpd_context_t *ctx);
+void mpd_div_i64(mpd_t *result, const mpd_t *a, int64_t b, mpd_context_t *ctx);
+void mpd_div_u64(mpd_t *result, const mpd_t *a, uint64_t b, mpd_context_t *ctx);
+void mpd_mul_i64(mpd_t *result, const mpd_t *a, int64_t b, mpd_context_t *ctx);
+void mpd_mul_u64(mpd_t *result, const mpd_t *a, uint64_t b, mpd_context_t *ctx);
+#else
+int32_t mpd_qget_i32(const mpd_t *dec, uint32_t *status);
+uint32_t mpd_qget_u32(const mpd_t *dec, uint32_t *status);
+int32_t mpd_get_i32(const mpd_t *a, mpd_context_t *ctx);
+uint32_t mpd_get_u32(const mpd_t *a, mpd_context_t *ctx);
+#endif
+
+
+/******************************************************************************/
+/* Get attributes of a decimal */
+/******************************************************************************/
+
+EXTINLINE mpd_ssize_t mpd_adjexp(const mpd_t *dec);
+EXTINLINE mpd_ssize_t mpd_etiny(const mpd_context_t *ctx);
+EXTINLINE mpd_ssize_t mpd_etop(const mpd_context_t *ctx);
+EXTINLINE mpd_uint_t mpd_msword(const mpd_t *dec);
+EXTINLINE int mpd_word_digits(mpd_uint_t word);
+/* most significant digit of a word */
+EXTINLINE mpd_uint_t mpd_msd(mpd_uint_t word);
+/* least significant digit of a word */
+EXTINLINE mpd_uint_t mpd_lsd(mpd_uint_t word);
+/* coefficient size needed to store 'digits' */
+EXTINLINE mpd_ssize_t mpd_digits_to_size(mpd_ssize_t digits);
+/* number of digits in the exponent, undefined for MPD_SSIZE_MIN */
+EXTINLINE int mpd_exp_digits(mpd_ssize_t exp);
+EXTINLINE int mpd_iscanonical(const mpd_t *dec UNUSED);
+EXTINLINE int mpd_isfinite(const mpd_t *dec);
+EXTINLINE int mpd_isinfinite(const mpd_t *dec);
+EXTINLINE int mpd_isinteger(const mpd_t *dec);
+EXTINLINE int mpd_isnan(const mpd_t *dec);
+EXTINLINE int mpd_isnegative(const mpd_t *dec);
+EXTINLINE int mpd_ispositive(const mpd_t *dec);
+EXTINLINE int mpd_isqnan(const mpd_t *dec);
+EXTINLINE int mpd_issigned(const mpd_t *dec);
+EXTINLINE int mpd_issnan(const mpd_t *dec);
+EXTINLINE int mpd_isspecial(const mpd_t *dec);
+EXTINLINE int mpd_iszero(const mpd_t *dec);
+/* undefined for special numbers */
+EXTINLINE int mpd_iszerocoeff(const mpd_t *dec);
+EXTINLINE int mpd_isnormal(const mpd_t *dec, const mpd_context_t *ctx);
+EXTINLINE int mpd_issubnormal(const mpd_t *dec, const mpd_context_t *ctx);
+/* odd word */
+EXTINLINE int mpd_isoddword(mpd_uint_t word);
+/* odd coefficient */
+EXTINLINE int mpd_isoddcoeff(const mpd_t *dec);
+/* odd decimal, only defined for integers */
+int mpd_isodd(const mpd_t *dec);
+/* even decimal, only defined for integers */
+int mpd_iseven(const mpd_t *dec);
+/* 0 if dec is positive, 1 if dec is negative */
+EXTINLINE uint8_t mpd_sign(const mpd_t *dec);
+/* 1 if dec is positive, -1 if dec is negative */
+EXTINLINE int mpd_arith_sign(const mpd_t *dec);
+EXTINLINE long mpd_radix(void);
+EXTINLINE int mpd_isdynamic(mpd_t *dec);
+EXTINLINE int mpd_isstatic(mpd_t *dec);
+EXTINLINE int mpd_isdynamic_data(mpd_t *dec);
+EXTINLINE int mpd_isstatic_data(mpd_t *dec);
+EXTINLINE int mpd_isshared_data(mpd_t *dec);
+EXTINLINE int mpd_isconst_data(mpd_t *dec);
+EXTINLINE mpd_ssize_t mpd_trail_zeros(const mpd_t *dec);
+
+
+/******************************************************************************/
+/* Set attributes of a decimal */
+/******************************************************************************/
+
+/* set number of decimal digits in the coefficient */
+EXTINLINE void mpd_setdigits(mpd_t *result);
+EXTINLINE void mpd_set_sign(mpd_t *result, uint8_t sign);
+/* copy sign from another decimal */
+EXTINLINE void mpd_signcpy(mpd_t *result, mpd_t *a);
+EXTINLINE void mpd_set_infinity(mpd_t *result);
+EXTINLINE void mpd_set_qnan(mpd_t *result);
+EXTINLINE void mpd_set_snan(mpd_t *result);
+EXTINLINE void mpd_set_negative(mpd_t *result);
+EXTINLINE void mpd_set_positive(mpd_t *result);
+EXTINLINE void mpd_set_dynamic(mpd_t *result);
+EXTINLINE void mpd_set_static(mpd_t *result);
+EXTINLINE void mpd_set_dynamic_data(mpd_t *result);
+EXTINLINE void mpd_set_static_data(mpd_t *result);
+EXTINLINE void mpd_set_shared_data(mpd_t *result);
+EXTINLINE void mpd_set_const_data(mpd_t *result);
+EXTINLINE void mpd_clear_flags(mpd_t *result);
+EXTINLINE void mpd_set_flags(mpd_t *result, uint8_t flags);
+EXTINLINE void mpd_copy_flags(mpd_t *result, const mpd_t *a);
+
+
+/******************************************************************************/
+/* Error Macros */
+/******************************************************************************/
+
+#define mpd_err_fatal(...) \
+ do {fprintf(stderr, "%s:%d: error: ", __FILE__, __LINE__); \
+ fprintf(stderr, __VA_ARGS__); fputc('\n', stderr); \
+ abort(); \
+ } while (0)
+#define mpd_err_warn(...) \
+ do {fprintf(stderr, "%s:%d: warning: ", __FILE__, __LINE__); \
+ fprintf(stderr, __VA_ARGS__); fputc('\n', stderr); \
+ } while (0)
+
+
+/******************************************************************************/
+/* Memory handling */
+/******************************************************************************/
+
+extern void *(* mpd_mallocfunc)(size_t size);
+extern void *(* mpd_callocfunc)(size_t nmemb, size_t size);
+extern void *(* mpd_reallocfunc)(void *ptr, size_t size);
+extern void (* mpd_free)(void *ptr);
+
+void *mpd_callocfunc_em(size_t nmemb, size_t size);
+
+void *mpd_alloc(mpd_size_t nmemb, mpd_size_t size);
+void *mpd_calloc(mpd_size_t nmemb, mpd_size_t size);
+void *mpd_realloc(void *ptr, mpd_size_t nmemb, mpd_size_t size, uint8_t *err);
+void *mpd_sh_alloc(mpd_size_t struct_size, mpd_size_t nmemb, mpd_size_t size);
+
+mpd_t *mpd_qnew(void);
+mpd_t *mpd_new(mpd_context_t *ctx);
+mpd_t *mpd_qnew_size(mpd_ssize_t size);
+void mpd_del(mpd_t *dec);
+
+void mpd_uint_zero(mpd_uint_t *dest, mpd_size_t len);
+int mpd_qresize(mpd_t *result, mpd_ssize_t size, uint32_t *status);
+int mpd_qresize_zero(mpd_t *result, mpd_ssize_t size, uint32_t *status);
+void mpd_minalloc(mpd_t *result);
+
+int mpd_resize(mpd_t *result, mpd_ssize_t size, mpd_context_t *ctx);
+int mpd_resize_zero(mpd_t *result, mpd_ssize_t size, mpd_context_t *ctx);
+
+
+#ifdef __cplusplus
+} /* END extern "C" */
+#endif
+
+
+#endif /* MPDECIMAL_H */
+
+
+
diff --git a/Modules/_decimal/libmpdec/numbertheory.c b/Modules/_decimal/libmpdec/numbertheory.c
new file mode 100644
index 0000000000..10ce6dc146
--- /dev/null
+++ b/Modules/_decimal/libmpdec/numbertheory.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdlib.h>
+#include <assert.h>
+#include "bits.h"
+#include "umodarith.h"
+#include "numbertheory.h"
+
+
+/* Bignum: Initialize the Number Theoretic Transform. */
+
+
+/*
+ * Return the nth root of unity in F(p). This corresponds to e**((2*pi*i)/n)
+ * in the Fourier transform. We have w**n == 1 (mod p).
+ * n := transform length.
+ * sign := -1 for forward transform, 1 for backward transform.
+ * modnum := one of {P1, P2, P3}.
+ */
+mpd_uint_t
+_mpd_getkernel(mpd_uint_t n, int sign, int modnum)
+{
+ mpd_uint_t umod, p, r, xi;
+#ifdef PPRO
+ double dmod;
+ uint32_t dinvmod[3];
+#endif
+
+ SETMODULUS(modnum);
+ r = mpd_roots[modnum]; /* primitive root of F(p) */
+ p = umod;
+ xi = (p-1) / n;
+
+ if (sign == -1)
+ return POWMOD(r, (p-1-xi));
+ else
+ return POWMOD(r, xi);
+}
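+
+/*
+ * Worked illustration (not part of the library): with xi = (p-1)/n, the two
+ * kernels returned above, r**(p-1-xi) for sign == -1 and r**xi for sign == 1,
+ * are modular inverses of each other, since their product is r**(p-1) == 1
+ * (mod p).  A hypothetical check using the helpers available in this file
+ * (for a transform length n that divides p-1):
+ *
+ *     SETMODULUS(P1);
+ *     kf = _mpd_getkernel(n, -1, P1);    forward kernel
+ *     kb = _mpd_getkernel(n,  1, P1);    backward kernel
+ *     assert(MULMOD(kf, kb) == 1);
+ */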
+
+/*
+ * Initialize and return transform parameters.
+ * n := transform length.
+ * sign := -1 for forward transform, 1 for backward transform.
+ * modnum := one of {P1, P2, P3}.
+ */
+struct fnt_params *
+_mpd_init_fnt_params(mpd_size_t n, int sign, int modnum)
+{
+ struct fnt_params *tparams;
+ mpd_uint_t umod;
+#ifdef PPRO
+ double dmod;
+ uint32_t dinvmod[3];
+#endif
+ mpd_uint_t kernel, w;
+ mpd_uint_t i;
+ mpd_size_t nhalf;
+
+ assert(ispower2(n));
+ assert(sign == -1 || sign == 1);
+ assert(P1 <= modnum && modnum <= P3);
+
+ nhalf = n/2;
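+    /* one allocation holding the struct and the trailing wtable[nhalf] */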
+ tparams = mpd_sh_alloc(sizeof *tparams, nhalf, sizeof (mpd_uint_t));
+ if (tparams == NULL) {
+ return NULL;
+ }
+
+ SETMODULUS(modnum);
+ kernel = _mpd_getkernel(n, sign, modnum);
+
+ tparams->modnum = modnum;
+ tparams->modulus = umod;
+ tparams->kernel = kernel;
+
+ /* wtable[] := w**0, w**1, ..., w**(nhalf-1) */
+ w = 1;
+ for (i = 0; i < nhalf; i++) {
+ tparams->wtable[i] = w;
+ w = MULMOD(w, kernel);
+ }
+
+ return tparams;
+}
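+
+/*
+ * Typical usage (mirroring sixstep.c): allocate the parameters once per
+ * transform length, run fnt_dif2() over each row, then release them:
+ *
+ *     if ((tparams = _mpd_init_fnt_params(R, -1, modnum)) == NULL) return 0;
+ *     for (x = a; x < a+n; x += R) fnt_dif2(x, R, tparams);
+ *     mpd_free(tparams);
+ */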
+
+/* Initialize wtable of size three. */
+void
+_mpd_init_w3table(mpd_uint_t w3table[3], int sign, int modnum)
+{
+ mpd_uint_t umod;
+#ifdef PPRO
+ double dmod;
+ uint32_t dinvmod[3];
+#endif
+ mpd_uint_t kernel;
+
+ SETMODULUS(modnum);
+ kernel = _mpd_getkernel(3, sign, modnum);
+
+ w3table[0] = 1;
+ w3table[1] = kernel;
+ w3table[2] = POWMOD(kernel, 2);
+}
+
+
diff --git a/Modules/_decimal/libmpdec/numbertheory.h b/Modules/_decimal/libmpdec/numbertheory.h
new file mode 100644
index 0000000000..f54d11dc38
--- /dev/null
+++ b/Modules/_decimal/libmpdec/numbertheory.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef NUMBER_THEORY_H
+#define NUMBER_THEORY_H
+
+
+#include "constants.h"
+#include "mpdecimal.h"
+
+
+/* transform parameters */
+struct fnt_params {
+ int modnum;
+ mpd_uint_t modulus;
+ mpd_uint_t kernel;
+ mpd_uint_t wtable[];
+};
+
+
+mpd_uint_t _mpd_getkernel(mpd_uint_t n, int sign, int modnum);
+struct fnt_params *_mpd_init_fnt_params(mpd_size_t n, int sign, int modnum);
+void _mpd_init_w3table(mpd_uint_t w3table[3], int sign, int modnum);
+
+
+#ifdef PPRO
+static inline void
+ppro_setmodulus(int modnum, mpd_uint_t *umod, double *dmod, uint32_t dinvmod[3])
+{
+ *dmod = *umod = mpd_moduli[modnum];
+ dinvmod[0] = mpd_invmoduli[modnum][0];
+ dinvmod[1] = mpd_invmoduli[modnum][1];
+ dinvmod[2] = mpd_invmoduli[modnum][2];
+}
+#else
+static inline void
+std_setmodulus(int modnum, mpd_uint_t *umod)
+{
+ *umod = mpd_moduli[modnum];
+}
+#endif
+
+
+#endif
+
+
diff --git a/Modules/_decimal/libmpdec/sixstep.c b/Modules/_decimal/libmpdec/sixstep.c
new file mode 100644
index 0000000000..7d0542d641
--- /dev/null
+++ b/Modules/_decimal/libmpdec/sixstep.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include "bits.h"
+#include "difradix2.h"
+#include "numbertheory.h"
+#include "transpose.h"
+#include "umodarith.h"
+#include "sixstep.h"
+
+
+/* Bignum: Cache efficient Matrix Fourier Transform for array lengths of the
+   form 2**n (see literature/six-step.txt). */
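+
+/*
+ * Reading aid (added for exposition): the array of length n = R*C is viewed
+ * as an R x C matrix in row-major order, and the forward transform below
+ * proceeds as
+ *
+ *   1) transpose the matrix,
+ *   2) length-R transform on each of the C rows,
+ *   3) transpose the matrix back,
+ *   4) multiply the element at (i, k) by the twiddle factor r**(i*k),
+ *   5) length-C transform on each of the R rows,
+ *   6) final transpose omitted, since an unordered result is sufficient
+ *      for convolution.
+ */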
+
+
+/* forward transform with sign = -1 */
+int
+six_step_fnt(mpd_uint_t *a, mpd_size_t n, int modnum)
+{
+ struct fnt_params *tparams;
+ mpd_size_t log2n, C, R;
+ mpd_uint_t kernel;
+ mpd_uint_t umod;
+#ifdef PPRO
+ double dmod;
+ uint32_t dinvmod[3];
+#endif
+ mpd_uint_t *x, w0, w1, wstep;
+ mpd_size_t i, k;
+
+
+ assert(ispower2(n));
+ assert(n >= 16);
+ assert(n <= MPD_MAXTRANSFORM_2N);
+
+ log2n = mpd_bsr(n);
+ C = ((mpd_size_t)1) << (log2n / 2); /* number of columns */
+ R = ((mpd_size_t)1) << (log2n - (log2n / 2)); /* number of rows */
+
+
+ /* Transpose the matrix. */
+ if (!transpose_pow2(a, R, C)) {
+ return 0;
+ }
+
+ /* Length R transform on the rows. */
+ if ((tparams = _mpd_init_fnt_params(R, -1, modnum)) == NULL) {
+ return 0;
+ }
+ for (x = a; x < a+n; x += R) {
+ fnt_dif2(x, R, tparams);
+ }
+
+ /* Transpose the matrix. */
+ if (!transpose_pow2(a, C, R)) {
+ mpd_free(tparams);
+ return 0;
+ }
+
+ /* Multiply each matrix element (addressed by i*C+k) by r**(i*k). */
+ SETMODULUS(modnum);
+ kernel = _mpd_getkernel(n, -1, modnum);
+ for (i = 1; i < R; i++) {
+ w0 = 1; /* r**(i*0): initial value for k=0 */
+ w1 = POWMOD(kernel, i); /* r**(i*1): initial value for k=1 */
+ wstep = MULMOD(w1, w1); /* r**(2*i) */
+ for (k = 0; k < C; k += 2) {
+ mpd_uint_t x0 = a[i*C+k];
+ mpd_uint_t x1 = a[i*C+k+1];
+ MULMOD2(&x0, w0, &x1, w1);
+ MULMOD2C(&w0, &w1, wstep); /* r**(i*(k+2)) = r**(i*k) * r**(2*i) */
+ a[i*C+k] = x0;
+ a[i*C+k+1] = x1;
+ }
+ }
+
+ /* Length C transform on the rows. */
+ if (C != R) {
+ mpd_free(tparams);
+ if ((tparams = _mpd_init_fnt_params(C, -1, modnum)) == NULL) {
+ return 0;
+ }
+ }
+ for (x = a; x < a+n; x += C) {
+ fnt_dif2(x, C, tparams);
+ }
+ mpd_free(tparams);
+
+#if 0
+ /* An unordered transform is sufficient for convolution. */
+ /* Transpose the matrix. */
+ if (!transpose_pow2(a, R, C)) {
+ return 0;
+ }
+#endif
+
+ return 1;
+}
+
+
+/* reverse transform, sign = 1 */
+int
+inv_six_step_fnt(mpd_uint_t *a, mpd_size_t n, int modnum)
+{
+ struct fnt_params *tparams;
+ mpd_size_t log2n, C, R;
+ mpd_uint_t kernel;
+ mpd_uint_t umod;
+#ifdef PPRO
+ double dmod;
+ uint32_t dinvmod[3];
+#endif
+ mpd_uint_t *x, w0, w1, wstep;
+ mpd_size_t i, k;
+
+
+ assert(ispower2(n));
+ assert(n >= 16);
+ assert(n <= MPD_MAXTRANSFORM_2N);
+
+ log2n = mpd_bsr(n);
+ C = ((mpd_size_t)1) << (log2n / 2); /* number of columns */
+ R = ((mpd_size_t)1) << (log2n - (log2n / 2)); /* number of rows */
+
+
+#if 0
+ /* An unordered transform is sufficient for convolution. */
+ /* Transpose the matrix, producing an R*C matrix. */
+ if (!transpose_pow2(a, C, R)) {
+ return 0;
+ }
+#endif
+
+ /* Length C transform on the rows. */
+ if ((tparams = _mpd_init_fnt_params(C, 1, modnum)) == NULL) {
+ return 0;
+ }
+ for (x = a; x < a+n; x += C) {
+ fnt_dif2(x, C, tparams);
+ }
+
+ /* Multiply each matrix element (addressed by i*C+k) by r**(i*k). */
+ SETMODULUS(modnum);
+ kernel = _mpd_getkernel(n, 1, modnum);
+ for (i = 1; i < R; i++) {
+ w0 = 1;
+ w1 = POWMOD(kernel, i);
+ wstep = MULMOD(w1, w1);
+ for (k = 0; k < C; k += 2) {
+ mpd_uint_t x0 = a[i*C+k];
+ mpd_uint_t x1 = a[i*C+k+1];
+ MULMOD2(&x0, w0, &x1, w1);
+ MULMOD2C(&w0, &w1, wstep);
+ a[i*C+k] = x0;
+ a[i*C+k+1] = x1;
+ }
+ }
+
+ /* Transpose the matrix. */
+ if (!transpose_pow2(a, R, C)) {
+ mpd_free(tparams);
+ return 0;
+ }
+
+ /* Length R transform on the rows. */
+ if (R != C) {
+ mpd_free(tparams);
+ if ((tparams = _mpd_init_fnt_params(R, 1, modnum)) == NULL) {
+ return 0;
+ }
+ }
+ for (x = a; x < a+n; x += R) {
+ fnt_dif2(x, R, tparams);
+ }
+ mpd_free(tparams);
+
+ /* Transpose the matrix. */
+ if (!transpose_pow2(a, C, R)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+
diff --git a/Modules/_decimal/libmpdec/sixstep.h b/Modules/_decimal/libmpdec/sixstep.h
new file mode 100644
index 0000000000..4d251df059
--- /dev/null
+++ b/Modules/_decimal/libmpdec/sixstep.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef SIX_STEP_H
+#define SIX_STEP_H
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+
+
+int six_step_fnt(mpd_uint_t *a, mpd_size_t n, int modnum);
+int inv_six_step_fnt(mpd_uint_t *a, mpd_size_t n, int modnum);
+
+
+#endif
diff --git a/Modules/_decimal/libmpdec/transpose.c b/Modules/_decimal/libmpdec/transpose.c
new file mode 100644
index 0000000000..5e5d4b6625
--- /dev/null
+++ b/Modules/_decimal/libmpdec/transpose.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <assert.h>
+#include "bits.h"
+#include "constants.h"
+#include "typearith.h"
+#include "transpose.h"
+
+
+#define BUFSIZE 4096
+#define SIDE 128
+
+
+/* Bignum: The transpose functions are used for very large transforms
+ in sixstep.c and fourstep.c. */
+
+
+/* Definition of the matrix transpose */
+void
+std_trans(mpd_uint_t dest[], mpd_uint_t src[], mpd_size_t rows, mpd_size_t cols)
+{
+ mpd_size_t idest, isrc;
+ mpd_size_t r, c;
+
+ for (r = 0; r < rows; r++) {
+ isrc = r * cols;
+ idest = r;
+ for (c = 0; c < cols; c++) {
+ dest[idest] = src[isrc];
+ isrc += 1;
+ idest += rows;
+ }
+ }
+}
+
+/*
+ * Swap half-rows of 2^n * (2*2^n) matrix.
+ * FORWARD_CYCLE: even/odd permutation of the halfrows.
+ * BACKWARD_CYCLE: reverse the even/odd permutation.
+ */
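+/*
+ * Implementation note (added for exposition): the rows x (2*rows) matrix is
+ * treated as 2*rows half-rows of length rows, and this routine permutes
+ * those half-rows; transpose_pow2() below completes the transposition with
+ * two square block transposes.  The permutation is applied cycle by cycle,
+ * bouncing data between buf1 and buf2, while the 'done' bitmap records
+ * half-rows that are already in place.
+ */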
+static int
+swap_halfrows_pow2(mpd_uint_t *matrix, mpd_size_t rows, mpd_size_t cols, int dir)
+{
+ mpd_uint_t buf1[BUFSIZE];
+ mpd_uint_t buf2[BUFSIZE];
+ mpd_uint_t *readbuf, *writebuf, *hp;
+ mpd_size_t *done, dbits;
+ mpd_size_t b = BUFSIZE, stride;
+ mpd_size_t hn, hmax; /* halfrow number */
+ mpd_size_t m, r=0;
+ mpd_size_t offset;
+ mpd_size_t next;
+
+
+ assert(cols == mul_size_t(2, rows));
+
+ if (dir == FORWARD_CYCLE) {
+ r = rows;
+ }
+ else if (dir == BACKWARD_CYCLE) {
+ r = 2;
+ }
+ else {
+ abort(); /* GCOV_NOT_REACHED */
+ }
+
+ m = cols - 1;
+ hmax = rows; /* cycles start at odd halfrows */
+ dbits = 8 * sizeof *done;
+ if ((done = mpd_calloc(hmax/(sizeof *done) + 1, sizeof *done)) == NULL) {
+ return 0;
+ }
+
+ for (hn = 1; hn <= hmax; hn += 2) {
+
+ if (done[hn/dbits] & mpd_bits[hn%dbits]) {
+ continue;
+ }
+
+ readbuf = buf1; writebuf = buf2;
+
+ for (offset = 0; offset < cols/2; offset += b) {
+
+ stride = (offset + b < cols/2) ? b : cols/2-offset;
+
+ hp = matrix + hn*cols/2;
+ memcpy(readbuf, hp+offset, stride*(sizeof *readbuf));
+ pointerswap(&readbuf, &writebuf);
+
+ next = mulmod_size_t(hn, r, m);
+ hp = matrix + next*cols/2;
+
+ while (next != hn) {
+
+ memcpy(readbuf, hp+offset, stride*(sizeof *readbuf));
+ memcpy(hp+offset, writebuf, stride*(sizeof *writebuf));
+ pointerswap(&readbuf, &writebuf);
+
+ done[next/dbits] |= mpd_bits[next%dbits];
+
+ next = mulmod_size_t(next, r, m);
+ hp = matrix + next*cols/2;
+
+ }
+
+ memcpy(hp+offset, writebuf, stride*(sizeof *writebuf));
+
+ done[hn/dbits] |= mpd_bits[hn%dbits];
+ }
+ }
+
+ mpd_free(done);
+ return 1;
+}
+
+/* In-place transpose of a square matrix */
+static inline void
+squaretrans(mpd_uint_t *buf, mpd_size_t cols)
+{
+ mpd_uint_t tmp;
+ mpd_size_t idest, isrc;
+ mpd_size_t r, c;
+
+ for (r = 0; r < cols; r++) {
+ c = r+1;
+ isrc = r*cols + c;
+ idest = c*cols + r;
+ for (c = r+1; c < cols; c++) {
+ tmp = buf[isrc];
+ buf[isrc] = buf[idest];
+ buf[idest] = tmp;
+ isrc += 1;
+ idest += cols;
+ }
+ }
+}
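+
+/*
+ * Example (illustration only): for cols == 3 and buf == {0,1,2,3,4,5,6,7,8},
+ * the loops swap the index pairs (1,3), (2,6) and (5,7), leaving
+ * {0,3,6,1,4,7,2,5,8}, i.e. the transposed 3x3 matrix.
+ */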
+
+/*
+ * Transpose 2^n * 2^n matrix. For cache efficiency, the matrix is split into
+ * square blocks with side length 'SIDE'. First, the blocks are transposed,
+ * then a square transposition is done on each individual block.
+ */
+static void
+squaretrans_pow2(mpd_uint_t *matrix, mpd_size_t size)
+{
+ mpd_uint_t buf1[SIDE*SIDE];
+ mpd_uint_t buf2[SIDE*SIDE];
+ mpd_uint_t *to, *from;
+ mpd_size_t b = size;
+ mpd_size_t r, c;
+ mpd_size_t i;
+
+ while (b > SIDE) b >>= 1;
+
+ for (r = 0; r < size; r += b) {
+
+ for (c = r; c < size; c += b) {
+
+ from = matrix + r*size + c;
+ to = buf1;
+ for (i = 0; i < b; i++) {
+ memcpy(to, from, b*(sizeof *to));
+ from += size;
+ to += b;
+ }
+ squaretrans(buf1, b);
+
+ if (r == c) {
+ to = matrix + r*size + c;
+ from = buf1;
+ for (i = 0; i < b; i++) {
+ memcpy(to, from, b*(sizeof *to));
+ from += b;
+ to += size;
+ }
+ continue;
+ }
+ else {
+ from = matrix + c*size + r;
+ to = buf2;
+ for (i = 0; i < b; i++) {
+ memcpy(to, from, b*(sizeof *to));
+ from += size;
+ to += b;
+ }
+ squaretrans(buf2, b);
+
+ to = matrix + c*size + r;
+ from = buf1;
+ for (i = 0; i < b; i++) {
+ memcpy(to, from, b*(sizeof *to));
+ from += b;
+ to += size;
+ }
+
+ to = matrix + r*size + c;
+ from = buf2;
+ for (i = 0; i < b; i++) {
+ memcpy(to, from, b*(sizeof *to));
+ from += b;
+ to += size;
+ }
+ }
+ }
+ }
+
+}
+
+/*
+ * In-place transposition of a 2^n x 2^n or a 2^n x (2*2^n)
+ * or a (2*2^n) x 2^n matrix.
+ */
+int
+transpose_pow2(mpd_uint_t *matrix, mpd_size_t rows, mpd_size_t cols)
+{
+ mpd_size_t size = mul_size_t(rows, cols);
+
+ assert(ispower2(rows));
+ assert(ispower2(cols));
+
+ if (cols == rows) {
+ squaretrans_pow2(matrix, rows);
+ }
+ else if (cols == mul_size_t(2, rows)) {
+ if (!swap_halfrows_pow2(matrix, rows, cols, FORWARD_CYCLE)) {
+ return 0;
+ }
+ squaretrans_pow2(matrix, rows);
+ squaretrans_pow2(matrix+(size/2), rows);
+ }
+ else if (rows == mul_size_t(2, cols)) {
+ squaretrans_pow2(matrix, cols);
+ squaretrans_pow2(matrix+(size/2), cols);
+ if (!swap_halfrows_pow2(matrix, cols, rows, BACKWARD_CYCLE)) {
+ return 0;
+ }
+ }
+ else {
+ abort(); /* GCOV_NOT_REACHED */
+ }
+
+ return 1;
+}
+
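+/*
+ * Hypothetical self-check (illustration only, not part of libmpdec):
+ * transpose a small 4 x 8 matrix in place and compare the result with the
+ * straightforward out-of-place std_trans().
+ */
+#if 0
+static void
+example_transpose_check(void)
+{
+    mpd_uint_t m[32], ref[32];
+    mpd_size_t i;
+
+    for (i = 0; i < 32; i++) {
+        m[i] = (mpd_uint_t)i;
+    }
+    std_trans(ref, m, 4, 8);          /* ref holds the 8 x 4 transpose */
+    if (transpose_pow2(m, 4, 8)) {    /* in place, cols == 2*rows */
+        assert(memcmp(m, ref, sizeof m) == 0);
+    }
+}
+#endif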
+
diff --git a/Modules/_decimal/libmpdec/transpose.h b/Modules/_decimal/libmpdec/transpose.h
new file mode 100644
index 0000000000..dd0aec6600
--- /dev/null
+++ b/Modules/_decimal/libmpdec/transpose.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef TRANSPOSE_H
+#define TRANSPOSE_H
+
+
+#include "mpdecimal.h"
+#include <stdio.h>
+
+
+enum {FORWARD_CYCLE, BACKWARD_CYCLE};
+
+
+void std_trans(mpd_uint_t dest[], mpd_uint_t src[], mpd_size_t rows, mpd_size_t cols);
+int transpose_pow2(mpd_uint_t *matrix, mpd_size_t rows, mpd_size_t cols);
+void transpose_3xpow2(mpd_uint_t *matrix, mpd_size_t rows, mpd_size_t cols);
+
+
+static inline void pointerswap(mpd_uint_t **a, mpd_uint_t **b)
+{
+ mpd_uint_t *tmp;
+
+ tmp = *b;
+ *b = *a;
+ *a = tmp;
+}
+
+
+#endif
diff --git a/Modules/_decimal/libmpdec/typearith.h b/Modules/_decimal/libmpdec/typearith.h
new file mode 100644
index 0000000000..eeba8dd5ba
--- /dev/null
+++ b/Modules/_decimal/libmpdec/typearith.h
@@ -0,0 +1,669 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef TYPEARITH_H
+#define TYPEARITH_H
+
+
+#include "mpdecimal.h"
+
+
+/*****************************************************************************/
+/* Low level native arithmetic on basic types */
+/*****************************************************************************/
+
+
+/** ------------------------------------------------------------
+ ** Double width multiplication and division
+ ** ------------------------------------------------------------
+ */
+
+#if defined(CONFIG_64)
+#if defined(ANSI)
+#if defined(HAVE_UINT128_T)
+static inline void
+_mpd_mul_words(mpd_uint_t *hi, mpd_uint_t *lo, mpd_uint_t a, mpd_uint_t b)
+{
+ __uint128_t hl;
+
+ hl = (__uint128_t)a * b;
+
+ *hi = hl >> 64;
+ *lo = (mpd_uint_t)hl;
+}
+
+static inline void
+_mpd_div_words(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t hi, mpd_uint_t lo,
+ mpd_uint_t d)
+{
+ __uint128_t hl;
+
+ hl = ((__uint128_t)hi<<64) + lo;
+ *q = (mpd_uint_t)(hl / d); /* quotient is known to fit */
+ *r = (mpd_uint_t)(hl - (__uint128_t)(*q) * d);
+}
+#else
+static inline void
+_mpd_mul_words(mpd_uint_t *hi, mpd_uint_t *lo, mpd_uint_t a, mpd_uint_t b)
+{
+ uint32_t w[4], carry;
+ uint32_t ah, al, bh, bl;
+ uint64_t hl;
+
+ ah = (uint32_t)(a>>32); al = (uint32_t)a;
+ bh = (uint32_t)(b>>32); bl = (uint32_t)b;
+
+ hl = (uint64_t)al * bl;
+ w[0] = (uint32_t)hl;
+ carry = (uint32_t)(hl>>32);
+
+ hl = (uint64_t)ah * bl + carry;
+ w[1] = (uint32_t)hl;
+ w[2] = (uint32_t)(hl>>32);
+
+ hl = (uint64_t)al * bh + w[1];
+ w[1] = (uint32_t)hl;
+ carry = (uint32_t)(hl>>32);
+
+ hl = ((uint64_t)ah * bh + w[2]) + carry;
+ w[2] = (uint32_t)hl;
+ w[3] = (uint32_t)(hl>>32);
+
+ *hi = ((uint64_t)w[3]<<32) + w[2];
+ *lo = ((uint64_t)w[1]<<32) + w[0];
+}
+
+/*
+ * By Henry S. Warren: http://www.hackersdelight.org/HDcode/divlu.c.txt
+ * http://www.hackersdelight.org/permissions.htm:
+ * "You are free to use, copy, and distribute any of the code on this web
+ * site, whether modified by you or not. You need not give attribution."
+ *
+ * Slightly modified, comments are mine.
+ */
+static inline int
+nlz(uint64_t x)
+{
+ int n;
+
+ if (x == 0) return(64);
+
+ n = 0;
+ if (x <= 0x00000000FFFFFFFF) {n = n +32; x = x <<32;}
+ if (x <= 0x0000FFFFFFFFFFFF) {n = n +16; x = x <<16;}
+ if (x <= 0x00FFFFFFFFFFFFFF) {n = n + 8; x = x << 8;}
+ if (x <= 0x0FFFFFFFFFFFFFFF) {n = n + 4; x = x << 4;}
+ if (x <= 0x3FFFFFFFFFFFFFFF) {n = n + 2; x = x << 2;}
+ if (x <= 0x7FFFFFFFFFFFFFFF) {n = n + 1;}
+
+ return n;
+}
+
+static inline void
+_mpd_div_words(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t u1, mpd_uint_t u0,
+ mpd_uint_t v)
+{
+ const mpd_uint_t b = 4294967296;
+ mpd_uint_t un1, un0,
+ vn1, vn0,
+ q1, q0,
+ un32, un21, un10,
+ rhat, t;
+ int s;
+
+ assert(u1 < v);
+
+ s = nlz(v);
+ v = v << s;
+ vn1 = v >> 32;
+ vn0 = v & 0xFFFFFFFF;
+
+ t = (s == 0) ? 0 : u0 >> (64 - s);
+ un32 = (u1 << s) | t;
+ un10 = u0 << s;
+
+ un1 = un10 >> 32;
+ un0 = un10 & 0xFFFFFFFF;
+
+ q1 = un32 / vn1;
+ rhat = un32 - q1*vn1;
+again1:
+ if (q1 >= b || q1*vn0 > b*rhat + un1) {
+ q1 = q1 - 1;
+ rhat = rhat + vn1;
+ if (rhat < b) goto again1;
+ }
+
+ /*
+ * Before again1 we had:
+ * (1) q1*vn1 + rhat = un32
+ * (2) q1*vn1*b + rhat*b + un1 = un32*b + un1
+ *
+ * The statements inside the if-clause do not change the value
+ * of the left-hand side of (2), and the loop is only exited
+ * if q1*vn0 <= rhat*b + un1, so:
+ *
+ * (3) q1*vn1*b + q1*vn0 <= un32*b + un1
+ * (4) q1*v <= un32*b + un1
+ * (5) 0 <= un32*b + un1 - q1*v
+ *
+ * By (5) we are certain that the possible add-back step from
+ * Knuth's algorithm D is never required.
+ *
+ * Since the final quotient is less than 2**64, the following
+ * must be true:
+ *
+ * (6) un32*b + un1 - q1*v <= UINT64_MAX
+ *
+ * This means that in the following line, the high words
+ * of un32*b and q1*v can be discarded without any effect
+ * on the result.
+ */
+ un21 = un32*b + un1 - q1*v;
+
+ q0 = un21 / vn1;
+ rhat = un21 - q0*vn1;
+again2:
+ if (q0 >= b || q0*vn0 > b*rhat + un0) {
+ q0 = q0 - 1;
+ rhat = rhat + vn1;
+ if (rhat < b) goto again2;
+ }
+
+ *q = q1*b + q0;
+ *r = (un21*b + un0 - q0*v) >> s;
+}
+#endif
+
+/* END ANSI */
+#elif defined(ASM)
+static inline void
+_mpd_mul_words(mpd_uint_t *hi, mpd_uint_t *lo, mpd_uint_t a, mpd_uint_t b)
+{
+ mpd_uint_t h, l;
+
+ asm ( "mulq %3\n\t"
+ : "=d" (h), "=a" (l)
+ : "%a" (a), "rm" (b)
+ : "cc"
+ );
+
+ *hi = h;
+ *lo = l;
+}
+
+static inline void
+_mpd_div_words(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t hi, mpd_uint_t lo,
+ mpd_uint_t d)
+{
+ mpd_uint_t qq, rr;
+
+ asm ( "divq %4\n\t"
+ : "=a" (qq), "=d" (rr)
+ : "a" (lo), "d" (hi), "rm" (d)
+ : "cc"
+ );
+
+ *q = qq;
+ *r = rr;
+}
+/* END GCC ASM */
+#elif defined(MASM)
+#include <intrin.h>
+#pragma intrinsic(_umul128)
+
+static inline void
+_mpd_mul_words(mpd_uint_t *hi, mpd_uint_t *lo, mpd_uint_t a, mpd_uint_t b)
+{
+ *lo = _umul128(a, b, hi);
+}
+
+void _mpd_div_words(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t hi, mpd_uint_t lo,
+ mpd_uint_t d);
+
+/* END MASM (_MSC_VER) */
+#else
+ #error "need platform specific 128 bit multiplication and division"
+#endif
+
+#define DIVMOD(q, r, v, d) *q = v / d; *r = v - *q * d
+static inline void
+_mpd_divmod_pow10(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t v, mpd_uint_t exp)
+{
+ assert(exp <= 19);
+
+ if (exp <= 9) {
+ if (exp <= 4) {
+ switch (exp) {
+ case 0: *q = v; *r = 0; break;
+ case 1: DIVMOD(q, r, v, 10UL); break;
+ case 2: DIVMOD(q, r, v, 100UL); break;
+ case 3: DIVMOD(q, r, v, 1000UL); break;
+ case 4: DIVMOD(q, r, v, 10000UL); break;
+ }
+ }
+ else {
+ switch (exp) {
+ case 5: DIVMOD(q, r, v, 100000UL); break;
+ case 6: DIVMOD(q, r, v, 1000000UL); break;
+ case 7: DIVMOD(q, r, v, 10000000UL); break;
+ case 8: DIVMOD(q, r, v, 100000000UL); break;
+ case 9: DIVMOD(q, r, v, 1000000000UL); break;
+ }
+ }
+ }
+ else {
+ if (exp <= 14) {
+ switch (exp) {
+ case 10: DIVMOD(q, r, v, 10000000000ULL); break;
+ case 11: DIVMOD(q, r, v, 100000000000ULL); break;
+ case 12: DIVMOD(q, r, v, 1000000000000ULL); break;
+ case 13: DIVMOD(q, r, v, 10000000000000ULL); break;
+ case 14: DIVMOD(q, r, v, 100000000000000ULL); break;
+ }
+ }
+ else {
+ switch (exp) {
+ case 15: DIVMOD(q, r, v, 1000000000000000ULL); break;
+ case 16: DIVMOD(q, r, v, 10000000000000000ULL); break;
+ case 17: DIVMOD(q, r, v, 100000000000000000ULL); break;
+ case 18: DIVMOD(q, r, v, 1000000000000000000ULL); break;
+ case 19: DIVMOD(q, r, v, 10000000000000000000ULL); break; /* GCOV_NOT_REACHED */
+ }
+ }
+ }
+}
+
+/* END CONFIG_64 */
+#elif defined(CONFIG_32)
+#if defined(ANSI)
+#if !defined(LEGACY_COMPILER)
+static inline void
+_mpd_mul_words(mpd_uint_t *hi, mpd_uint_t *lo, mpd_uint_t a, mpd_uint_t b)
+{
+ mpd_uuint_t hl;
+
+ hl = (mpd_uuint_t)a * b;
+
+ *hi = hl >> 32;
+ *lo = (mpd_uint_t)hl;
+}
+
+static inline void
+_mpd_div_words(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t hi, mpd_uint_t lo,
+ mpd_uint_t d)
+{
+ mpd_uuint_t hl;
+
+ hl = ((mpd_uuint_t)hi<<32) + lo;
+ *q = (mpd_uint_t)(hl / d); /* quotient is known to fit */
+ *r = (mpd_uint_t)(hl - (mpd_uuint_t)(*q) * d);
+}
+/* END ANSI + uint64_t */
+#else
+static inline void
+_mpd_mul_words(mpd_uint_t *hi, mpd_uint_t *lo, mpd_uint_t a, mpd_uint_t b)
+{
+ uint16_t w[4], carry;
+ uint16_t ah, al, bh, bl;
+ uint32_t hl;
+
+ ah = (uint16_t)(a>>16); al = (uint16_t)a;
+ bh = (uint16_t)(b>>16); bl = (uint16_t)b;
+
+ hl = (uint32_t)al * bl;
+ w[0] = (uint16_t)hl;
+ carry = (uint16_t)(hl>>16);
+
+ hl = (uint32_t)ah * bl + carry;
+ w[1] = (uint16_t)hl;
+ w[2] = (uint16_t)(hl>>16);
+
+ hl = (uint32_t)al * bh + w[1];
+ w[1] = (uint16_t)hl;
+ carry = (uint16_t)(hl>>16);
+
+ hl = ((uint32_t)ah * bh + w[2]) + carry;
+ w[2] = (uint16_t)hl;
+ w[3] = (uint16_t)(hl>>16);
+
+ *hi = ((uint32_t)w[3]<<16) + w[2];
+ *lo = ((uint32_t)w[1]<<16) + w[0];
+}
+
+/*
+ * By Henry S. Warren: http://www.hackersdelight.org/HDcode/divlu.c.txt
+ * http://www.hackersdelight.org/permissions.htm:
+ * "You are free to use, copy, and distribute any of the code on this web
+ * site, whether modified by you or not. You need not give attribution."
+ *
+ * Slightly modified, comments are mine.
+ */
+static inline int
+nlz(uint32_t x)
+{
+ int n;
+
+ if (x == 0) return(32);
+
+ n = 0;
+ if (x <= 0x0000FFFF) {n = n +16; x = x <<16;}
+ if (x <= 0x00FFFFFF) {n = n + 8; x = x << 8;}
+ if (x <= 0x0FFFFFFF) {n = n + 4; x = x << 4;}
+ if (x <= 0x3FFFFFFF) {n = n + 2; x = x << 2;}
+ if (x <= 0x7FFFFFFF) {n = n + 1;}
+
+ return n;
+}
+
+static inline void
+_mpd_div_words(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t u1, mpd_uint_t u0,
+ mpd_uint_t v)
+{
+ const mpd_uint_t b = 65536;
+ mpd_uint_t un1, un0,
+ vn1, vn0,
+ q1, q0,
+ un32, un21, un10,
+ rhat, t;
+ int s;
+
+ assert(u1 < v);
+
+ s = nlz(v);
+ v = v << s;
+ vn1 = v >> 16;
+ vn0 = v & 0xFFFF;
+
+ t = (s == 0) ? 0 : u0 >> (32 - s);
+ un32 = (u1 << s) | t;
+ un10 = u0 << s;
+
+ un1 = un10 >> 16;
+ un0 = un10 & 0xFFFF;
+
+ q1 = un32 / vn1;
+ rhat = un32 - q1*vn1;
+again1:
+ if (q1 >= b || q1*vn0 > b*rhat + un1) {
+ q1 = q1 - 1;
+ rhat = rhat + vn1;
+ if (rhat < b) goto again1;
+ }
+
+ /*
+ * Before again1 we had:
+ * (1) q1*vn1 + rhat = un32
+ * (2) q1*vn1*b + rhat*b + un1 = un32*b + un1
+ *
+ * The statements inside the if-clause do not change the value
+ * of the left-hand side of (2), and the loop is only exited
+ * if q1*vn0 <= rhat*b + un1, so:
+ *
+ * (3) q1*vn1*b + q1*vn0 <= un32*b + un1
+ * (4) q1*v <= un32*b + un1
+ * (5) 0 <= un32*b + un1 - q1*v
+ *
+ * By (5) we are certain that the possible add-back step from
+ * Knuth's algorithm D is never required.
+ *
+ * Since the final quotient is less than 2**32, the following
+ * must be true:
+ *
+ * (6) un32*b + un1 - q1*v <= UINT32_MAX
+ *
+ * This means that in the following line, the high words
+ * of un32*b and q1*v can be discarded without any effect
+ * on the result.
+ */
+ un21 = un32*b + un1 - q1*v;
+
+ q0 = un21 / vn1;
+ rhat = un21 - q0*vn1;
+again2:
+ if (q0 >= b || q0*vn0 > b*rhat + un0) {
+ q0 = q0 - 1;
+ rhat = rhat + vn1;
+ if (rhat < b) goto again2;
+ }
+
+ *q = q1*b + q0;
+ *r = (un21*b + un0 - q0*v) >> s;
+}
+#endif /* END ANSI + LEGACY_COMPILER */
+
+/* END ANSI */
+#elif defined(ASM)
+static inline void
+_mpd_mul_words(mpd_uint_t *hi, mpd_uint_t *lo, mpd_uint_t a, mpd_uint_t b)
+{
+ mpd_uint_t h, l;
+
+ asm ( "mull %3\n\t"
+ : "=d" (h), "=a" (l)
+ : "%a" (a), "rm" (b)
+ : "cc"
+ );
+
+ *hi = h;
+ *lo = l;
+}
+
+static inline void
+_mpd_div_words(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t hi, mpd_uint_t lo,
+ mpd_uint_t d)
+{
+ mpd_uint_t qq, rr;
+
+ asm ( "divl %4\n\t"
+ : "=a" (qq), "=d" (rr)
+ : "a" (lo), "d" (hi), "rm" (d)
+ : "cc"
+ );
+
+ *q = qq;
+ *r = rr;
+}
+/* END GCC ASM */
+#elif defined(MASM)
+static inline void __cdecl
+_mpd_mul_words(mpd_uint_t *hi, mpd_uint_t *lo, mpd_uint_t a, mpd_uint_t b)
+{
+ mpd_uint_t h, l;
+
+ __asm {
+ mov eax, a
+ mul b
+ mov h, edx
+ mov l, eax
+ }
+
+ *hi = h;
+ *lo = l;
+}
+
+static inline void __cdecl
+_mpd_div_words(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t hi, mpd_uint_t lo,
+ mpd_uint_t d)
+{
+ mpd_uint_t qq, rr;
+
+ __asm {
+ mov eax, lo
+ mov edx, hi
+ div d
+ mov qq, eax
+ mov rr, edx
+ }
+
+ *q = qq;
+ *r = rr;
+}
+/* END MASM (_MSC_VER) */
+#else
+ #error "need platform specific 64 bit multiplication and division"
+#endif
+
+#define DIVMOD(q, r, v, d) *q = v / d; *r = v - *q * d
+static inline void
+_mpd_divmod_pow10(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t v, mpd_uint_t exp)
+{
+ assert(exp <= 9);
+
+ if (exp <= 4) {
+ switch (exp) {
+ case 0: *q = v; *r = 0; break;
+ case 1: DIVMOD(q, r, v, 10UL); break;
+ case 2: DIVMOD(q, r, v, 100UL); break;
+ case 3: DIVMOD(q, r, v, 1000UL); break;
+ case 4: DIVMOD(q, r, v, 10000UL); break;
+ }
+ }
+ else {
+ switch (exp) {
+ case 5: DIVMOD(q, r, v, 100000UL); break;
+ case 6: DIVMOD(q, r, v, 1000000UL); break;
+ case 7: DIVMOD(q, r, v, 10000000UL); break;
+ case 8: DIVMOD(q, r, v, 100000000UL); break;
+ case 9: DIVMOD(q, r, v, 1000000000UL); break; /* GCOV_NOT_REACHED */
+ }
+ }
+}
+/* END CONFIG_32 */
+
+/* NO CONFIG */
+#else
+ #error "define CONFIG_64 or CONFIG_32"
+#endif /* CONFIG */
+
+
+static inline void
+_mpd_div_word(mpd_uint_t *q, mpd_uint_t *r, mpd_uint_t v, mpd_uint_t d)
+{
+ *q = v / d;
+ *r = v - *q * d;
+}
+
+static inline void
+_mpd_idiv_word(mpd_ssize_t *q, mpd_ssize_t *r, mpd_ssize_t v, mpd_ssize_t d)
+{
+ *q = v / d;
+ *r = v - *q * d;
+}
+
+
+/** ------------------------------------------------------------
+ ** Arithmetic with overflow checking
+ ** ------------------------------------------------------------
+ */
+
+/* The following functions call abort() (via mpd_err_fatal()) in case of an
+   overflow. If the library is used correctly (i.e. with valid context
+   parameters), such overflows cannot occur. The functions are used
+   as sanity checks in a couple of strategic places and should
+   be viewed as a handwritten version of gcc's -ftrapv option. */
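+
+/* Hypothetical usage sketch: size computations go through these helpers so
+   that an overflow aborts instead of silently wrapping, e.g.
+
+       mpd_size_t n = mul_size_t(rows, cols);
+
+   as done in transpose.c. */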
+
+static inline mpd_size_t
+add_size_t(mpd_size_t a, mpd_size_t b)
+{
+ if (a > MPD_SIZE_MAX - b) {
+ mpd_err_fatal("add_size_t(): overflow: check the context"); /* GCOV_NOT_REACHED */
+ }
+ return a + b;
+}
+
+static inline mpd_size_t
+sub_size_t(mpd_size_t a, mpd_size_t b)
+{
+ if (b > a) {
+ mpd_err_fatal("sub_size_t(): overflow: check the context"); /* GCOV_NOT_REACHED */
+ }
+ return a - b;
+}
+
+#if MPD_SIZE_MAX != MPD_UINT_MAX
+ #error "adapt mul_size_t() and mulmod_size_t()"
+#endif
+
+static inline mpd_size_t
+mul_size_t(mpd_size_t a, mpd_size_t b)
+{
+ mpd_uint_t hi, lo;
+
+ _mpd_mul_words(&hi, &lo, (mpd_uint_t)a, (mpd_uint_t)b);
+ if (hi) {
+ mpd_err_fatal("mul_size_t(): overflow: check the context"); /* GCOV_NOT_REACHED */
+ }
+ return lo;
+}
+
+static inline mpd_size_t
+add_size_t_overflow(mpd_size_t a, mpd_size_t b, mpd_size_t *overflow)
+{
+ mpd_size_t ret;
+
+ *overflow = 0;
+ ret = a + b;
+ if (ret < a) *overflow = 1;
+ return ret;
+}
+
+static inline mpd_size_t
+mul_size_t_overflow(mpd_size_t a, mpd_size_t b, mpd_size_t *overflow)
+{
+ mpd_uint_t lo;
+
+ _mpd_mul_words((mpd_uint_t *)overflow, &lo, (mpd_uint_t)a,
+ (mpd_uint_t)b);
+ return lo;
+}
+
+static inline mpd_ssize_t
+mod_mpd_ssize_t(mpd_ssize_t a, mpd_ssize_t m)
+{
+ mpd_ssize_t r = a % m;
+ return (r < 0) ? r + m : r;
+}
+
+static inline mpd_size_t
+mulmod_size_t(mpd_size_t a, mpd_size_t b, mpd_size_t m)
+{
+ mpd_uint_t hi, lo;
+ mpd_uint_t q, r;
+
+ _mpd_mul_words(&hi, &lo, (mpd_uint_t)a, (mpd_uint_t)b);
+ _mpd_div_words(&q, &r, hi, lo, (mpd_uint_t)m);
+
+ return r;
+}
+
+
+#endif /* TYPEARITH_H */
+
+
+
diff --git a/Modules/_decimal/libmpdec/umodarith.h b/Modules/_decimal/libmpdec/umodarith.h
new file mode 100644
index 0000000000..06cde0a7ad
--- /dev/null
+++ b/Modules/_decimal/libmpdec/umodarith.h
@@ -0,0 +1,650 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef UMODARITH_H
+#define UMODARITH_H
+
+
+#include "constants.h"
+#include "mpdecimal.h"
+#include "typearith.h"
+
+
+/* Bignum: Low level routines for unsigned modular arithmetic. These are
+ used in the fast convolution functions for very large coefficients. */
+
+
+/**************************************************************************/
+/* ANSI modular arithmetic */
+/**************************************************************************/
+
+
+/*
+ * Restrictions: a < m and b < m
+ * ACL2 proof: umodarith.lisp: addmod-correct
+ */
+static inline mpd_uint_t
+addmod(mpd_uint_t a, mpd_uint_t b, mpd_uint_t m)
+{
+ mpd_uint_t s;
+
+ s = a + b;
+ s = (s < a) ? s - m : s;
+ s = (s >= m) ? s - m : s;
+
+ return s;
+}
+
+/*
+ * Restrictions: a < m and b < m
+ * ACL2 proof: umodarith.lisp: submod-2-correct
+ */
+static inline mpd_uint_t
+submod(mpd_uint_t a, mpd_uint_t b, mpd_uint_t m)
+{
+ mpd_uint_t d;
+
+ d = a - b;
+ d = (a < b) ? d + m : d;
+
+ return d;
+}
+
+/*
+ * Restrictions: a < 2m and b < 2m
+ * ACL2 proof: umodarith.lisp: section ext-submod
+ */
+static inline mpd_uint_t
+ext_submod(mpd_uint_t a, mpd_uint_t b, mpd_uint_t m)
+{
+ mpd_uint_t d;
+
+ a = (a >= m) ? a - m : a;
+ b = (b >= m) ? b - m : b;
+
+ d = a - b;
+ d = (a < b) ? d + m : d;
+
+ return d;
+}
+
+/*
+ * Reduce double word modulo m.
+ * Restrictions: m != 0
+ * ACL2 proof: umodarith.lisp: section dw-reduce
+ */
+static inline mpd_uint_t
+dw_reduce(mpd_uint_t hi, mpd_uint_t lo, mpd_uint_t m)
+{
+ mpd_uint_t r1, r2, w;
+
+ _mpd_div_word(&w, &r1, hi, m);
+ _mpd_div_words(&w, &r2, r1, lo, m);
+
+ return r2;
+}
+
+/*
+ * Subtract double word from a.
+ * Restrictions: a < m
+ * ACL2 proof: umodarith.lisp: section dw-submod
+ */
+static inline mpd_uint_t
+dw_submod(mpd_uint_t a, mpd_uint_t hi, mpd_uint_t lo, mpd_uint_t m)
+{
+ mpd_uint_t d, r;
+
+ r = dw_reduce(hi, lo, m);
+ d = a - r;
+ d = (a < r) ? d + m : d;
+
+ return d;
+}
+
+#ifdef CONFIG_64
+
+/**************************************************************************/
+/* 64-bit modular arithmetic */
+/**************************************************************************/
+
+/*
+ * A proof of the algorithm is in literature/mulmod-64.txt. An ACL2
+ * proof is in umodarith.lisp: section "Fast modular reduction".
+ *
+ * Algorithm: calculate (a * b) % p:
+ *
+ * a) hi, lo <- a * b # Calculate a * b.
+ *
+ * b) hi, lo <- R(hi, lo) # Reduce modulo p.
+ *
+ * c) Repeat step b) until 0 <= hi * 2**64 + lo < 2*p.
+ *
+ * d) If the result is less than p, return lo. Otherwise return lo - p.
+ */
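+
+/*
+ * Sketch of the reduction R(hi, lo) (added for exposition; see
+ * literature/mulmod-64.txt): each modulus handled below is of the form
+ * p = 2**64 - 2**k + 1 with k in {32, 34, 40}, so 2**64 == 2**k - 1 (mod p)
+ * and therefore
+ *
+ *     hi * 2**64 + lo == hi * 2**k - hi + lo  (mod p).
+ *
+ * Each "reduction" block computes exactly this: x = lo - hi, y = hi << k,
+ * lo = y + x, with borrow/carry corrections applied to the new hi word.
+ */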
+
+static inline mpd_uint_t
+x64_mulmod(mpd_uint_t a, mpd_uint_t b, mpd_uint_t m)
+{
+ mpd_uint_t hi, lo, x, y;
+
+
+ _mpd_mul_words(&hi, &lo, a, b);
+
+ if (m & (1ULL<<32)) { /* P1 */
+
+ /* first reduction */
+ x = y = hi;
+ hi >>= 32;
+
+ x = lo - x;
+ if (x > lo) hi--;
+
+ y <<= 32;
+ lo = y + x;
+ if (lo < y) hi++;
+
+ /* second reduction */
+ x = y = hi;
+ hi >>= 32;
+
+ x = lo - x;
+ if (x > lo) hi--;
+
+ y <<= 32;
+ lo = y + x;
+ if (lo < y) hi++;
+
+ return (hi || lo >= m ? lo - m : lo);
+ }
+ else if (m & (1ULL<<34)) { /* P2 */
+
+ /* first reduction */
+ x = y = hi;
+ hi >>= 30;
+
+ x = lo - x;
+ if (x > lo) hi--;
+
+ y <<= 34;
+ lo = y + x;
+ if (lo < y) hi++;
+
+ /* second reduction */
+ x = y = hi;
+ hi >>= 30;
+
+ x = lo - x;
+ if (x > lo) hi--;
+
+ y <<= 34;
+ lo = y + x;
+ if (lo < y) hi++;
+
+ /* third reduction */
+ x = y = hi;
+ hi >>= 30;
+
+ x = lo - x;
+ if (x > lo) hi--;
+
+ y <<= 34;
+ lo = y + x;
+ if (lo < y) hi++;
+
+ return (hi || lo >= m ? lo - m : lo);
+ }
+ else { /* P3 */
+
+ /* first reduction */
+ x = y = hi;
+ hi >>= 24;
+
+ x = lo - x;
+ if (x > lo) hi--;
+
+ y <<= 40;
+ lo = y + x;
+ if (lo < y) hi++;
+
+ /* second reduction */
+ x = y = hi;
+ hi >>= 24;
+
+ x = lo - x;
+ if (x > lo) hi--;
+
+ y <<= 40;
+ lo = y + x;
+ if (lo < y) hi++;
+
+ /* third reduction */
+ x = y = hi;
+ hi >>= 24;
+
+ x = lo - x;
+ if (x > lo) hi--;
+
+ y <<= 40;
+ lo = y + x;
+ if (lo < y) hi++;
+
+ return (hi || lo >= m ? lo - m : lo);
+ }
+}
+
+static inline void
+x64_mulmod2c(mpd_uint_t *a, mpd_uint_t *b, mpd_uint_t w, mpd_uint_t m)
+{
+ *a = x64_mulmod(*a, w, m);
+ *b = x64_mulmod(*b, w, m);
+}
+
+static inline void
+x64_mulmod2(mpd_uint_t *a0, mpd_uint_t b0, mpd_uint_t *a1, mpd_uint_t b1,
+ mpd_uint_t m)
+{
+ *a0 = x64_mulmod(*a0, b0, m);
+ *a1 = x64_mulmod(*a1, b1, m);
+}
+
+static inline mpd_uint_t
+x64_powmod(mpd_uint_t base, mpd_uint_t exp, mpd_uint_t umod)
+{
+ mpd_uint_t r = 1;
+
+ while (exp > 0) {
+ if (exp & 1)
+ r = x64_mulmod(r, base, umod);
+ base = x64_mulmod(base, base, umod);
+ exp >>= 1;
+ }
+
+ return r;
+}
+
+/* END CONFIG_64 */
+#else /* CONFIG_32 */
+
+
+/**************************************************************************/
+/* 32-bit modular arithmetic */
+/**************************************************************************/
+
+#if defined(ANSI)
+#if !defined(LEGACY_COMPILER)
+/* HAVE_UINT64_T */
+static inline mpd_uint_t
+std_mulmod(mpd_uint_t a, mpd_uint_t b, mpd_uint_t m)
+{
+ return ((mpd_uuint_t) a * b) % m;
+}
+
+static inline void
+std_mulmod2c(mpd_uint_t *a, mpd_uint_t *b, mpd_uint_t w, mpd_uint_t m)
+{
+ *a = ((mpd_uuint_t) *a * w) % m;
+ *b = ((mpd_uuint_t) *b * w) % m;
+}
+
+static inline void
+std_mulmod2(mpd_uint_t *a0, mpd_uint_t b0, mpd_uint_t *a1, mpd_uint_t b1,
+ mpd_uint_t m)
+{
+ *a0 = ((mpd_uuint_t) *a0 * b0) % m;
+ *a1 = ((mpd_uuint_t) *a1 * b1) % m;
+}
+/* END HAVE_UINT64_T */
+#else
+/* LEGACY_COMPILER */
+static inline mpd_uint_t
+std_mulmod(mpd_uint_t a, mpd_uint_t b, mpd_uint_t m)
+{
+ mpd_uint_t hi, lo, q, r;
+ _mpd_mul_words(&hi, &lo, a, b);
+ _mpd_div_words(&q, &r, hi, lo, m);
+ return r;
+}
+
+static inline void
+std_mulmod2c(mpd_uint_t *a, mpd_uint_t *b, mpd_uint_t w, mpd_uint_t m)
+{
+ *a = std_mulmod(*a, w, m);
+ *b = std_mulmod(*b, w, m);
+}
+
+static inline void
+std_mulmod2(mpd_uint_t *a0, mpd_uint_t b0, mpd_uint_t *a1, mpd_uint_t b1,
+ mpd_uint_t m)
+{
+ *a0 = std_mulmod(*a0, b0, m);
+ *a1 = std_mulmod(*a1, b1, m);
+}
+/* END LEGACY_COMPILER */
+#endif
+
+static inline mpd_uint_t
+std_powmod(mpd_uint_t base, mpd_uint_t exp, mpd_uint_t umod)
+{
+ mpd_uint_t r = 1;
+
+ while (exp > 0) {
+ if (exp & 1)
+ r = std_mulmod(r, base, umod);
+ base = std_mulmod(base, base, umod);
+ exp >>= 1;
+ }
+
+ return r;
+}
+#endif /* ANSI CONFIG_32 */
+
+
+/**************************************************************************/
+/* Pentium Pro modular arithmetic */
+/**************************************************************************/
+
+/*
+ * A proof of the algorithm is in literature/mulmod-ppro.txt. The FPU
+ * control word must be set to 64-bit precision and truncation mode
+ * prior to using these functions.
+ *
+ * Algorithm: calculate (a * b) % p:
+ *
+ * p := prime < 2**31
+ * pinv := (long double)1.0 / p (precalculated)
+ *
+ * a) n = a * b # Calculate exact product.
+ * b) qest = n * pinv # Calculate estimate for q = n / p.
+ * c) q = (qest+2**63)-2**63 # Truncate qest to the exact quotient.
+ * d) r = n - q * p # Calculate remainder.
+ *
+ * Remarks:
+ *
+ * - p = dmod and pinv = dinvmod.
+ * - dinvmod points to an array of three uint32_t, which is interpreted
+ * as an 80 bit long double by fldt.
+ * - Intel compilers prior to version 11 do not seem to handle the
+ * __GNUC__ inline assembly correctly.
+ * - random tests are provided in tests/extended/ppro_mulmod.c
+ */
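+
+/*
+ * For orientation only, a rough C rendering of steps a) - d) above (an
+ * illustrative sketch, not what the library compiles; the real code is the
+ * x87 assembly below, which relies on the precalculated dinvmod and on the
+ * FPU control word settings mentioned above):
+ *
+ *     long double n = (long double)a * (long double)b;
+ *     long double qest = n * pinv;
+ *     long double q = (qest + MPD_TWO63) - MPD_TWO63;    truncate to integer
+ *     return (mpd_uint_t)(n - q * p);
+ */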
+
+#if defined(PPRO)
+#if defined(ASM)
+
+/* Return (a * b) % dmod */
+static inline mpd_uint_t
+ppro_mulmod(mpd_uint_t a, mpd_uint_t b, double *dmod, uint32_t *dinvmod)
+{
+ mpd_uint_t retval;
+
+ asm (
+ "fildl %2\n\t"
+ "fildl %1\n\t"
+ "fmulp %%st, %%st(1)\n\t"
+ "fldt (%4)\n\t"
+ "fmul %%st(1), %%st\n\t"
+ "flds %5\n\t"
+ "fadd %%st, %%st(1)\n\t"
+ "fsubrp %%st, %%st(1)\n\t"
+ "fldl (%3)\n\t"
+ "fmulp %%st, %%st(1)\n\t"
+ "fsubrp %%st, %%st(1)\n\t"
+ "fistpl %0\n\t"
+ : "=m" (retval)
+ : "m" (a), "m" (b), "r" (dmod), "r" (dinvmod), "m" (MPD_TWO63)
+ : "st", "memory"
+ );
+
+ return retval;
+}
+
+/*
+ * Two modular multiplications in parallel:
+ * *a0 = (*a0 * w) % dmod
+ * *a1 = (*a1 * w) % dmod
+ */
+static inline void
+ppro_mulmod2c(mpd_uint_t *a0, mpd_uint_t *a1, mpd_uint_t w,
+ double *dmod, uint32_t *dinvmod)
+{
+ asm (
+ "fildl %2\n\t"
+ "fildl (%1)\n\t"
+ "fmul %%st(1), %%st\n\t"
+ "fxch %%st(1)\n\t"
+ "fildl (%0)\n\t"
+ "fmulp %%st, %%st(1) \n\t"
+ "fldt (%4)\n\t"
+ "flds %5\n\t"
+ "fld %%st(2)\n\t"
+ "fmul %%st(2)\n\t"
+ "fadd %%st(1)\n\t"
+ "fsub %%st(1)\n\t"
+ "fmull (%3)\n\t"
+ "fsubrp %%st, %%st(3)\n\t"
+ "fxch %%st(2)\n\t"
+ "fistpl (%0)\n\t"
+ "fmul %%st(2)\n\t"
+ "fadd %%st(1)\n\t"
+ "fsubp %%st, %%st(1)\n\t"
+ "fmull (%3)\n\t"
+ "fsubrp %%st, %%st(1)\n\t"
+ "fistpl (%1)\n\t"
+ : : "r" (a0), "r" (a1), "m" (w),
+ "r" (dmod), "r" (dinvmod),
+ "m" (MPD_TWO63)
+ : "st", "memory"
+ );
+}
+
+/*
+ * Two modular multiplications in parallel:
+ * *a0 = (*a0 * b0) % dmod
+ * *a1 = (*a1 * b1) % dmod
+ */
+static inline void
+ppro_mulmod2(mpd_uint_t *a0, mpd_uint_t b0, mpd_uint_t *a1, mpd_uint_t b1,
+ double *dmod, uint32_t *dinvmod)
+{
+ asm (
+ "fildl %3\n\t"
+ "fildl (%2)\n\t"
+ "fmulp %%st, %%st(1)\n\t"
+ "fildl %1\n\t"
+ "fildl (%0)\n\t"
+ "fmulp %%st, %%st(1)\n\t"
+ "fldt (%5)\n\t"
+ "fld %%st(2)\n\t"
+ "fmul %%st(1), %%st\n\t"
+ "fxch %%st(1)\n\t"
+ "fmul %%st(2), %%st\n\t"
+ "flds %6\n\t"
+ "fldl (%4)\n\t"
+ "fxch %%st(3)\n\t"
+ "fadd %%st(1), %%st\n\t"
+ "fxch %%st(2)\n\t"
+ "fadd %%st(1), %%st\n\t"
+ "fxch %%st(2)\n\t"
+ "fsub %%st(1), %%st\n\t"
+ "fxch %%st(2)\n\t"
+ "fsubp %%st, %%st(1)\n\t"
+ "fxch %%st(1)\n\t"
+ "fmul %%st(2), %%st\n\t"
+ "fxch %%st(1)\n\t"
+ "fmulp %%st, %%st(2)\n\t"
+ "fsubrp %%st, %%st(3)\n\t"
+ "fsubrp %%st, %%st(1)\n\t"
+ "fxch %%st(1)\n\t"
+ "fistpl (%2)\n\t"
+ "fistpl (%0)\n\t"
+ : : "r" (a0), "m" (b0), "r" (a1), "m" (b1),
+ "r" (dmod), "r" (dinvmod),
+ "m" (MPD_TWO63)
+ : "st", "memory"
+ );
+}
+/* END PPRO GCC ASM */
+#elif defined(MASM)
+
+/* Return (a * b) % dmod */
+static inline mpd_uint_t __cdecl
+ppro_mulmod(mpd_uint_t a, mpd_uint_t b, double *dmod, uint32_t *dinvmod)
+{
+ mpd_uint_t retval;
+
+ __asm {
+ mov eax, dinvmod
+ mov edx, dmod
+ fild b
+ fild a
+ fmulp st(1), st
+ fld TBYTE PTR [eax]
+ fmul st, st(1)
+ fld MPD_TWO63
+ fadd st(1), st
+ fsubp st(1), st
+ fld QWORD PTR [edx]
+ fmulp st(1), st
+ fsubp st(1), st
+ fistp retval
+ }
+
+ return retval;
+}
+
+/*
+ * Two modular multiplications in parallel:
+ * *a0 = (*a0 * w) % dmod
+ * *a1 = (*a1 * w) % dmod
+ */
+static inline void __cdecl
+ppro_mulmod2c(mpd_uint_t *a0, mpd_uint_t *a1, mpd_uint_t w,
+ double *dmod, uint32_t *dinvmod)
+{
+ __asm {
+ mov ecx, dmod
+ mov edx, a1
+ mov ebx, dinvmod
+ mov eax, a0
+ fild w
+ fild DWORD PTR [edx]
+ fmul st, st(1)
+ fxch st(1)
+ fild DWORD PTR [eax]
+ fmulp st(1), st
+ fld TBYTE PTR [ebx]
+ fld MPD_TWO63
+ fld st(2)
+ fmul st, st(2)
+ fadd st, st(1)
+ fsub st, st(1)
+ fmul QWORD PTR [ecx]
+ fsubp st(3), st
+ fxch st(2)
+ fistp DWORD PTR [eax]
+ fmul st, st(2)
+ fadd st, st(1)
+ fsubrp st(1), st
+ fmul QWORD PTR [ecx]
+ fsubp st(1), st
+ fistp DWORD PTR [edx]
+ }
+}
+
+/*
+ * Two modular multiplications in parallel:
+ * *a0 = (*a0 * b0) % dmod
+ * *a1 = (*a1 * b1) % dmod
+ */
+static inline void __cdecl
+ppro_mulmod2(mpd_uint_t *a0, mpd_uint_t b0, mpd_uint_t *a1, mpd_uint_t b1,
+ double *dmod, uint32_t *dinvmod)
+{
+ __asm {
+ mov ecx, dmod
+ mov edx, a1
+ mov ebx, dinvmod
+ mov eax, a0
+ fild b1
+ fild DWORD PTR [edx]
+ fmulp st(1), st
+ fild b0
+ fild DWORD PTR [eax]
+ fmulp st(1), st
+ fld TBYTE PTR [ebx]
+ fld st(2)
+ fmul st, st(1)
+ fxch st(1)
+ fmul st, st(2)
+ fld DWORD PTR MPD_TWO63
+ fld QWORD PTR [ecx]
+ fxch st(3)
+ fadd st, st(1)
+ fxch st(2)
+ fadd st, st(1)
+ fxch st(2)
+ fsub st, st(1)
+ fxch st(2)
+ fsubrp st(1), st
+ fxch st(1)
+ fmul st, st(2)
+ fxch st(1)
+ fmulp st(2), st
+ fsubp st(3), st
+ fsubp st(1), st
+ fxch st(1)
+ fistp DWORD PTR [edx]
+ fistp DWORD PTR [eax]
+ }
+}
+#endif /* PPRO MASM (_MSC_VER) */
+
+
+/* Return (base ** exp) % dmod */
+static inline mpd_uint_t
+ppro_powmod(mpd_uint_t base, mpd_uint_t exp, double *dmod, uint32_t *dinvmod)
+{
+ mpd_uint_t r = 1;
+
+ while (exp > 0) {
+ if (exp & 1)
+ r = ppro_mulmod(r, base, dmod, dinvmod);
+ base = ppro_mulmod(base, base, dmod, dinvmod);
+ exp >>= 1;
+ }
+
+ return r;
+}
+#endif /* PPRO */
+#endif /* CONFIG_32 */
+
+
+#endif /* UMODARITH_H */
+
+
+
diff --git a/Modules/_decimal/libmpdec/vccompat.h b/Modules/_decimal/libmpdec/vccompat.h
new file mode 100644
index 0000000000..276e0372cb
--- /dev/null
+++ b/Modules/_decimal/libmpdec/vccompat.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef VCCOMPAT_H
+#define VCCOMPAT_H
+
+
+/* Visual C fixes: no stdint.h, no snprintf ... */
+#ifdef _MSC_VER
+ #include "vcstdint.h"
+ #undef inline
+ #define inline __inline
+ #undef random
+ #define random rand
+ #undef srandom
+ #define srandom srand
+ #undef snprintf
+ #define snprintf sprintf_s
+ #define HAVE_SNPRINTF
+ #undef strncasecmp
+ #define strncasecmp _strnicmp
+ #undef strcasecmp
+ #define strcasecmp _stricmp
+ #undef strtoll
+ #define strtoll _strtoi64
+ #define strdup _strdup
+ #define PRIi64 "I64i"
+ #define PRIu64 "I64u"
+ #define PRIi32 "I32i"
+ #define PRIu32 "I32u"
+#endif
+
+
+#endif /* VCCOMPAT_H */
+
+
+
diff --git a/Modules/_decimal/libmpdec/vcdiv64.asm b/Modules/_decimal/libmpdec/vcdiv64.asm
new file mode 100644
index 0000000000..31bba08b0c
--- /dev/null
+++ b/Modules/_decimal/libmpdec/vcdiv64.asm
@@ -0,0 +1,48 @@
+;
+; Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; 1. Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+;
+; 2. Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in the
+; documentation and/or other materials provided with the distribution.
+;
+; THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+; ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+; OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+; HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+; LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+; OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+; SUCH DAMAGE.
+;
+
+
+PUBLIC _mpd_div_words
+_TEXT SEGMENT
+q$ = 8
+r$ = 16
+hi$ = 24
+lo$ = 32
+d$ = 40
+_mpd_div_words PROC
+ mov r10, rdx
+ mov rdx, r8
+ mov rax, r9
+ div QWORD PTR d$[rsp]
+ mov QWORD PTR [r10], rdx
+ mov QWORD PTR [rcx], rax
+ ret 0
+_mpd_div_words ENDP
+_TEXT ENDS
+END
+
+
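vcdiv64.asm supplies _mpd_div_words() for 64-bit Windows builds, where MSVC offers neither
inline assembly nor a 128-bit integer type. Under the Microsoft x64 calling convention the
pointer arguments q and r arrive in rcx and rdx, hi and lo in r8 and r9, and d on the
stack; the routine loads hi:lo into rdx:rax, divides by d with the hardware DIV
instruction, and stores the remainder through r and the quotient through q. On compilers
that do have a 128-bit type, the same semantics can be sketched in C (illustrative only,
not the code used elsewhere in libmpdec):

    #include <stdint.h>

    /* Illustrative C equivalent of vcdiv64.asm, assuming a compiler that
       provides unsigned __int128 (GCC/Clang). As with the hardware DIV
       instruction, the caller must ensure hi < d so the quotient fits in
       64 bits. */
    static void
    div_words_sketch(uint64_t *q, uint64_t *r, uint64_t hi, uint64_t lo, uint64_t d)
    {
        unsigned __int128 n = ((unsigned __int128)hi << 64) | lo;

        *q = (uint64_t)(n / d);
        *r = (uint64_t)(n % d);
    }
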
diff --git a/Modules/_decimal/libmpdec/vcstdint.h b/Modules/_decimal/libmpdec/vcstdint.h
new file mode 100644
index 0000000000..e032ff1605
--- /dev/null
+++ b/Modules/_decimal/libmpdec/vcstdint.h
@@ -0,0 +1,232 @@
+// ISO C9x compliant stdint.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+// Copyright (c) 2006-2008 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. The name of the author may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef _MSC_VER // [
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif // _MSC_VER ]
+
+#ifndef _MSC_STDINT_H_ // [
+#define _MSC_STDINT_H_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include <limits.h>
+
+// For Visual Studio 6 in C++ mode, wrap the <wchar.h> include with 'extern "C++" {}'
+// or the compiler gives many errors like this:
+// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
+#if (_MSC_VER < 1300) && defined(__cplusplus)
+ extern "C++" {
+#endif
+# include <wchar.h>
+#if (_MSC_VER < 1300) && defined(__cplusplus)
+ }
+#endif
+
+// Define _W64 macros to mark types changing their size, like intptr_t.
+#ifndef _W64
+# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
+# define _W64 __w64
+# else
+# define _W64
+# endif
+#endif
+
+
+// 7.18.1 Integer types
+
+// 7.18.1.1 Exact-width integer types
+typedef __int8 int8_t;
+typedef __int16 int16_t;
+typedef __int32 int32_t;
+typedef __int64 int64_t;
+typedef unsigned __int8 uint8_t;
+typedef unsigned __int16 uint16_t;
+typedef unsigned __int32 uint32_t;
+typedef unsigned __int64 uint64_t;
+
+// 7.18.1.2 Minimum-width integer types
+typedef int8_t int_least8_t;
+typedef int16_t int_least16_t;
+typedef int32_t int_least32_t;
+typedef int64_t int_least64_t;
+typedef uint8_t uint_least8_t;
+typedef uint16_t uint_least16_t;
+typedef uint32_t uint_least32_t;
+typedef uint64_t uint_least64_t;
+
+// 7.18.1.3 Fastest minimum-width integer types
+typedef int8_t int_fast8_t;
+typedef int16_t int_fast16_t;
+typedef int32_t int_fast32_t;
+typedef int64_t int_fast64_t;
+typedef uint8_t uint_fast8_t;
+typedef uint16_t uint_fast16_t;
+typedef uint32_t uint_fast32_t;
+typedef uint64_t uint_fast64_t;
+
+// 7.18.1.4 Integer types capable of holding object pointers
+#ifdef _WIN64 // [
+ typedef __int64 intptr_t;
+ typedef unsigned __int64 uintptr_t;
+#else // _WIN64 ][
+ typedef _W64 int intptr_t;
+ typedef _W64 unsigned int uintptr_t;
+#endif // _WIN64 ]
+
+// 7.18.1.5 Greatest-width integer types
+typedef int64_t intmax_t;
+typedef uint64_t uintmax_t;
+
+
+// 7.18.2 Limits of specified-width integer types
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
+
+// 7.18.2.1 Limits of exact-width integer types
+#define INT8_MIN ((int8_t)_I8_MIN)
+#define INT8_MAX _I8_MAX
+#define INT16_MIN ((int16_t)_I16_MIN)
+#define INT16_MAX _I16_MAX
+#define INT32_MIN ((int32_t)_I32_MIN)
+#define INT32_MAX _I32_MAX
+#define INT64_MIN ((int64_t)_I64_MIN)
+#define INT64_MAX _I64_MAX
+#define UINT8_MAX _UI8_MAX
+#define UINT16_MAX _UI16_MAX
+#define UINT32_MAX _UI32_MAX
+#define UINT64_MAX _UI64_MAX
+
+// 7.18.2.2 Limits of minimum-width integer types
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MIN INT64_MIN
+#define INT_LEAST64_MAX INT64_MAX
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+// 7.18.2.3 Limits of fastest minimum-width integer types
+#define INT_FAST8_MIN INT8_MIN
+#define INT_FAST8_MAX INT8_MAX
+#define INT_FAST16_MIN INT16_MIN
+#define INT_FAST16_MAX INT16_MAX
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST32_MAX INT32_MAX
+#define INT_FAST64_MIN INT64_MIN
+#define INT_FAST64_MAX INT64_MAX
+#define UINT_FAST8_MAX UINT8_MAX
+#define UINT_FAST16_MAX UINT16_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+// 7.18.2.4 Limits of integer types capable of holding object pointers
+#ifdef _WIN64 // [
+# define INTPTR_MIN INT64_MIN
+# define INTPTR_MAX INT64_MAX
+# define UINTPTR_MAX UINT64_MAX
+#else // _WIN64 ][
+# define INTPTR_MIN INT32_MIN
+# define INTPTR_MAX INT32_MAX
+# define UINTPTR_MAX UINT32_MAX
+#endif // _WIN64 ]
+
+// 7.18.2.5 Limits of greatest-width integer types
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
+// 7.18.3 Limits of other integer types
+
+#ifdef _WIN64 // [
+# define PTRDIFF_MIN _I64_MIN
+# define PTRDIFF_MAX _I64_MAX
+#else // _WIN64 ][
+# define PTRDIFF_MIN _I32_MIN
+# define PTRDIFF_MAX _I32_MAX
+#endif // _WIN64 ]
+
+#define SIG_ATOMIC_MIN INT_MIN
+#define SIG_ATOMIC_MAX INT_MAX
+
+#ifndef SIZE_MAX // [
+# ifdef _WIN64 // [
+# define SIZE_MAX _UI64_MAX
+# else // _WIN64 ][
+# define SIZE_MAX _UI32_MAX
+# endif // _WIN64 ]
+#endif // SIZE_MAX ]
+
+// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
+#ifndef WCHAR_MIN // [
+# define WCHAR_MIN 0
+#endif // WCHAR_MIN ]
+#ifndef WCHAR_MAX // [
+# define WCHAR_MAX _UI16_MAX
+#endif // WCHAR_MAX ]
+
+#define WINT_MIN 0
+#define WINT_MAX _UI16_MAX
+
+#endif // __STDC_LIMIT_MACROS ]
+
+
+// 7.18.4 Macros for integer constants
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
+
+// 7.18.4.1 Macros for minimum-width integer constants
+
+#define INT8_C(val) val##i8
+#define INT16_C(val) val##i16
+#define INT32_C(val) val##i32
+#define INT64_C(val) val##i64
+
+#define UINT8_C(val) val##ui8
+#define UINT16_C(val) val##ui16
+#define UINT32_C(val) val##ui32
+#define UINT64_C(val) val##ui64
+
+// 7.18.4.2 Macros for greatest-width integer constants
+#define INTMAX_C INT64_C
+#define UINTMAX_C UINT64_C
+
+#endif // __STDC_CONSTANT_MACROS ]
+
+
+#endif // _MSC_STDINT_H_ ]
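The constant macros at the end mirror C99's INTN_C()/UINTN_C(): they paste the
MSVC-specific i64/ui64 suffix onto a literal, where a conforming <stdint.h> would
typically use LL/ULL. A small sketch of why such a macro is needed at all (illustrative
only; not code from libmpdec):

    #ifdef _MSC_VER
      #include "vcstdint.h"
    #else
      #include <stdint.h>
    #endif
    #include <stdio.h>

    int main(void)
    {
        /* 10**19 does not fit in 32 bits; UINT64_C() pastes the suffix the
           compiler needs (ui64 with this header, typically ULL elsewhere). */
        uint64_t radix = UINT64_C(10000000000000000000);

        printf("UINT64_MAX / radix = %d\n", (int)(UINT64_MAX / radix));
        return 0;
    }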